2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
22 from os import path as osp
23 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
26 from portage import digraph
27 from portage.const import NEWS_LIB_PATH
30 import portage.xpak, commands, errno, re, socket, time
31 from portage.output import blue, bold, colorize, darkblue, darkgreen, green, \
32 nc_len, red, teal, turquoise, xtermTitle, \
33 xtermTitleReset, yellow
34 from portage.output import create_color_func
35 good = create_color_func("GOOD")
36 bad = create_color_func("BAD")
37 # white looks bad on terminals with white background
38 from portage.output import bold as white
42 portage.dep._dep_check_strict = True
45 import portage.exception
46 from portage.cache.cache_errors import CacheError
47 from portage.data import secpass
48 from portage.elog.messages import eerror
49 from portage.util import normalize_path as normpath
50 from portage.util import cmp_sort_key, writemsg, writemsg_level
51 from portage.sets import load_default_config, SETPREFIX
52 from portage.sets.base import InternalPackageSet
54 from itertools import chain, izip
56 from _emerge.SlotObject import SlotObject
57 from _emerge.DepPriority import DepPriority
58 from _emerge.BlockerDepPriority import BlockerDepPriority
59 from _emerge.UnmergeDepPriority import UnmergeDepPriority
60 from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
61 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
62 from _emerge.Task import Task
63 from _emerge.Blocker import Blocker
64 from _emerge.PollConstants import PollConstants
65 from _emerge.AsynchronousTask import AsynchronousTask
66 from _emerge.CompositeTask import CompositeTask
67 from _emerge.EbuildFetcher import EbuildFetcher
68 from _emerge.EbuildBuild import EbuildBuild
69 from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
70 from _emerge.EbuildPhase import EbuildPhase
71 from _emerge.Binpkg import Binpkg
72 from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
73 from _emerge.PackageMerge import PackageMerge
74 from _emerge.DependencyArg import DependencyArg
75 from _emerge.AtomArg import AtomArg
76 from _emerge.PackageArg import PackageArg
77 from _emerge.SetArg import SetArg
78 from _emerge.Dependency import Dependency
79 from _emerge.BlockerCache import BlockerCache
80 from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
81 from _emerge.RepoDisplay import RepoDisplay
82 from _emerge.UseFlagDisplay import UseFlagDisplay
83 from _emerge.PollSelectAdapter import PollSelectAdapter
84 from _emerge.SequentialTaskQueue import SequentialTaskQueue
85 from _emerge.ProgressHandler import ProgressHandler
88 from cStringIO import StringIO
90 from StringIO import StringIO
92 class stdout_spinner(object):
94 "Gentoo Rocks ("+platform.system()+")",
95 "Thank you for using Gentoo. :)",
96 "Are you actually trying to read this?",
97 "How many times have you stared at this?",
98 "We are generating the cache right now",
99 "You are paying too much attention.",
100 "A theory is better than its explanation.",
101 "Phasers locked on target, Captain.",
102 "Thrashing is just virtual crashing.",
103 "To be is to program.",
104 "Real Users hate Real Programmers.",
105 "When all else fails, read the instructions.",
106 "Functionality breeds Contempt.",
107 "The future lies ahead.",
108 "3.1415926535897932384626433832795028841971694",
109 "Sometimes insanity is the only alternative.",
110 "Inaccuracy saves a world of explanation.",
113 twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
117 self.update = self.update_twirl
118 self.scroll_sequence = self.scroll_msgs[
119 int(time.time() * 100) % len(self.scroll_msgs)]
121 self.min_display_latency = 0.05
123 def _return_early(self):
125 Flushing output to the tty too frequently wastes cpu time. Therefore,
126 each update* method should return without doing any output when this
129 cur_time = time.time()
130 if cur_time - self.last_update < self.min_display_latency:
132 self.last_update = cur_time
135 def update_basic(self):
136 self.spinpos = (self.spinpos + 1) % 500
137 if self._return_early():
139 if (self.spinpos % 100) == 0:
140 if self.spinpos == 0:
141 sys.stdout.write(". ")
143 sys.stdout.write(".")
146 def update_scroll(self):
147 if self._return_early():
149 if(self.spinpos >= len(self.scroll_sequence)):
150 sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
151 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
153 sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
155 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
157 def update_twirl(self):
158 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
159 if self._return_early():
161 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
164 def update_quiet(self):
167 def userquery(prompt, responses=None, colours=None):
168 """Displays a prompt and a set of responses, then waits for a response
169 which is checked against the responses and the first to match is
170 returned. An empty response will match the first value in responses. The
171 input buffer is *not* cleared prior to the prompt!
174 responses: a List of Strings.
175 colours: a List of Functions taking and returning a String, used to
176 process the responses for display. Typically these will be functions
177 like red() but could be e.g. lambda x: "DisplayString".
178 If responses is omitted, defaults to ["Yes", "No"], [green, red].
179 If only colours is omitted, defaults to [bold, ...].
181 Returns a member of the List responses. (If called without optional
182 arguments, returns "Yes" or "No".)
183 KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
185 if responses is None:
186 responses = ["Yes", "No"]
188 create_color_func("PROMPT_CHOICE_DEFAULT"),
189 create_color_func("PROMPT_CHOICE_OTHER")
191 elif colours is None:
193 colours=(colours*len(responses))[:len(responses)]
197 response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
198 for key in responses:
199 # An empty response will match the first value in responses.
200 if response.upper()==key[:len(response)].upper():
202 print "Sorry, response '%s' not understood." % response,
203 except (EOFError, KeyboardInterrupt):
207 actions = frozenset([
208 "clean", "config", "depclean",
209 "info", "list-sets", "metadata",
210 "prune", "regen", "search",
211 "sync", "unmerge", "version",
214 "--ask", "--alphabetical",
215 "--buildpkg", "--buildpkgonly",
216 "--changelog", "--columns",
221 "--fetchonly", "--fetch-all-uri",
222 "--getbinpkg", "--getbinpkgonly",
223 "--help", "--ignore-default-opts",
227 "--nodeps", "--noreplace",
228 "--nospinner", "--oneshot",
229 "--onlydeps", "--pretend",
230 "--quiet", "--resume",
231 "--searchdesc", "--selective",
235 "--usepkg", "--usepkgonly",
242 "b":"--buildpkg", "B":"--buildpkgonly",
243 "c":"--clean", "C":"--unmerge",
244 "d":"--debug", "D":"--deep",
246 "f":"--fetchonly", "F":"--fetch-all-uri",
247 "g":"--getbinpkg", "G":"--getbinpkgonly",
249 "k":"--usepkg", "K":"--usepkgonly",
251 "n":"--noreplace", "N":"--newuse",
252 "o":"--onlydeps", "O":"--nodeps",
253 "p":"--pretend", "P":"--prune",
255 "s":"--search", "S":"--searchdesc",
258 "v":"--verbose", "V":"--version"
261 _emerge_log_dir = '/var/log'
263 def emergelog(xterm_titles, mystr, short_msg=None):
264 if xterm_titles and short_msg:
265 if "HOSTNAME" in os.environ:
266 short_msg = os.environ["HOSTNAME"]+": "+short_msg
267 xtermTitle(short_msg)
269 file_path = os.path.join(_emerge_log_dir, 'emerge.log')
270 mylogfile = open(file_path, "a")
271 portage.util.apply_secpass_permissions(file_path,
272 uid=portage.portage_uid, gid=portage.portage_gid,
276 mylock = portage.locks.lockfile(mylogfile)
277 # seek because we may have gotten held up by the lock.
278 # if so, we may not be positioned at the end of the file.
280 mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
284 portage.locks.unlockfile(mylock)
286 except (IOError,OSError,portage.exception.PortageException), e:
288 print >> sys.stderr, "emergelog():",e
290 def countdown(secs=5, doing="Starting"):
292 print ">>> Waiting",secs,"seconds before starting..."
293 print ">>> (Control-C to abort)...\n"+doing+" in: ",
297 sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
302 # formats a size given in bytes nicely
303 def format_size(mysize):
304 if isinstance(mysize, basestring):
306 if 0 != mysize % 1024:
307 # Always round up to the next kB so that it doesn't show 0 kB when
308 # some small file still needs to be fetched.
309 mysize += 1024 - mysize % 1024
310 mystr=str(mysize/1024)
314 mystr=mystr[:mycount]+","+mystr[mycount:]
318 def getgccversion(chost):
321 return: the current in-use gcc version
324 gcc_ver_command = 'gcc -dumpversion'
325 gcc_ver_prefix = 'gcc-'
327 gcc_not_found_error = red(
328 "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
329 "!!! to update the environment of this terminal and possibly\n" +
330 "!!! other terminals also.\n"
333 mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
334 if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
335 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
337 mystatus, myoutput = commands.getstatusoutput(
338 chost + "-" + gcc_ver_command)
339 if mystatus == os.EX_OK:
340 return gcc_ver_prefix + myoutput
342 mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
343 if mystatus == os.EX_OK:
344 return gcc_ver_prefix + myoutput
346 portage.writemsg(gcc_not_found_error, noiselevel=-1)
347 return "[unavailable]"
349 def getportageversion(portdir, target_root, profile, chost, vardb):
350 profilever = "unavailable"
352 realpath = os.path.realpath(profile)
353 basepath = os.path.realpath(os.path.join(portdir, "profiles"))
354 if realpath.startswith(basepath):
355 profilever = realpath[1 + len(basepath):]
358 profilever = "!" + os.readlink(profile)
361 del realpath, basepath
364 libclist = vardb.match("virtual/libc")
365 libclist += vardb.match("virtual/glibc")
366 libclist = portage.util.unique_array(libclist)
368 xs=portage.catpkgsplit(x)
370 libcver+=","+"-".join(xs[1:])
372 libcver="-".join(xs[1:])
374 libcver="unavailable"
376 gccver = getgccversion(chost)
377 unameout=platform.release()+" "+platform.machine()
379 return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
381 def create_depgraph_params(myopts, myaction):
382 #configure emerge engine parameters
384 # self: include _this_ package regardless of if it is merged.
385 # selective: exclude the package if it is merged
386 # recurse: go into the dependencies
387 # deep: go into the dependencies of already merged packages
388 # empty: pretend nothing is merged
389 # complete: completely account for all known dependencies
390 # remove: build graph for use in removing packages
391 myparams = set(["recurse"])
393 if myaction == "remove":
394 myparams.add("remove")
395 myparams.add("complete")
398 if "--update" in myopts or \
399 "--newuse" in myopts or \
400 "--reinstall" in myopts or \
401 "--noreplace" in myopts:
402 myparams.add("selective")
403 if "--emptytree" in myopts:
404 myparams.add("empty")
405 myparams.discard("selective")
406 if "--nodeps" in myopts:
407 myparams.discard("recurse")
408 if "--deep" in myopts:
410 if "--complete-graph" in myopts:
411 myparams.add("complete")
414 # search functionality
415 class search(object):
426 def __init__(self, root_config, spinner, searchdesc,
427 verbose, usepkg, usepkgonly):
428 """Searches the available and installed packages for the supplied search key.
429 The list of available and installed packages is created at object instantiation.
430 This makes successive searches faster."""
431 self.settings = root_config.settings
432 self.vartree = root_config.trees["vartree"]
433 self.spinner = spinner
434 self.verbose = verbose
435 self.searchdesc = searchdesc
436 self.root_config = root_config
437 self.setconfig = root_config.setconfig
438 self.matches = {"pkg" : []}
443 self.portdb = fake_portdb
444 for attrib in ("aux_get", "cp_all",
445 "xmatch", "findname", "getFetchMap"):
446 setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
450 portdb = root_config.trees["porttree"].dbapi
451 bindb = root_config.trees["bintree"].dbapi
452 vardb = root_config.trees["vartree"].dbapi
454 if not usepkgonly and portdb._have_root_eclass_dir:
455 self._dbs.append(portdb)
457 if (usepkg or usepkgonly) and bindb.cp_all():
458 self._dbs.append(bindb)
460 self._dbs.append(vardb)
461 self._portdb = portdb
466 cp_all.update(db.cp_all())
467 return list(sorted(cp_all))
469 def _aux_get(self, *args, **kwargs):
472 return db.aux_get(*args, **kwargs)
477 def _findname(self, *args, **kwargs):
479 if db is not self._portdb:
480 # We don't want findname to return anything
481 # unless it's an ebuild in a portage tree.
482 # Otherwise, it's already built and we don't
485 func = getattr(db, "findname", None)
487 value = func(*args, **kwargs)
492 def _getFetchMap(self, *args, **kwargs):
494 func = getattr(db, "getFetchMap", None)
496 value = func(*args, **kwargs)
501 def _visible(self, db, cpv, metadata):
502 installed = db is self.vartree.dbapi
503 built = installed or db is not self._portdb
506 pkg_type = "installed"
509 return visible(self.settings,
510 Package(type_name=pkg_type, root_config=self.root_config,
511 cpv=cpv, built=built, installed=installed, metadata=metadata))
513 def _xmatch(self, level, atom):
515 This method does not expand old-style virtuals because it
516 is restricted to returning matches for a single ${CATEGORY}/${PN}
517 and old-style virtual matches are unreliable for that when querying
518 multiple package databases. If necessary, old-style virtuals
519 can be performed on atoms prior to calling this method.
521 cp = portage.dep_getkey(atom)
522 if level == "match-all":
525 if hasattr(db, "xmatch"):
526 matches.update(db.xmatch(level, atom))
528 matches.update(db.match(atom))
529 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
530 db._cpv_sort_ascending(result)
531 elif level == "match-visible":
534 if hasattr(db, "xmatch"):
535 matches.update(db.xmatch(level, atom))
537 db_keys = list(db._aux_cache_keys)
538 for cpv in db.match(atom):
539 metadata = izip(db_keys,
540 db.aux_get(cpv, db_keys))
541 if not self._visible(db, cpv, metadata):
544 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
545 db._cpv_sort_ascending(result)
546 elif level == "bestmatch-visible":
549 if hasattr(db, "xmatch"):
550 cpv = db.xmatch("bestmatch-visible", atom)
551 if not cpv or portage.cpv_getkey(cpv) != cp:
553 if not result or cpv == portage.best([cpv, result]):
556 db_keys = Package.metadata_keys
557 # break out of this loop with highest visible
558 # match, checked in descending order
559 for cpv in reversed(db.match(atom)):
560 if portage.cpv_getkey(cpv) != cp:
562 metadata = izip(db_keys,
563 db.aux_get(cpv, db_keys))
564 if not self._visible(db, cpv, metadata):
566 if not result or cpv == portage.best([cpv, result]):
570 raise NotImplementedError(level)
573 def execute(self,searchkey):
574 """Performs the search for the supplied search key"""
576 self.searchkey=searchkey
577 self.packagematches = []
580 self.matches = {"pkg":[], "desc":[], "set":[]}
583 self.matches = {"pkg":[], "set":[]}
584 print "Searching... ",
587 if self.searchkey.startswith('%'):
589 self.searchkey = self.searchkey[1:]
590 if self.searchkey.startswith('@'):
592 self.searchkey = self.searchkey[1:]
594 self.searchre=re.compile(self.searchkey,re.I)
596 self.searchre=re.compile(re.escape(self.searchkey), re.I)
597 for package in self.portdb.cp_all():
598 self.spinner.update()
601 match_string = package[:]
603 match_string = package.split("/")[-1]
606 if self.searchre.search(match_string):
607 if not self.portdb.xmatch("match-visible", package):
609 self.matches["pkg"].append([package,masked])
610 elif self.searchdesc: # DESCRIPTION searching
611 full_package = self.portdb.xmatch("bestmatch-visible", package)
613 #no match found; we don't want to query description
614 full_package = portage.best(
615 self.portdb.xmatch("match-all", package))
621 full_desc = self.portdb.aux_get(
622 full_package, ["DESCRIPTION"])[0]
624 print "emerge: search: aux_get() failed, skipping"
626 if self.searchre.search(full_desc):
627 self.matches["desc"].append([full_package,masked])
629 self.sdict = self.setconfig.getSets()
630 for setname in self.sdict:
631 self.spinner.update()
633 match_string = setname
635 match_string = setname.split("/")[-1]
637 if self.searchre.search(match_string):
638 self.matches["set"].append([setname, False])
639 elif self.searchdesc:
640 if self.searchre.search(
641 self.sdict[setname].getMetadata("DESCRIPTION")):
642 self.matches["set"].append([setname, False])
645 for mtype in self.matches:
646 self.matches[mtype].sort()
647 self.mlen += len(self.matches[mtype])
650 if not self.portdb.xmatch("match-all", cp):
653 if not self.portdb.xmatch("bestmatch-visible", cp):
655 self.matches["pkg"].append([cp, masked])
659 """Outputs the results of the search."""
660 print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
661 print "[ Applications found : "+white(str(self.mlen))+" ]"
663 vardb = self.vartree.dbapi
664 for mtype in self.matches:
665 for match,masked in self.matches[mtype]:
669 full_package = self.portdb.xmatch(
670 "bestmatch-visible", match)
672 #no match found; we don't want to query description
674 full_package = portage.best(
675 self.portdb.xmatch("match-all",match))
676 elif mtype == "desc":
678 match = portage.cpv_getkey(match)
680 print green("*")+" "+white(match)
681 print " ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION")
685 desc, homepage, license = self.portdb.aux_get(
686 full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
688 print "emerge: search: aux_get() failed, skipping"
691 print green("*")+" "+white(match)+" "+red("[ Masked ]")
693 print green("*")+" "+white(match)
694 myversion = self.getVersion(full_package, search.VERSION_RELEASE)
698 mycat = match.split("/")[0]
699 mypkg = match.split("/")[1]
700 mycpv = match + "-" + myversion
701 myebuild = self.portdb.findname(mycpv)
703 pkgdir = os.path.dirname(myebuild)
704 from portage import manifest
705 mf = manifest.Manifest(
706 pkgdir, self.settings["DISTDIR"])
708 uri_map = self.portdb.getFetchMap(mycpv)
709 except portage.exception.InvalidDependString, e:
710 file_size_str = "Unknown (%s)" % (e,)
714 mysum[0] = mf.getDistfilesSize(uri_map)
716 file_size_str = "Unknown (missing " + \
717 "digest for %s)" % (e,)
722 if db is not vardb and \
723 db.cpv_exists(mycpv):
725 if not myebuild and hasattr(db, "bintree"):
726 myebuild = db.bintree.getname(mycpv)
728 mysum[0] = os.stat(myebuild).st_size
733 if myebuild and file_size_str is None:
734 mystr = str(mysum[0] / 1024)
738 mystr = mystr[:mycount] + "," + mystr[mycount:]
739 file_size_str = mystr + " kB"
743 print " ", darkgreen("Latest version available:"),myversion
744 print " ", self.getInstallationStatus(mycat+'/'+mypkg)
747 (darkgreen("Size of files:"), file_size_str)
748 print " ", darkgreen("Homepage:")+" ",homepage
749 print " ", darkgreen("Description:")+" ",desc
750 print " ", darkgreen("License:")+" ",license
755 def getInstallationStatus(self,package):
756 installed_package = self.vartree.dep_bestmatch(package)
758 version = self.getVersion(installed_package,search.VERSION_RELEASE)
760 result = darkgreen("Latest version installed:")+" "+version
762 result = darkgreen("Latest version installed:")+" [ Not Installed ]"
765 def getVersion(self,full_package,detail):
766 if len(full_package) > 1:
767 package_parts = portage.catpkgsplit(full_package)
768 if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
769 result = package_parts[2]+ "-" + package_parts[3]
771 result = package_parts[2]
776 class RootConfig(object):
777 """This is used internally by depgraph to track information about a
781 "ebuild" : "porttree",
782 "binary" : "bintree",
783 "installed" : "vartree"
787 for k, v in pkg_tree_map.iteritems():
790 def __init__(self, settings, trees, setconfig):
792 self.settings = settings
793 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
794 self.root = self.settings["ROOT"]
795 self.setconfig = setconfig
796 if setconfig is None:
799 self.sets = self.setconfig.getSets()
800 self.visible_pkgs = PackageVirtualDbapi(self.settings)
802 def create_world_atom(pkg, args_set, root_config):
803 """Create a new atom for the world file if one does not exist. If the
804 argument atom is precise enough to identify a specific slot then a slot
805 atom will be returned. Atoms that are in the system set may also be stored
806 in world since system atoms can only match one slot while world atoms can
807 be greedy with respect to slots. Unslotted system packages will not be
810 arg_atom = args_set.findAtomForPackage(pkg)
813 cp = portage.dep_getkey(arg_atom)
815 sets = root_config.sets
816 portdb = root_config.trees["porttree"].dbapi
817 vardb = root_config.trees["vartree"].dbapi
818 available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
819 for cpv in portdb.match(cp))
820 slotted = len(available_slots) > 1 or \
821 (len(available_slots) == 1 and "0" not in available_slots)
823 # check the vdb in case this is multislot
824 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
825 for cpv in vardb.match(cp))
826 slotted = len(available_slots) > 1 or \
827 (len(available_slots) == 1 and "0" not in available_slots)
828 if slotted and arg_atom != cp:
829 # If the user gave a specific atom, store it as a
830 # slot atom in the world file.
831 slot_atom = pkg.slot_atom
833 # For USE=multislot, there are a couple of cases to
836 # 1) SLOT="0", but the real SLOT spontaneously changed to some
837 # unknown value, so just record an unslotted atom.
839 # 2) SLOT comes from an installed package and there is no
840 # matching SLOT in the portage tree.
842 # Make sure that the slot atom is available in either the
843 # portdb or the vardb, since otherwise the user certainly
844 # doesn't want the SLOT atom recorded in the world file
845 # (case 1 above). If it's only available in the vardb,
846 # the user may be trying to prevent a USE=multislot
847 # package from being removed by --depclean (case 2 above).
850 if not portdb.match(slot_atom):
851 # SLOT seems to come from an installed multislot package
853 # If there is no installed package matching the SLOT atom,
854 # it probably changed SLOT spontaneously due to USE=multislot,
855 # so just record an unslotted atom.
856 if vardb.match(slot_atom):
857 # Now verify that the argument is precise
858 # enough to identify a specific slot.
859 matches = mydb.match(arg_atom)
860 matched_slots = set()
862 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
863 if len(matched_slots) == 1:
864 new_world_atom = slot_atom
866 if new_world_atom == sets["world"].findAtomForPackage(pkg):
867 # Both atoms would be identical, so there's nothing to add.
870 # Unlike world atoms, system atoms are not greedy for slots, so they
871 # can't be safely excluded from world if they are slotted.
872 system_atom = sets["system"].findAtomForPackage(pkg)
874 if not portage.dep_getkey(system_atom).startswith("virtual/"):
876 # System virtuals aren't safe to exclude from world since they can
877 # match multiple old-style virtuals but only one of them will be
878 # pulled in by update or depclean.
879 providers = portdb.mysettings.getvirtuals().get(
880 portage.dep_getkey(system_atom))
881 if providers and len(providers) == 1 and providers[0] == cp:
883 return new_world_atom
885 def filter_iuse_defaults(iuse):
887 if flag.startswith("+") or flag.startswith("-"):
892 def _find_deep_system_runtime_deps(graph):
893 deep_system_deps = set()
896 if not isinstance(node, Package) or \
897 node.operation == 'uninstall':
899 if node.root_config.sets['system'].findAtomForPackage(node):
900 node_stack.append(node)
902 def ignore_priority(priority):
904 Ignore non-runtime priorities.
906 if isinstance(priority, DepPriority) and \
907 (priority.runtime or priority.runtime_post):
912 node = node_stack.pop()
913 if node in deep_system_deps:
915 deep_system_deps.add(node)
916 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
917 if not isinstance(child, Package) or \
918 child.operation == 'uninstall':
920 node_stack.append(child)
922 return deep_system_deps
924 class FakeVartree(portage.vartree):
925 """This implements an in-memory copy of a vartree instance that provides
926 all the interfaces required for use by the depgraph. The vardb is locked
927 during the constructor call just long enough to read a copy of the
928 installed package information. This allows the depgraph to do its
929 dependency calculations without holding a lock on the vardb. It also
930 allows things like vardb global updates to be done in memory so that the
931 user doesn't necessarily need write access to the vardb in cases where
932 global updates are necessary (updates are performed when necessary if there
933 is not a matching ebuild in the tree)."""
934 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
935 self._root_config = root_config
936 if pkg_cache is None:
938 real_vartree = root_config.trees["vartree"]
939 portdb = root_config.trees["porttree"].dbapi
940 self.root = real_vartree.root
941 self.settings = real_vartree.settings
942 mykeys = list(real_vartree.dbapi._aux_cache_keys)
943 if "_mtime_" not in mykeys:
944 mykeys.append("_mtime_")
945 self._db_keys = mykeys
946 self._pkg_cache = pkg_cache
947 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
948 vdb_path = os.path.join(self.root, portage.VDB_PATH)
950 # At least the parent needs to exist for the lock file.
951 portage.util.ensure_dirs(vdb_path)
952 except portage.exception.PortageException:
956 if acquire_lock and os.access(vdb_path, os.W_OK):
957 vdb_lock = portage.locks.lockdir(vdb_path)
958 real_dbapi = real_vartree.dbapi
960 for cpv in real_dbapi.cpv_all():
961 cache_key = ("installed", self.root, cpv, "nomerge")
962 pkg = self._pkg_cache.get(cache_key)
964 metadata = pkg.metadata
966 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
967 myslot = metadata["SLOT"]
968 mycp = portage.dep_getkey(cpv)
969 myslot_atom = "%s:%s" % (mycp, myslot)
971 mycounter = long(metadata["COUNTER"])
974 metadata["COUNTER"] = str(mycounter)
975 other_counter = slot_counters.get(myslot_atom, None)
976 if other_counter is not None:
977 if other_counter > mycounter:
979 slot_counters[myslot_atom] = mycounter
981 pkg = Package(built=True, cpv=cpv,
982 installed=True, metadata=metadata,
983 root_config=root_config, type_name="installed")
984 self._pkg_cache[pkg] = pkg
985 self.dbapi.cpv_inject(pkg)
986 real_dbapi.flush_cache()
989 portage.locks.unlockdir(vdb_lock)
990 # Populate the old-style virtuals using the cached values.
991 if not self.settings.treeVirtuals:
992 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
993 portage.getCPFromCPV, self.get_all_provides())
995 # Initialize variables needed for lazy cache pulls of the live ebuild
996 # metadata. This ensures that the vardb lock is released ASAP, without
997 # being delayed in case cache generation is triggered.
998 self._aux_get = self.dbapi.aux_get
999 self.dbapi.aux_get = self._aux_get_wrapper
1000 self._match = self.dbapi.match
1001 self.dbapi.match = self._match_wrapper
1002 self._aux_get_history = set()
1003 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1004 self._portdb = portdb
1005 self._global_updates = None
1007 def _match_wrapper(self, cpv, use_cache=1):
1009 Make sure the metadata in Package instances gets updated for any
1010 cpv that is returned from a match() call, since the metadata can
1011 be accessed directly from the Package instance instead of via
1014 matches = self._match(cpv, use_cache=use_cache)
1016 if cpv in self._aux_get_history:
1018 self._aux_get_wrapper(cpv, [])
1021 def _aux_get_wrapper(self, pkg, wants):
1022 if pkg in self._aux_get_history:
1023 return self._aux_get(pkg, wants)
1024 self._aux_get_history.add(pkg)
1026 # Use the live ebuild metadata if possible.
1027 live_metadata = dict(izip(self._portdb_keys,
1028 self._portdb.aux_get(pkg, self._portdb_keys)))
1029 if not portage.eapi_is_supported(live_metadata["EAPI"]):
1031 self.dbapi.aux_update(pkg, live_metadata)
1032 except (KeyError, portage.exception.PortageException):
1033 if self._global_updates is None:
1034 self._global_updates = \
1035 grab_global_updates(self._portdb.porttree_root)
1036 perform_global_updates(
1037 pkg, self.dbapi, self._global_updates)
1038 return self._aux_get(pkg, wants)
1040 def sync(self, acquire_lock=1):
1042 Call this method to synchronize state with the real vardb
1043 after one or more packages may have been installed or
1046 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1048 # At least the parent needs to exist for the lock file.
1049 portage.util.ensure_dirs(vdb_path)
1050 except portage.exception.PortageException:
1054 if acquire_lock and os.access(vdb_path, os.W_OK):
1055 vdb_lock = portage.locks.lockdir(vdb_path)
1059 portage.locks.unlockdir(vdb_lock)
1063 real_vardb = self._root_config.trees["vartree"].dbapi
1064 current_cpv_set = frozenset(real_vardb.cpv_all())
1065 pkg_vardb = self.dbapi
1066 aux_get_history = self._aux_get_history
1068 # Remove any packages that have been uninstalled.
1069 for pkg in list(pkg_vardb):
1070 if pkg.cpv not in current_cpv_set:
1071 pkg_vardb.cpv_remove(pkg)
1072 aux_get_history.discard(pkg.cpv)
1074 # Validate counters and timestamps.
1077 validation_keys = ["COUNTER", "_mtime_"]
1078 for cpv in current_cpv_set:
1080 pkg_hash_key = ("installed", root, cpv, "nomerge")
1081 pkg = pkg_vardb.get(pkg_hash_key)
1083 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1085 counter = long(counter)
1089 if counter != pkg.counter or \
1091 pkg_vardb.cpv_remove(pkg)
1092 aux_get_history.discard(pkg.cpv)
1096 pkg = self._pkg(cpv)
1098 other_counter = slot_counters.get(pkg.slot_atom)
1099 if other_counter is not None:
1100 if other_counter > pkg.counter:
1103 slot_counters[pkg.slot_atom] = pkg.counter
1104 pkg_vardb.cpv_inject(pkg)
1106 real_vardb.flush_cache()
1108 def _pkg(self, cpv):
1109 root_config = self._root_config
1110 real_vardb = root_config.trees["vartree"].dbapi
1111 pkg = Package(cpv=cpv, installed=True,
1112 metadata=izip(self._db_keys,
1113 real_vardb.aux_get(cpv, self._db_keys)),
1114 root_config=root_config,
1115 type_name="installed")
1118 mycounter = long(pkg.metadata["COUNTER"])
1121 pkg.metadata["COUNTER"] = str(mycounter)
1125 def grab_global_updates(portdir):
1126 from portage.update import grab_updates, parse_updates
1127 updpath = os.path.join(portdir, "profiles", "updates")
1129 rawupdates = grab_updates(updpath)
1130 except portage.exception.DirectoryNotFound:
1133 for mykey, mystat, mycontent in rawupdates:
1134 commands, errors = parse_updates(mycontent)
1135 upd_commands.extend(commands)
1138 def perform_global_updates(mycpv, mydb, mycommands):
1139 from portage.update import update_dbentries
1140 aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1141 aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1142 updates = update_dbentries(mycommands, aux_dict)
1144 mydb.aux_update(mycpv, updates)
# Visibility filter: decide whether pkg may be selected under pkgsettings.
# NOTE(review): the docstring delimiters and the return statements that
# follow each check are missing from this extraction; each test below is
# presumably a short-circuit (return False) -- confirm against full source.
1146 def visible(pkgsettings, pkg):
1148 Check if a package is visible. This can raise an InvalidDependString
1149 exception if LICENSE is invalid.
1150 TODO: optionally generate a list of masking reasons
1152 @returns: True if the package is visible, False otherwise.
# A package with an empty SLOT has invalid metadata.
1154 if not pkg.metadata["SLOT"]:
# CHOST acceptance is only relevant for packages not yet installed.
1156 if not pkg.installed:
1157 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1159 eapi = pkg.metadata["EAPI"]
1160 if not portage.eapi_is_supported(eapi):
# Deprecated EAPIs are only rejected for not-yet-installed packages.
1162 if not pkg.installed:
1163 if portage._eapi_is_deprecated(eapi):
# Keyword, package.mask and profile mask checks.
1165 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1167 if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1169 if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
# LICENSE evaluation may raise InvalidDependString (caught below).
1172 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1174 except portage.exception.InvalidDependString:
# Collect human-readable masking reasons for pkg, starting from
# portage.getmaskingstatus() and adding CHOST / SLOT sanity checks.
# NOTE(review): the extraction is missing trailing lines -- presumably the
# "return mreasons" -- confirm against the full source.
1178 def get_masking_status(pkg, pkgsettings, root_config):
1180 mreasons = portage.getmaskingstatus(
1181 pkg, settings=pkgsettings,
1182 portdb=root_config.trees["porttree"].dbapi)
# CHOST acceptance is only checked for packages not yet installed.
1184 if not pkg.installed:
1185 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1186 mreasons.append("CHOST: %s" % \
1187 pkg.metadata["CHOST"])
# An empty SLOT marks the metadata itself as invalid.
1189 if not pkg.metadata["SLOT"]:
1190 mreasons.append("invalid: SLOT is undefined")
# Fetch metadata for cpv from the given db and compute its masking
# reasons; returns (metadata, mreasons).
# NOTE(review): the try/except expected around aux_get() (which would set
# metadata to None on failure) is missing from this extraction.
1194 def get_mask_info(root_config, cpv, pkgsettings,
1195 db, pkg_type, built, installed, db_keys):
1198 metadata = dict(izip(db_keys,
1199 db.aux_get(cpv, db_keys)))
# For unbuilt ebuilds, derive USE/CHOST from the current settings.
1202 if metadata and not built:
1203 pkgsettings.setcpv(cpv, mydb=metadata)
1204 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1205 metadata['CHOST'] = pkgsettings.get('CHOST', '')
# A None metadata indicates the aux_get() lookup failed.
1206 if metadata is None:
1207 mreasons = ["corruption"]
1209 eapi = metadata['EAPI']
1212 if not portage.eapi_is_supported(eapi):
1213 mreasons = ['EAPI %s' % eapi]
1215 pkg = Package(type_name=pkg_type, root_config=root_config,
1216 cpv=cpv, built=built, installed=installed, metadata=metadata)
1217 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1218 return metadata, mreasons
# Print each masked package with its masking reasons, the package.mask
# comment that applies (if any), and the locations of missing licenses.
# Returns True if any package was masked by an unsupported EAPI.
# NOTE(review): several lines are missing from this extraction
# (shown_cpvs initializer, continue statements, print/indent lines) --
# comments describe only the visible flow.
1220 def show_masked_packages(masked_packages):
1221 shown_licenses = set()
1222 shown_comments = set()
1223 # Maybe there is both an ebuild and a binary. Only
1224 # show one of them to avoid redundant appearance.
1226 have_eapi_mask = False
1227 for (root_config, pkgsettings, cpv,
1228 metadata, mreasons) in masked_packages:
# Skip duplicates (ebuild + binary of the same cpv).
1229 if cpv in shown_cpvs:
1232 comment, filename = None, None
# Only package.mask entries have an associated comment/location.
1233 if "package.mask" in mreasons:
1234 comment, filename = \
1235 portage.getmaskingreason(
1236 cpv, metadata=metadata,
1237 settings=pkgsettings,
1238 portdb=root_config.trees["porttree"].dbapi,
1239 return_location=True)
1240 missing_licenses = []
1242 if not portage.eapi_is_supported(metadata["EAPI"]):
1243 have_eapi_mask = True
1245 missing_licenses = \
1246 pkgsettings._getMissingLicenses(
# InvalidDependString in LICENSE was already surfaced via mreasons.
1248 except portage.exception.InvalidDependString:
1249 # This will have already been reported
1250 # above via mreasons.
1253 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
# Show each distinct package.mask comment only once.
1254 if comment and comment not in shown_comments:
1257 shown_comments.add(comment)
1258 portdb = root_config.trees["porttree"].dbapi
1259 for l in missing_licenses:
1260 l_path = portdb.findLicensePath(l)
# Show each license location only once across all packages.
1261 if l in shown_licenses:
1263 msg = ("A copy of the '%s' license" + \
1264 " is located at '%s'.") % (l, l_path)
1267 shown_licenses.add(l)
1268 return have_eapi_mask
# A Task subclass representing one package instance (ebuild, binary, or
# installed) together with its metadata, slot atom, and version-comparison
# behavior. Hashing/identity comes from the Task hash key
# (type_name, root, cpv, operation).
# NOTE(review): this extraction is missing lines throughout the class
# (e.g. the "metadata_keys = [" opener, the SLOT handling before
# slot_atom, the _use class header, and the return statements inside the
# comparison methods) -- comments describe only what is visible.
1270 class Package(Task):
1272 __hash__ = Task.__hash__
1273 __slots__ = ("built", "cpv", "depth",
1274 "installed", "metadata", "onlydeps", "operation",
1275 "root_config", "type_name",
1276 "category", "counter", "cp", "cpv_split",
1277 "inherited", "iuse", "mtime",
1278 "pf", "pv_split", "root", "slot", "slot_atom", "use")
# Metadata keys tracked for every Package (list opener missing above).
1281 "CHOST", "COUNTER", "DEPEND", "EAPI",
1282 "INHERITED", "IUSE", "KEYWORDS",
1283 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1284 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1286 def __init__(self, **kwargs):
1287 Task.__init__(self, **kwargs)
1288 self.root = self.root_config.root
# Wrap metadata so that writes synchronize derived Package attributes.
1289 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1290 self.cp = portage.cpv_getkey(self.cpv)
1293 # Avoid an InvalidAtom exception when creating slot_atom.
1294 # This package instance will be masked due to empty SLOT.
1296 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
1297 self.category, self.pf = portage.catsplit(self.cpv)
1298 self.cpv_split = portage.catpkgsplit(self.cpv)
1299 self.pv_split = self.cpv_split[1:]
# Inner helper holding the enabled USE flags as a frozenset
# (class header for this helper is missing from the extraction).
1303 __slots__ = ("__weakref__", "enabled")
1305 def __init__(self, use):
1306 self.enabled = frozenset(use)
# Inner helper modeling IUSE: explicit tokens plus implicit IUSE, with a
# lazily-built validity regex.
1308 class _iuse(object):
1310 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1312 def __init__(self, tokens, iuse_implicit):
1313 self.tokens = tuple(tokens)
1314 self.iuse_implicit = iuse_implicit
# Tokens prefixed +/- mark default-enabled/disabled flags
# (the surrounding loop/conditional lines are missing here).
1321 enabled.append(x[1:])
1323 disabled.append(x[1:])
1326 self.enabled = frozenset(enabled)
1327 self.disabled = frozenset(disabled)
1328 self.all = frozenset(chain(enabled, disabled, other))
# Lazily compile self.regex on first access; other attributes pass
# straight through to the default lookup.
1330 def __getattribute__(self, name):
1333 return object.__getattribute__(self, "regex")
1334 except AttributeError:
1335 all = object.__getattribute__(self, "all")
1336 iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1337 # Escape anything except ".*" which is supposed
1338 # to pass through from _get_implicit_iuse()
1339 regex = (re.escape(x) for x in chain(all, iuse_implicit))
1340 regex = "^(%s)$" % "|".join(regex)
1341 regex = regex.replace("\\.\\*", ".*")
1342 self.regex = re.compile(regex)
1343 return object.__getattribute__(self, name)
# Build the Task hash key, defaulting operation to "merge" (or "nomerge"
# for onlydeps/installed packages).
1345 def _get_hash_key(self):
1346 hash_key = getattr(self, "_hash_key", None)
1347 if hash_key is None:
1348 if self.operation is None:
1349 self.operation = "merge"
1350 if self.onlydeps or self.installed:
1351 self.operation = "nomerge"
1353 (self.type_name, self.root, self.cpv, self.operation)
1354 return self._hash_key
# Rich comparisons order packages by version via portage.pkgcmp(); the
# return statements and the different-cp branches are missing from this
# extraction.
1356 def __lt__(self, other):
1357 if other.cp != self.cp:
1359 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1363 def __le__(self, other):
1364 if other.cp != self.cp:
1366 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1370 def __gt__(self, other):
1371 if other.cp != self.cp:
1373 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1377 def __ge__(self, other):
1378 if other.cp != self.cp:
1380 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
# Build the complete key set for the package metadata wrapper: every
# auxdb key except the UNUSED_ placeholders and the obsolete CDEPEND,
# plus every key Package itself tracks.
1384 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1385 if not x.startswith("UNUSED_"))
1386 _all_metadata_keys.discard("CDEPEND")
1387 _all_metadata_keys.update(Package.metadata_keys)
1389 from portage.cache.mappings import slot_dict_class
# Slot-optimized dict base class restricted to the keys above.
1390 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
# Metadata dict that intercepts writes to selected keys and mirrors them
# onto the owning Package's attributes (counter, inherited, iuse, slot,
# use, mtime).
# NOTE(review): several lines are missing from this extraction (the
# docstring delimiters, the "self._pkg = pkg" assignment, and the bodies
# of _set_slot / _set_counter / _set__mtime_ conversions).
1392 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1394 Detect metadata updates and synchronize Package attributes.
1397 __slots__ = ("_pkg",)
# Keys whose assignment triggers a _set_<key>() callback below.
1398 _wrapped_keys = frozenset(
1399 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1401 def __init__(self, pkg, metadata):
1402 _PackageMetadataWrapperBase.__init__(self)
1404 self.update(metadata)
# Route writes of wrapped keys through the matching _set_* handler.
1406 def __setitem__(self, k, v):
1407 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1408 if k in self._wrapped_keys:
1409 getattr(self, "_set_" + k.lower())(k, v)
# INHERITED: normalize a whitespace-separated string to a frozenset.
1411 def _set_inherited(self, k, v):
1412 if isinstance(v, basestring):
1413 v = frozenset(v.split())
1414 self._pkg.inherited = v
# IUSE: rebuild the Package's _iuse helper from the new token list.
1416 def _set_iuse(self, k, v):
1417 self._pkg.iuse = self._pkg._iuse(
1418 v.split(), self._pkg.root_config.iuse_implicit)
1420 def _set_slot(self, k, v):
# USE: rebuild the Package's _use helper from the new flag list.
1423 def _set_use(self, k, v):
1424 self._pkg.use = self._pkg._use(v.split())
# COUNTER: converted from string before being stored on the Package
# (conversion lines missing from this extraction).
1426 def _set_counter(self, k, v):
1427 if isinstance(v, basestring):
1432 self._pkg.counter = v
1434 def _set__mtime_(self, k, v):
1435 if isinstance(v, basestring):
# Asynchronous task that unmerges a single installed package via
# unmerge(), recording the exit status in self.returncode.
# NOTE(review): lines are missing from this extraction (the _start method
# header, the try: opener, the wait/exit plumbing, and the tail of
# _writemsg_level that writes to the open log file).
1442 class PackageUninstall(AsynchronousTask):
1444 __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
# Perform the unmerge; world-file cleanup and delays are disabled and
# errors are raised as UninstallFailure.
1448 unmerge(self.pkg.root_config, self.opts, "unmerge",
1449 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
1450 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
1451 writemsg_level=self._writemsg_level)
1452 except UninstallFailure, e:
1453 self.returncode = e.status
1455 self.returncode = os.EX_OK
# Route unmerge output either to the console or to PORTAGE_LOG_FILE,
# suppressing sub-WARNING messages when running in the background.
1458 def _writemsg_level(self, msg, level=0, noiselevel=0):
1460 log_path = self.settings.get("PORTAGE_LOG_FILE")
1461 background = self.background
1463 if log_path is None:
1464 if not (background and level < logging.WARNING):
1465 portage.util.writemsg_level(msg,
1466 level=level, noiselevel=noiselevel)
1469 portage.util.writemsg_level(msg,
1470 level=level, noiselevel=noiselevel)
1472 f = open(log_path, 'a')
# Composite task wrapping one entry of the merge list: prints the
# progress banner, then dispatches to EbuildBuild, Binpkg, or
# PackageUninstall depending on the package type/operation.
# NOTE(review): this extraction is missing many lines (method headers for
# _start/_install/etc., several conditionals and returns) -- comments
# describe only the visible flow.
1478 class MergeListItem(CompositeTask):
1481 TODO: For parallel scheduling, everything here needs asynchronous
1482 execution support (start, poll, and wait methods).
1485 __slots__ = ("args_set",
1486 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
1487 "find_blockers", "logger", "mtimedb", "pkg",
1488 "pkg_count", "pkg_to_replace", "prefetcher",
1489 "settings", "statusMessage", "world_atom") + \
# Start of the merge step (its method header is missing above).
1495 build_opts = self.build_opts
1498 # uninstall, executed by self.merge()
1499 self.returncode = os.EX_OK
1503 args_set = self.args_set
1504 find_blockers = self.find_blockers
1505 logger = self.logger
1506 mtimedb = self.mtimedb
1507 pkg_count = self.pkg_count
1508 scheduler = self.scheduler
1509 settings = self.settings
1510 world_atom = self.world_atom
1511 ldpath_mtimes = mtimedb["ldpath"]
# Build the human-readable progress banner.
1513 action_desc = "Emerging"
1515 if pkg.type_name == "binary":
1516 action_desc += " binary"
1518 if build_opts.fetchonly:
1519 action_desc = "Fetching"
1521 msg = "%s (%s of %s) %s" % \
1523 colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
1524 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
1525 colorize("GOOD", pkg.cpv))
# Append the overlay/repository name when it differs from PORTDIR's.
1527 portdb = pkg.root_config.trees["porttree"].dbapi
1528 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
1529 if portdir_repo_name:
1530 pkg_repo_name = pkg.metadata.get("repository")
1531 if pkg_repo_name != portdir_repo_name:
1532 if not pkg_repo_name:
1533 pkg_repo_name = "unknown repo"
1534 msg += " from %s" % pkg_repo_name
1537 msg += " %s %s" % (preposition, pkg.root)
1539 if not build_opts.pretend:
1540 self.statusMessage(msg)
1541 logger.log(" >>> emerge (%s of %s) %s to %s" % \
1542 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# Dispatch: source builds go through EbuildBuild...
1544 if pkg.type_name == "ebuild":
1546 build = EbuildBuild(args_set=args_set,
1547 background=self.background,
1548 config_pool=self.config_pool,
1549 find_blockers=find_blockers,
1550 ldpath_mtimes=ldpath_mtimes, logger=logger,
1551 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
1552 prefetcher=self.prefetcher, scheduler=scheduler,
1553 settings=settings, world_atom=world_atom)
1555 self._install_task = build
1556 self._start_task(build, self._default_final_exit)
# ...while binary packages go through Binpkg.
1559 elif pkg.type_name == "binary":
1561 binpkg = Binpkg(background=self.background,
1562 find_blockers=find_blockers,
1563 ldpath_mtimes=ldpath_mtimes, logger=logger,
1564 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
1565 prefetcher=self.prefetcher, settings=settings,
1566 scheduler=scheduler, world_atom=world_atom)
1568 self._install_task = binpkg
1569 self._start_task(binpkg, self._default_final_exit)
# Poll/wait delegate to the inner install task (headers missing).
1573 self._install_task.poll()
1574 return self.returncode
1577 self._install_task.wait()
1578 return self.returncode
# Merge step (header missing): rebind state, then either uninstall the
# package or delegate installation to the inner task.
1583 build_opts = self.build_opts
1584 find_blockers = self.find_blockers
1585 logger = self.logger
1586 mtimedb = self.mtimedb
1587 pkg_count = self.pkg_count
1588 prefetcher = self.prefetcher
1589 scheduler = self.scheduler
1590 settings = self.settings
1591 world_atom = self.world_atom
1592 ldpath_mtimes = mtimedb["ldpath"]
# Uninstalls are skipped under buildpkgonly/fetchonly/pretend.
1595 if not (build_opts.buildpkgonly or \
1596 build_opts.fetchonly or build_opts.pretend):
1598 uninstall = PackageUninstall(background=self.background,
1599 ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
1600 pkg=pkg, scheduler=scheduler, settings=settings)
1603 retval = uninstall.wait()
1604 if retval != os.EX_OK:
# fetchonly/buildpkgonly stop before the actual install.
1608 if build_opts.fetchonly or \
1609 build_opts.buildpkgonly:
1610 return self.returncode
1612 retval = self._install_task.install()
# Maintains a cache of the blocker atoms declared by installed packages
# and answers which installed packages block (or are blocked by) a
# package about to be merged.
# NOTE(review): lines are missing from this extraction (try/finally
# pairs around dep_check, several conditionals/returns, and the
# blocker_atoms initializer before the parent-graph loop).
1615 class BlockerDB(object):
1617 def __init__(self, root_config):
1618 self._root_config = root_config
1619 self._vartree = root_config.trees["vartree"]
1620 self._portdb = root_config.trees["porttree"].dbapi
# Lazily-created FakeVartree and the dep_check tree mapping built on it.
1622 self._dep_check_trees = None
1623 self._fake_vartree = None
# Create (once) and sync the FakeVartree used for dep_check lookups; the
# fake tree serves as both porttree and vartree in the mapping.
1625 def _get_fake_vartree(self, acquire_lock=0):
1626 fake_vartree = self._fake_vartree
1627 if fake_vartree is None:
1628 fake_vartree = FakeVartree(self._root_config,
1629 acquire_lock=acquire_lock)
1630 self._fake_vartree = fake_vartree
1631 self._dep_check_trees = { self._vartree.root : {
1632 "porttree" : fake_vartree,
1633 "vartree" : fake_vartree,
1636 fake_vartree.sync(acquire_lock=acquire_lock)
# Return the set of installed packages that block new_pkg, checking both
# directions (their blockers against it, and its blockers against them).
1639 def findInstalledBlockers(self, new_pkg, acquire_lock=0):
1640 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
1641 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1642 settings = self._vartree.settings
# Entries left in stale_cache at the end belong to removed packages.
1643 stale_cache = set(blocker_cache)
1644 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
1645 dep_check_trees = self._dep_check_trees
1646 vardb = fake_vartree.dbapi
1647 installed_pkgs = list(vardb)
1649 for inst_pkg in installed_pkgs:
1650 stale_cache.discard(inst_pkg.cpv)
1651 cached_blockers = blocker_cache.get(inst_pkg.cpv)
# Invalidate the cache entry when the package's COUNTER changed.
1652 if cached_blockers is not None and \
1653 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
1654 cached_blockers = None
1655 if cached_blockers is not None:
1656 blocker_atoms = cached_blockers.atoms
1658 # Use aux_get() to trigger FakeVartree global
1659 # updates on *DEPEND when appropriate.
1660 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
# Installed packages are evaluated with strict checking disabled
# (restored afterwards; the try/finally lines are missing here).
1662 portage.dep._dep_check_strict = False
1663 success, atoms = portage.dep_check(depstr,
1664 vardb, settings, myuse=inst_pkg.use.enabled,
1665 trees=dep_check_trees, myroot=inst_pkg.root)
1667 portage.dep._dep_check_strict = True
# On dep_check failure, report the offending vdb location.
1669 pkg_location = os.path.join(inst_pkg.root,
1670 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
1671 portage.writemsg("!!! %s/*DEPEND: %s\n" % \
1672 (pkg_location, atoms), noiselevel=-1)
# Cache the sorted blocker atoms keyed by the package COUNTER.
1675 blocker_atoms = [atom for atom in atoms \
1676 if atom.startswith("!")]
1677 blocker_atoms.sort()
1678 counter = long(inst_pkg.metadata["COUNTER"])
1679 blocker_cache[inst_pkg.cpv] = \
1680 blocker_cache.BlockerData(counter, blocker_atoms)
# Drop cache entries for packages that are no longer installed.
1681 for cpv in stale_cache:
1682 del blocker_cache[cpv]
1683 blocker_cache.flush()
# Direction 1: map each blocker atom to the installed packages that
# declare it, then match new_pkg against those atoms.
1685 blocker_parents = digraph()
1687 for pkg in installed_pkgs:
1688 for blocker_atom in blocker_cache[pkg.cpv].atoms:
1689 blocker_atom = blocker_atom.lstrip("!")
1690 blocker_atoms.append(blocker_atom)
1691 blocker_parents.add(blocker_atom, pkg)
1693 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
1694 blocking_pkgs = set()
1695 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
1696 blocking_pkgs.update(blocker_parents.parent_nodes(atom))
1698 # Check for blockers in the other direction.
1699 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
1701 portage.dep._dep_check_strict = False
1702 success, atoms = portage.dep_check(depstr,
1703 vardb, settings, myuse=new_pkg.use.enabled,
1704 trees=dep_check_trees, myroot=new_pkg.root)
1706 portage.dep._dep_check_strict = True
1708 # We should never get this far with invalid deps.
1709 show_invalid_depstring_notice(new_pkg, depstr, atoms)
# Direction 2: match each installed package against new_pkg's blockers.
1712 blocker_atoms = [atom.lstrip("!") for atom in atoms \
1715 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
1716 for inst_pkg in installed_pkgs:
# next() raising StopIteration means no blocker atom matched.
1718 blocker_atoms.iterAtomsForPackage(inst_pkg).next()
1719 except (portage.exception.InvalidDependString, StopIteration):
1721 blocking_pkgs.add(inst_pkg)
1723 return blocking_pkgs
# Print a detailed error explaining an invalid/corrupt dependency string,
# with remediation advice that depends on whether the offending package
# is already installed (nomerge) or about to be installed.
# NOTE(review): the "msg = []" initializer and the else: separating the
# two advice branches are missing from this extraction.
1725 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
1727 msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
1728 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
# parent_node is the Task hash key: (type, root, cpv, operation).
1729 p_type, p_root, p_key, p_status = parent_node
# Installed package: point at its vdb *DEPEND files and suggest
# reinstalling or using --nodeps.
1731 if p_status == "nomerge":
1732 category, pf = portage.catsplit(p_key)
1733 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
1734 msg.append("Portage is unable to process the dependencies of the ")
1735 msg.append("'%s' package. " % p_key)
1736 msg.append("In order to correct this problem, the package ")
1737 msg.append("should be uninstalled, reinstalled, or upgraded. ")
1738 msg.append("As a temporary workaround, the --nodeps option can ")
1739 msg.append("be used to ignore all dependencies. For reference, ")
1740 msg.append("the problematic dependencies can be found in the ")
1741 msg.append("*DEPEND files located in '%s/'." % pkg_location)
# Not-yet-installed package: advise contacting the maintainer.
1743 msg.append("This package can not be installed. ")
1744 msg.append("Please notify the '%s' package maintainer " % p_key)
1745 msg.append("about this problem.")
# Wrap the advice to 72 columns and emit everything at ERROR level.
1747 msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
1748 writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
1750 class depgraph(object):
1752 pkg_tree_map = RootConfig.pkg_tree_map
1754 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1756 def __init__(self, settings, trees, myopts, myparams, spinner):
1757 self.settings = settings
1758 self.target_root = settings["ROOT"]
1759 self.myopts = myopts
1760 self.myparams = myparams
1762 if settings.get("PORTAGE_DEBUG", "") == "1":
1764 self.spinner = spinner
1765 self._running_root = trees["/"]["root_config"]
1766 self._opts_no_restart = Scheduler._opts_no_restart
1767 self.pkgsettings = {}
1768 # Maps slot atom to package for each Package added to the graph.
1769 self._slot_pkg_map = {}
1770 # Maps nodes to the reasons they were selected for reinstallation.
1771 self._reinstall_nodes = {}
1774 self._trees_orig = trees
1776 # Contains a filtered view of preferred packages that are selected
1777 # from available repositories.
1778 self._filtered_trees = {}
1779 # Contains installed packages and new packages that have been added
1781 self._graph_trees = {}
1782 # All Package instances
1783 self._pkg_cache = {}
1784 for myroot in trees:
1785 self.trees[myroot] = {}
1786 # Create a RootConfig instance that references
1787 # the FakeVartree instead of the real one.
1788 self.roots[myroot] = RootConfig(
1789 trees[myroot]["vartree"].settings,
1791 trees[myroot]["root_config"].setconfig)
1792 for tree in ("porttree", "bintree"):
1793 self.trees[myroot][tree] = trees[myroot][tree]
1794 self.trees[myroot]["vartree"] = \
1795 FakeVartree(trees[myroot]["root_config"],
1796 pkg_cache=self._pkg_cache)
1797 self.pkgsettings[myroot] = portage.config(
1798 clone=self.trees[myroot]["vartree"].settings)
1799 self._slot_pkg_map[myroot] = {}
1800 vardb = self.trees[myroot]["vartree"].dbapi
1801 preload_installed_pkgs = "--nodeps" not in self.myopts and \
1802 "--buildpkgonly" not in self.myopts
1803 # This fakedbapi instance will model the state that the vdb will
1804 # have after new packages have been installed.
1805 fakedb = PackageVirtualDbapi(vardb.settings)
1806 if preload_installed_pkgs:
1808 self.spinner.update()
1809 # This triggers metadata updates via FakeVartree.
1810 vardb.aux_get(pkg.cpv, [])
1811 fakedb.cpv_inject(pkg)
1813 # Now that the vardb state is cached in our FakeVartree,
1814 # we won't be needing the real vartree cache for awhile.
1815 # To make some room on the heap, clear the vardbapi
1817 trees[myroot]["vartree"].dbapi._clear_cache()
1820 self.mydbapi[myroot] = fakedb
1823 graph_tree.dbapi = fakedb
1824 self._graph_trees[myroot] = {}
1825 self._filtered_trees[myroot] = {}
1826 # Substitute the graph tree for the vartree in dep_check() since we
1827 # want atom selections to be consistent with package selections
1828 # have already been made.
1829 self._graph_trees[myroot]["porttree"] = graph_tree
1830 self._graph_trees[myroot]["vartree"] = graph_tree
1831 def filtered_tree():
1833 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
1834 self._filtered_trees[myroot]["porttree"] = filtered_tree
1836 # Passing in graph_tree as the vartree here could lead to better
1837 # atom selections in some cases by causing atoms for packages that
1838 # have been added to the graph to be preferred over other choices.
1839 # However, it can trigger atom selections that result in
1840 # unresolvable direct circular dependencies. For example, this
1841 # happens with gwydion-dylan which depends on either itself or
1842 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
1843 # gwydion-dylan-bin needs to be selected in order to avoid a
1844 # an unresolvable direct circular dependency.
1846 # To solve the problem described above, pass in "graph_db" so that
1847 # packages that have been added to the graph are distinguishable
1848 # from other available packages and installed packages. Also, pass
1849 # the parent package into self._select_atoms() calls so that
1850 # unresolvable direct circular dependencies can be detected and
1851 # avoided when possible.
1852 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
1853 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
1856 portdb = self.trees[myroot]["porttree"].dbapi
1857 bindb = self.trees[myroot]["bintree"].dbapi
1858 vardb = self.trees[myroot]["vartree"].dbapi
1859 # (db, pkg_type, built, installed, db_keys)
1860 if "--usepkgonly" not in self.myopts:
1861 db_keys = list(portdb._aux_cache_keys)
1862 dbs.append((portdb, "ebuild", False, False, db_keys))
1863 if "--usepkg" in self.myopts:
1864 db_keys = list(bindb._aux_cache_keys)
1865 dbs.append((bindb, "binary", True, False, db_keys))
1866 db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
1867 dbs.append((vardb, "installed", True, True, db_keys))
1868 self._filtered_trees[myroot]["dbs"] = dbs
1869 if "--usepkg" in self.myopts:
1870 self.trees[myroot]["bintree"].populate(
1871 "--getbinpkg" in self.myopts,
1872 "--getbinpkgonly" in self.myopts)
1875 self.digraph=portage.digraph()
1876 # contains all sets added to the graph
1878 # contains atoms given as arguments
1879 self._sets["args"] = InternalPackageSet()
1880 # contains all atoms from all sets added to the graph, including
1881 # atoms given as arguments
1882 self._set_atoms = InternalPackageSet()
1883 self._atom_arg_map = {}
1884 # contains all nodes pulled in by self._set_atoms
1885 self._set_nodes = set()
1886 # Contains only Blocker -> Uninstall edges
1887 self._blocker_uninstalls = digraph()
1888 # Contains only Package -> Blocker edges
1889 self._blocker_parents = digraph()
1890 # Contains only irrelevant Package -> Blocker edges
1891 self._irrelevant_blockers = digraph()
1892 # Contains only unsolvable Package -> Blocker edges
1893 self._unsolvable_blockers = digraph()
1894 # Contains all Blocker -> Blocked Package edges
1895 self._blocked_pkgs = digraph()
1896 # Contains world packages that have been protected from
1897 # uninstallation but may not have been added to the graph
1898 # if the graph is not complete yet.
1899 self._blocked_world_pkgs = {}
1900 self._slot_collision_info = {}
1901 # Slot collision nodes are not allowed to block other packages since
1902 # blocker validation is only able to account for one package per slot.
1903 self._slot_collision_nodes = set()
1904 self._parent_atoms = {}
1905 self._slot_conflict_parent_atoms = set()
1906 self._serialized_tasks_cache = None
1907 self._scheduler_graph = None
1908 self._displayed_list = None
1909 self._pprovided_args = []
1910 self._missing_args = []
1911 self._masked_installed = set()
1912 self._unsatisfied_deps_for_display = []
1913 self._unsatisfied_blockers_for_display = None
1914 self._circular_deps_for_display = None
1915 self._dep_stack = []
1916 self._dep_disjunctive_stack = []
1917 self._unsatisfied_deps = []
1918 self._initially_unsatisfied_deps = []
1919 self._ignored_deps = []
1920 self._required_set_names = set(["system", "world"])
1921 self._select_atoms = self._select_atoms_highest_available
1922 self._select_package = self._select_pkg_highest_available
1923 self._highest_pkg_cache = {}
1925 def _show_slot_collision_notice(self):
1926 """Show an informational message advising the user to mask one of the
1927 the packages. In some cases it may be possible to resolve this
1928 automatically, but support for backtracking (removal nodes that have
1929 already been selected) will be required in order to handle all possible
1933 if not self._slot_collision_info:
1936 self._show_merge_list()
1939 msg.append("\n!!! Multiple package instances within a single " + \
1940 "package slot have been pulled\n")
1941 msg.append("!!! into the dependency graph, resulting" + \
1942 " in a slot conflict:\n\n")
1944 # Max number of parents shown, to avoid flooding the display.
1946 explanation_columns = 70
1948 for (slot_atom, root), slot_nodes \
1949 in self._slot_collision_info.iteritems():
1950 msg.append(str(slot_atom))
1953 for node in slot_nodes:
1955 msg.append(str(node))
1956 parent_atoms = self._parent_atoms.get(node)
1959 # Prefer conflict atoms over others.
1960 for parent_atom in parent_atoms:
1961 if len(pruned_list) >= max_parents:
1963 if parent_atom in self._slot_conflict_parent_atoms:
1964 pruned_list.add(parent_atom)
1966 # If this package was pulled in by conflict atoms then
1967 # show those alone since those are the most interesting.
1969 # When generating the pruned list, prefer instances
1970 # of DependencyArg over instances of Package.
1971 for parent_atom in parent_atoms:
1972 if len(pruned_list) >= max_parents:
1974 parent, atom = parent_atom
1975 if isinstance(parent, DependencyArg):
1976 pruned_list.add(parent_atom)
1977 # Prefer Packages instances that themselves have been
1978 # pulled into collision slots.
1979 for parent_atom in parent_atoms:
1980 if len(pruned_list) >= max_parents:
1982 parent, atom = parent_atom
1983 if isinstance(parent, Package) and \
1984 (parent.slot_atom, parent.root) \
1985 in self._slot_collision_info:
1986 pruned_list.add(parent_atom)
1987 for parent_atom in parent_atoms:
1988 if len(pruned_list) >= max_parents:
1990 pruned_list.add(parent_atom)
1991 omitted_parents = len(parent_atoms) - len(pruned_list)
1992 parent_atoms = pruned_list
1993 msg.append(" pulled in by\n")
1994 for parent_atom in parent_atoms:
1995 parent, atom = parent_atom
1996 msg.append(2*indent)
1997 if isinstance(parent,
1998 (PackageArg, AtomArg)):
1999 # For PackageArg and AtomArg types, it's
2000 # redundant to display the atom attribute.
2001 msg.append(str(parent))
2003 # Display the specific atom from SetArg or
2005 msg.append("%s required by %s" % (atom, parent))
2008 msg.append(2*indent)
2009 msg.append("(and %d more)\n" % omitted_parents)
2011 msg.append(" (no parents)\n")
2013 explanation = self._slot_conflict_explanation(slot_nodes)
2016 msg.append(indent + "Explanation:\n\n")
2017 for line in textwrap.wrap(explanation, explanation_columns):
2018 msg.append(2*indent + line + "\n")
2021 sys.stderr.write("".join(msg))
2024 explanations_for_all = explanations == len(self._slot_collision_info)
2026 if explanations_for_all or "--quiet" in self.myopts:
2030 msg.append("It may be possible to solve this problem ")
2031 msg.append("by using package.mask to prevent one of ")
2032 msg.append("those packages from being selected. ")
2033 msg.append("However, it is also possible that conflicting ")
2034 msg.append("dependencies exist such that they are impossible to ")
2035 msg.append("satisfy simultaneously. If such a conflict exists in ")
2036 msg.append("the dependencies of two different packages, then those ")
2037 msg.append("packages can not be installed simultaneously.")
2039 from formatter import AbstractFormatter, DumbWriter
2040 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
2042 f.add_flowing_data(x)
2046 msg.append("For more information, see MASKED PACKAGES ")
2047 msg.append("section in the emerge man page or refer ")
2048 msg.append("to the Gentoo Handbook.")
2050 f.add_flowing_data(x)
2054 def _slot_conflict_explanation(self, slot_nodes):
2056 When a slot conflict occurs due to USE deps, there are a few
2057 different cases to consider:
2059 1) New USE are correctly set but --newuse wasn't requested so an
2060 installed package with incorrect USE happened to get pulled
2061 into graph before the new one.
2063 2) New USE are incorrectly set but an installed package has correct
2064 USE so it got pulled into the graph, and a new instance also got
2065 pulled in due to --newuse or an upgrade.
2067 3) Multiple USE deps exist that can't be satisfied simultaneously,
2068 and multiple package instances got pulled into the same slot to
2069 satisfy the conflicting deps.
2071 Currently, explanations and suggested courses of action are generated
2072 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
2075 if len(slot_nodes) != 2:
2076 # Suggestions are only implemented for
2077 # conflicts between two packages.
2080 all_conflict_atoms = self._slot_conflict_parent_atoms
2082 matched_atoms = None
2083 unmatched_node = None
2084 for node in slot_nodes:
2085 parent_atoms = self._parent_atoms.get(node)
2086 if not parent_atoms:
2087 # Normally, there are always parent atoms. If there are
2088 # none then something unexpected is happening and there's
2089 # currently no suggestion for this case.
2091 conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
2092 for parent_atom in conflict_atoms:
2093 parent, atom = parent_atom
2095 # Suggestions are currently only implemented for cases
2096 # in which all conflict atoms have USE deps.
2099 if matched_node is not None:
2100 # If conflict atoms match multiple nodes
2101 # then there's no suggestion.
2104 matched_atoms = conflict_atoms
2106 if unmatched_node is not None:
2107 # Neither node is matched by conflict atoms, and
2108 # there is no suggestion for this case.
2110 unmatched_node = node
2112 if matched_node is None or unmatched_node is None:
2113 # This shouldn't happen.
2116 if unmatched_node.installed and not matched_node.installed and \
2117 unmatched_node.cpv == matched_node.cpv:
2118 # If the conflicting packages are the same version then
2119 # --newuse should be all that's needed. If they are different
2120 # versions then there's some other problem.
2121 return "New USE are correctly set, but --newuse wasn't" + \
2122 " requested, so an installed package with incorrect USE " + \
2123 "happened to get pulled into the dependency graph. " + \
2124 "In order to solve " + \
2125 "this, either specify the --newuse option or explicitly " + \
2126 " reinstall '%s'." % matched_node.slot_atom
2128 if matched_node.installed and not unmatched_node.installed:
2129 atoms = sorted(set(atom for parent, atom in matched_atoms))
2130 explanation = ("New USE for '%s' are incorrectly set. " + \
2131 "In order to solve this, adjust USE to satisfy '%s'") % \
2132 (matched_node.slot_atom, atoms[0])
2134 for atom in atoms[1:-1]:
2135 explanation += ", '%s'" % (atom,)
2138 explanation += " and '%s'" % (atoms[-1],)
2144 def _process_slot_conflicts(self):
2146 Process slot conflict data to identify specific atoms which
2147 lead to conflict. These atoms only match a subset of the
2148 packages that have been pulled into a given slot.
2150 for (slot_atom, root), slot_nodes \
2151 in self._slot_collision_info.iteritems():
2153 all_parent_atoms = set()
2154 for pkg in slot_nodes:
2155 parent_atoms = self._parent_atoms.get(pkg)
2156 if not parent_atoms:
2158 all_parent_atoms.update(parent_atoms)
2160 for pkg in slot_nodes:
2161 parent_atoms = self._parent_atoms.get(pkg)
2162 if parent_atoms is None:
2163 parent_atoms = set()
2164 self._parent_atoms[pkg] = parent_atoms
2165 for parent_atom in all_parent_atoms:
2166 if parent_atom in parent_atoms:
2168 # Use package set for matching since it will match via
2169 # PROVIDE when necessary, while match_from_list does not.
2170 parent, atom = parent_atom
2171 atom_set = InternalPackageSet(
2172 initial_atoms=(atom,))
2173 if atom_set.findAtomForPackage(pkg):
2174 parent_atoms.add(parent_atom)
2176 self._slot_conflict_parent_atoms.add(parent_atom)
# Compute which USE/IUSE flag changes should trigger a reinstall.
# NOTE(review): embedded numbering skips values here — the "return flags"
# / "return None" lines appear to be missing from this excerpt.
2178 def _reinstall_for_flags(self, forced_flags,
2179 orig_use, orig_iuse, cur_use, cur_iuse):
2180 """Return a set of flags that trigger reinstallation, or None if there
2181 are no such flags."""
# --newuse: any IUSE membership change (minus forced flags), plus any
# change in the set of enabled flags.
2182 if "--newuse" in self.myopts:
2183 flags = set(orig_iuse.symmetric_difference(
2184 cur_iuse).difference(forced_flags))
2185 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
2186 cur_iuse.intersection(cur_use)))
# --reinstall=changed-use: only changes in the enabled-flag set count.
2189 elif "changed-use" == self.myopts.get("--reinstall"):
2190 flags = orig_iuse.intersection(orig_use).symmetric_difference(
2191 cur_iuse.intersection(cur_use))
# Drain the dependency stacks, adding packages/deps to the graph until both
# self._dep_stack and self._dep_disjunctive_stack are empty.
# NOTE(review): non-contiguous embedded numbering — failure branches
# (likely "return False") and the final return are missing in this excerpt.
2196 def _create_graph(self, allow_unsatisfied=False):
2197 dep_stack = self._dep_stack
2198 dep_disjunctive_stack = self._dep_disjunctive_stack
2199 while dep_stack or dep_disjunctive_stack:
2200 self.spinner.update()
2202 dep = dep_stack.pop()
# Package entries expand into their own dependencies; plain Dependency
# entries are resolved and added individually.
2203 if isinstance(dep, Package):
2204 if not self._add_pkg_deps(dep,
2205 allow_unsatisfied=allow_unsatisfied):
2208 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
2210 if dep_disjunctive_stack:
2211 if not self._pop_disjunction(allow_unsatisfied):
# Resolve a single Dependency: register blockers, select a matching package
# and feed it to _add_pkg, or record the dep as unsatisfied/ignored.
# NOTE(review): embedded numbering is non-contiguous — several returns,
# "try:" lines and branch bodies are missing from this excerpt; treat the
# visible control flow as incomplete.
2215 def _add_dep(self, dep, allow_unsatisfied=False):
2216 debug = "--debug" in self.myopts
2217 buildpkgonly = "--buildpkgonly" in self.myopts
2218 nodeps = "--nodeps" in self.myopts
2219 empty = "empty" in self.myparams
2220 deep = "deep" in self.myparams
2221 update = "--update" in self.myopts and dep.depth <= 1
# Blocker handling: attach the blocker to its parent in the parent's root.
2223 if not buildpkgonly and \
2225 dep.parent not in self._slot_collision_nodes:
2226 if dep.parent.onlydeps:
2227 # It's safe to ignore blockers if the
2228 # parent is an --onlydeps node.
2230 # The blocker applies to the root where
2231 # the parent is or will be installed.
2232 blocker = Blocker(atom=dep.atom,
2233 eapi=dep.parent.metadata["EAPI"],
2234 root=dep.parent.root)
2235 self._blocker_parents.add(blocker, dep.parent)
2237 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
2238 onlydeps=dep.onlydeps)
2240 if dep.priority.optional:
2241 # This could be an unecessary build-time dep
2242 # pulled in by --with-bdeps=y.
2244 if allow_unsatisfied:
2245 self._unsatisfied_deps.append(dep)
2247 self._unsatisfied_deps_for_display.append(
2248 ((dep.root, dep.atom), {"myparent":dep.parent}))
2250 # In some cases, dep_check will return deps that shouldn't
2251 # be proccessed any further, so they are identified and
2252 # discarded here. Try to discard as few as possible since
2253 # discarded dependencies reduce the amount of information
2254 # available for optimization of merge order.
2255 if dep.priority.satisfied and \
2256 not dep_pkg.installed and \
2257 not (existing_node or empty or deep or update):
2259 if dep.root == self.target_root:
2261 myarg = self._iter_atoms_for_pkg(dep_pkg).next()
2262 except StopIteration:
2264 except portage.exception.InvalidDependString:
2265 if not dep_pkg.installed:
2266 # This shouldn't happen since the package
2267 # should have been masked.
2270 self._ignored_deps.append(dep)
2273 if not self._add_pkg(dep_pkg, dep):
# Add a selected Package to the dependency graph: detect slot collisions,
# reuse existing nodes when possible, register parent atoms, and schedule
# the package's own dependencies for processing.
# NOTE(review): embedded numbering is non-contiguous — returns, "try:"
# lines, "else:" branches and some statements are missing in this excerpt.
2277 def _add_pkg(self, pkg, dep):
2284 myparent = dep.parent
2285 priority = dep.priority
2287 if priority is None:
2288 priority = DepPriority()
# The following lines read like a displaced docstring describing the
# original interface (mybigkey/addme); kept verbatim.
2290 Fills the digraph with nodes comprised of packages to merge.
2291 mybigkey is the package spec of the package to merge.
2292 myparent is the package depending on mybigkey ( or None )
2293 addme = Should we add this package to the digraph or are we just looking at it's deps?
2294 Think --onlydeps, we need to ignore packages in that case.
2297 #IUSE-aware emerge -> USE DEP aware depgraph
2298 #"no downgrade" emerge
2300 # Ensure that the dependencies of the same package
2301 # are never processed more than once.
2302 previously_added = pkg in self.digraph
2304 # select the correct /var database that we'll be checking against
2305 vardbapi = self.trees[pkg.root]["vartree"].dbapi
2306 pkgsettings = self.pkgsettings[pkg.root]
2311 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
2312 except portage.exception.InvalidDependString, e:
2313 if not pkg.installed:
2314 show_invalid_depstring_notice(
2315 pkg, pkg.metadata["PROVIDE"], str(e))
2319 if not pkg.onlydeps:
2320 if not pkg.installed and \
2321 "empty" not in self.myparams and \
2322 vardbapi.match(pkg.slot_atom):
2323 # Increase the priority of dependencies on packages that
2324 # are being rebuilt. This optimizes merge order so that
2325 # dependencies are rebuilt/updated as soon as possible,
2326 # which is needed especially when emerge is called by
2327 # revdep-rebuild since dependencies may be affected by ABI
2328 # breakage that has rendered them useless. Don't adjust
2329 # priority here when in "empty" mode since all packages
2330 # are being merged in that case.
2331 priority.rebuild = True
# Slot-collision detection: compare against any node already occupying
# this slot atom in the same root.
2333 existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
2334 slot_collision = False
2336 existing_node_matches = pkg.cpv == existing_node.cpv
2337 if existing_node_matches and \
2338 pkg != existing_node and \
2339 dep.atom is not None:
2340 # Use package set for matching since it will match via
2341 # PROVIDE when necessary, while match_from_list does not.
2342 atom_set = InternalPackageSet(initial_atoms=[dep.atom])
2343 if not atom_set.findAtomForPackage(existing_node):
2344 existing_node_matches = False
2345 if existing_node_matches:
2346 # The existing node can be reused.
2348 for parent_atom in arg_atoms:
2349 parent, atom = parent_atom
2350 self.digraph.add(existing_node, parent,
2352 self._add_parent_atom(existing_node, parent_atom)
2353 # If a direct circular dependency is not an unsatisfied
2354 # buildtime dependency then drop it here since otherwise
2355 # it can skew the merge order calculation in an unwanted
2357 if existing_node != myparent or \
2358 (priority.buildtime and not priority.satisfied):
2359 self.digraph.addnode(existing_node, myparent,
2361 if dep.atom is not None and dep.parent is not None:
2362 self._add_parent_atom(existing_node,
2363 (dep.parent, dep.atom))
2367 # A slot collision has occurred. Sometimes this coincides
2368 # with unresolvable blockers, so the slot collision will be
2369 # shown later if there are no unresolvable blockers.
2370 self._add_slot_conflict(pkg)
2371 slot_collision = True
2374 # Now add this node to the graph so that self.display()
2375 # can show use flags and --tree portage.output. This node is
2376 # only being partially added to the graph. It must not be
2377 # allowed to interfere with the other nodes that have been
2378 # added. Do not overwrite data for existing nodes in
2379 # self.mydbapi since that data will be used for blocker
2381 # Even though the graph is now invalid, continue to process
2382 # dependencies so that things like --fetchonly can still
2383 # function despite collisions.
2385 elif not previously_added:
2386 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
2387 self.mydbapi[pkg.root].cpv_inject(pkg)
2388 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
2390 if not pkg.installed:
2391 # Allow this package to satisfy old-style virtuals in case it
2392 # doesn't already. Any pre-existing providers will be preferred
2395 pkgsettings.setinst(pkg.cpv, pkg.metadata)
2396 # For consistency, also update the global virtuals.
2397 settings = self.roots[pkg.root].settings
2399 settings.setinst(pkg.cpv, pkg.metadata)
2401 except portage.exception.InvalidDependString, e:
2402 show_invalid_depstring_notice(
2403 pkg, pkg.metadata["PROVIDE"], str(e))
2408 self._set_nodes.add(pkg)
2410 # Do this even when addme is False (--onlydeps) so that the
2411 # parent/child relationship is always known in case
2412 # self._show_slot_collision_notice() needs to be called later.
2413 self.digraph.add(pkg, myparent, priority=priority)
2414 if dep.atom is not None and dep.parent is not None:
2415 self._add_parent_atom(pkg, (dep.parent, dep.atom))
2418 for parent_atom in arg_atoms:
2419 parent, atom = parent_atom
2420 self.digraph.add(pkg, parent, priority=priority)
2421 self._add_parent_atom(pkg, parent_atom)
2423 """ This section determines whether we go deeper into dependencies or not.
2424 We want to go deeper on a few occasions:
2425 Installing package A, we need to make sure package A's deps are met.
2426 emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
2427 If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
# Installed packages outside --deep mode get their deps queued on the
# ignored-deps list instead of the active stack.
2429 dep_stack = self._dep_stack
2430 if "recurse" not in self.myparams:
2432 elif pkg.installed and \
2433 "deep" not in self.myparams:
2434 dep_stack = self._ignored_deps
2436 self.spinner.update()
2441 if not previously_added:
2442 dep_stack.append(pkg)
def _add_parent_atom(self, pkg, parent_atom):
	"""Record that *parent_atom* (a (parent, atom) pair) pulled *pkg*
	into the graph, creating the per-package set on first use."""
	self._parent_atoms.setdefault(pkg, set()).add(parent_atom)
# Register pkg as a slot-collision node and record both it and the package
# currently occupying its slot under the (slot_atom, root) conflict key.
# NOTE(review): embedded numbering skips 2457 — the "slot_nodes = set()"
# initialization for the None branch appears missing in this excerpt.
2452 def _add_slot_conflict(self, pkg):
2453 self._slot_collision_nodes.add(pkg)
2454 slot_key = (pkg.slot_atom, pkg.root)
2455 slot_nodes = self._slot_collision_info.get(slot_key)
2456 if slot_nodes is None:
2458 slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
2459 self._slot_collision_info[slot_key] = slot_nodes
# Expand pkg's DEPEND/RDEPEND/PDEPEND strings into Dependency objects and
# queue them, honoring --with-bdeps/--root-deps/--buildpkgonly adjustments.
# NOTE(review): embedded numbering is non-contiguous — "try:" lines,
# returns and some assignments (e.g. jbigkey, bdeps_root) are missing from
# this excerpt; treat visible control flow as incomplete.
2462 def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
2464 mytype = pkg.type_name
2467 metadata = pkg.metadata
2468 myuse = pkg.use.enabled
2470 depth = pkg.depth + 1
2471 removal_action = "remove" in self.myparams
2474 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
2476 edepend[k] = metadata[k]
# --buildpkgonly on a non-built package: runtime deps are irrelevant.
2478 if not pkg.built and \
2479 "--buildpkgonly" in self.myopts and \
2480 "deep" not in self.myparams and \
2481 "empty" not in self.myparams:
2482 edepend["RDEPEND"] = ""
2483 edepend["PDEPEND"] = ""
2484 bdeps_optional = False
2486 if pkg.built and not removal_action:
2487 if self.myopts.get("--with-bdeps", "n") == "y":
2488 # Pull in build time deps as requested, but marked them as
2489 # "optional" since they are not strictly required. This allows
2490 # more freedom in the merge order calculation for solving
2491 # circular dependencies. Don't convert to PDEPEND since that
2492 # could make --with-bdeps=y less effective if it is used to
2493 # adjust merge order to prevent built_with_use() calls from
2495 bdeps_optional = True
2497 # built packages do not have build time dependencies.
2498 edepend["DEPEND"] = ""
2500 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
2501 edepend["DEPEND"] = ""
2504 root_deps = self.myopts.get("--root-deps")
2505 if root_deps is not None:
2506 if root_deps is True:
2508 elif root_deps == "rdeps":
2509 edepend["DEPEND"] = ""
# (dep_root, dep_string, priority) triples processed below.
2512 (bdeps_root, edepend["DEPEND"],
2513 self._priority(buildtime=(not bdeps_optional),
2514 optional=bdeps_optional)),
2515 (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
2516 (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
2519 debug = "--debug" in self.myopts
2520 strict = mytype != "installed"
2523 portage.dep._dep_check_strict = False
2525 for dep_root, dep_string, dep_priority in deps:
2530 print "Parent: ", jbigkey
2531 print "Depstring:", dep_string
2532 print "Priority:", dep_priority
# Normalize the dep string and split out disjunctive (||/virtual) parts
# onto the disjunction queue before adding the remainder.
2536 dep_string = portage.dep.paren_normalize(
2537 portage.dep.use_reduce(
2538 portage.dep.paren_reduce(dep_string),
2539 uselist=pkg.use.enabled))
2541 dep_string = list(self._queue_disjunctive_deps(
2542 pkg, dep_root, dep_priority, dep_string))
2544 except portage.exception.InvalidDependString, e:
2548 show_invalid_depstring_notice(pkg, dep_string, str(e))
2554 dep_string = portage.dep.paren_enclose(dep_string)
2556 if not self._add_pkg_dep_string(
2557 pkg, dep_root, dep_priority, dep_string,
2561 except portage.exception.AmbiguousPackageName, e:
2563 portage.writemsg("\n\n!!! An atom in the dependencies " + \
2564 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
2566 portage.writemsg(" %s\n" % cpv, noiselevel=-1)
2567 portage.writemsg("\n", noiselevel=-1)
2568 if mytype == "binary":
2570 "!!! This binary package cannot be installed: '%s'\n" % \
2571 mykey, noiselevel=-1)
2572 elif mytype == "ebuild":
2573 portdb = self.roots[myroot].trees["porttree"].dbapi
2574 myebuild, mylocation = portdb.findname2(mykey)
2575 portage.writemsg("!!! This ebuild cannot be installed: " + \
2576 "'%s'\n" % myebuild, noiselevel=-1)
2577 portage.writemsg("!!! Please notify the package maintainer " + \
2578 "that atoms must be fully-qualified.\n", noiselevel=-1)
# Restore the strict-mode flag disabled at 2523.
2581 portage.dep._dep_check_strict = True
# Run dep_check-style atom selection on one dep string and queue a
# Dependency for each selected atom, marking already-installed ones as
# satisfied.
# NOTE(review): embedded numbering is non-contiguous — "try:" lines,
# returns and the allow_unsatisfied parameter line are missing here.
2584 def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
2586 depth = pkg.depth + 1
2587 debug = "--debug" in self.myopts
2588 strict = pkg.type_name != "installed"
2592 print "Parent: ", pkg
2593 print "Depstring:", dep_string
2594 print "Priority:", dep_priority
2597 selected_atoms = self._select_atoms(dep_root,
2598 dep_string, myuse=pkg.use.enabled, parent=pkg,
2599 strict=strict, priority=dep_priority)
2600 except portage.exception.InvalidDependString, e:
2601 show_invalid_depstring_notice(pkg, dep_string, str(e))
2608 print "Candidates:", selected_atoms
2610 vardb = self.roots[dep_root].trees["vartree"].dbapi
2612 for atom in selected_atoms:
2615 atom = portage.dep.Atom(atom)
2617 mypriority = dep_priority.copy()
# A non-blocker atom matched in the vartree is already satisfied.
2618 if not atom.blocker and vardb.match(atom):
2619 mypriority.satisfied = True
2621 if not self._add_dep(Dependency(atom=atom,
2622 blocker=atom.blocker, depth=depth, parent=pkg,
2623 priority=mypriority, root=dep_root),
2624 allow_unsatisfied=allow_unsatisfied):
2627 except portage.exception.InvalidAtom, e:
2628 show_invalid_depstring_notice(
2629 pkg, dep_string, str(e))
2631 if not pkg.installed:
2635 print "Exiting...", pkg
# Walk a reduced dep structure: defer "||" groups and virtual/* atoms to
# the disjunction stack, yield everything else unchanged.
# NOTE(review): embedded numbering is non-contiguous — index increments,
# "yield" statements and the "||" comparison line are missing from this
# excerpt; the visible loop body is incomplete.
2639 def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
2641 Queue disjunctive (virtual and ||) deps in self._dep_disjunctive_stack.
2642 Yields non-disjunctive deps. Raises InvalidDependString when
2646 while i < len(dep_struct):
2648 if isinstance(x, list):
2649 for y in self._queue_disjunctive_deps(
2650 pkg, dep_root, dep_priority, x):
# A "||" operator and its following group are queued as one disjunction.
2653 self._queue_disjunction(pkg, dep_root, dep_priority,
2654 [ x, dep_struct[ i + 1 ] ] )
2658 x = portage.dep.Atom(x)
2659 except portage.exception.InvalidAtom:
2660 if not pkg.installed:
2661 raise portage.exception.InvalidDependString(
2662 "invalid atom: '%s'" % x)
2664 # Note: Eventually this will check for PROPERTIES=virtual
2665 # or whatever other metadata gets implemented for this
2667 if x.cp.startswith('virtual/'):
2668 self._queue_disjunction( pkg, dep_root,
2669 dep_priority, [ str(x) ] )
def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
	"""Defer one disjunctive dependency for later processing.

	The entry is pushed onto self._dep_disjunctive_stack, from which
	_pop_disjunction() later pops it (LIFO order).
	"""
	entry = (pkg, dep_root, dep_priority, dep_struct)
	self._dep_disjunctive_stack.append(entry)
# Pop one queued disjunction and feed it back through _add_pkg_dep_string.
# NOTE(review): embedded numbering is non-contiguous — the docstring
# quote lines and the final return statements are missing in this excerpt.
2678 def _pop_disjunction(self, allow_unsatisfied):
2680 Pop one disjunctive dep from self._dep_disjunctive_stack, and use it to
2681 populate self._dep_stack.
2683 pkg, dep_root, dep_priority, dep_struct = \
2684 self._dep_disjunctive_stack.pop()
# Re-serialize the structure so it can go through normal dep handling.
2685 dep_string = portage.dep.paren_enclose(dep_struct)
2686 if not self._add_pkg_dep_string(
2687 pkg, dep_root, dep_priority, dep_string, allow_unsatisfied):
# Build a priority object of the kind appropriate for the current mode:
# UnmergeDepPriority during removal actions, DepPriority otherwise.
# NOTE(review): embedded numbering skips 2694 — the "else:" introducing
# the DepPriority branch appears missing in this excerpt.
2691 def _priority(self, **kwargs):
2692 if "remove" in self.myparams:
2693 priority_constructor = UnmergeDepPriority
2695 priority_constructor = DepPriority
2696 return priority_constructor(**kwargs)
# Expand a category-less atom into candidate category/pn atoms by scanning
# the categories of each configured package database.
# NOTE(review): embedded numbering is non-contiguous — the docstring
# quotes, the categories-set initialization, a "categories.add(cat)" style
# line and the final return are missing in this excerpt.
2698 def _dep_expand(self, root_config, atom_without_category):
2700 @param root_config: a root config instance
2701 @type root_config: RootConfig
2702 @param atom_without_category: an atom without a category component
2703 @type atom_without_category: String
2705 @returns: a list of atoms containing categories (possibly empty)
# Insert a dummy "null" category just to split out the package name.
2707 null_cp = portage.dep_getkey(insert_category_into_atom(
2708 atom_without_category, "null"))
2709 cat, atom_pn = portage.catsplit(null_cp)
2711 dbs = self._filtered_trees[root_config.root]["dbs"]
2713 for db, pkg_type, built, installed, db_keys in dbs:
2714 for cat in db.categories:
2715 if db.cp_list("%s/%s" % (cat, atom_pn)):
2719 for cat in categories:
2720 deps.append(insert_category_into_atom(
2721 atom_without_category, cat))
# Report whether any filtered db for the given root provides atom_cp as a
# new-style virtual (i.e. has an actual ebuild/package under that cp).
# NOTE(review): embedded numbering is non-contiguous — the result-variable
# initialization, assignment and return are missing in this excerpt.
2724 def _have_new_virt(self, root, atom_cp):
2726 for db, pkg_type, built, installed, db_keys in \
2727 self._filtered_trees[root]["dbs"]:
2728 if db.cp_list(atom_cp):
# Yield the argument atoms that apply to pkg, skipping atoms superseded by
# new-style virtuals or by visible packages in higher slots.
# NOTE(review): embedded numbering is non-contiguous — "return"/"continue"
# lines, the higher_slot initialization and the yield are missing here.
2733 def _iter_atoms_for_pkg(self, pkg):
2734 # TODO: add multiple $ROOT support
2735 if pkg.root != self.target_root:
2737 atom_arg_map = self._atom_arg_map
2738 root_config = self.roots[pkg.root]
2739 for atom in self._set_atoms.iterAtomsForPackage(pkg):
2740 atom_cp = portage.dep_getkey(atom)
# Old-style virtual atoms are dropped when a new-style virtual exists.
2741 if atom_cp != pkg.cp and \
2742 self._have_new_virt(pkg.root, atom_cp):
2744 visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
2745 visible_pkgs.reverse() # descending order
2747 for visible_pkg in visible_pkgs:
2748 if visible_pkg.cp != atom_cp:
2750 if pkg >= visible_pkg:
2751 # This is descending order, and we're not
2752 # interested in any versions <= pkg given.
2754 if pkg.slot_atom != visible_pkg.slot_atom:
2755 higher_slot = visible_pkg
2757 if higher_slot is not None:
2759 for arg in atom_arg_map[(atom, pkg.root)]:
2760 if isinstance(arg, PackageArg) and \
# Entry point for turning command-line arguments (.tbz2 files, .ebuild
# files, file paths, set names, and atoms) into graph roots and favorites.
# NOTE(review): the embedded original-line numbering is highly
# non-contiguous throughout this method — loop headers ("for x in
# myfiles:"), "try:" lines, "else:"/"continue" branches and list
# initializations (args, lookup_owners, etc.) are missing from this
# excerpt; the visible code is a partial listing only.
2765 def select_files(self, myfiles):
2766 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
2767 appropriate depgraph and return a favorite list."""
2768 debug = "--debug" in self.myopts
2769 root_config = self.roots[self.target_root]
2770 sets = root_config.sets
2771 getSetAtoms = root_config.setconfig.getSetAtoms
2773 myroot = self.target_root
2774 dbs = self._filtered_trees[myroot]["dbs"]
2775 vardb = self.trees[myroot]["vartree"].dbapi
2776 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
2777 portdb = self.trees[myroot]["porttree"].dbapi
2778 bindb = self.trees[myroot]["bintree"].dbapi
2779 pkgsettings = self.pkgsettings[myroot]
2781 onlydeps = "--onlydeps" in self.myopts
# --- Binary package (.tbz2) arguments: locate under PKGDIR, validate, and
# wrap in a PackageArg.
2784 ext = os.path.splitext(x)[1]
2786 if not os.path.exists(x):
2788 os.path.join(pkgsettings["PKGDIR"], "All", x)):
2789 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
2790 elif os.path.exists(
2791 os.path.join(pkgsettings["PKGDIR"], x)):
2792 x = os.path.join(pkgsettings["PKGDIR"], x)
2794 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
2795 print "!!! Please ensure the tbz2 exists as specified.\n"
2796 return 0, myfavorites
2797 mytbz2=portage.xpak.tbz2(x)
2798 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
2799 if os.path.realpath(x) != \
2800 os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
2801 print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
2802 return 0, myfavorites
2803 db_keys = list(bindb._aux_cache_keys)
2804 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
2805 pkg = Package(type_name="binary", root_config=root_config,
2806 cpv=mykey, built=True, metadata=metadata,
2808 self._pkg_cache[pkg] = pkg
2809 args.append(PackageArg(arg=x, package=pkg,
2810 root_config=root_config))
# --- Ebuild file arguments: derive cp/cpv from the path, verify tree
# layout and visibility, and wrap in a PackageArg.
2811 elif ext==".ebuild":
2812 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
2813 pkgdir = os.path.dirname(ebuild_path)
2814 tree_root = os.path.dirname(os.path.dirname(pkgdir))
2815 cp = pkgdir[len(tree_root)+1:]
2816 e = portage.exception.PackageNotFound(
2817 ("%s is not in a valid portage tree " + \
2818 "hierarchy or does not exist") % x)
2819 if not portage.isvalidatom(cp):
2821 cat = portage.catsplit(cp)[0]
2822 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
2823 if not portage.isvalidatom("="+mykey):
2825 ebuild_path = portdb.findname(mykey)
2827 if ebuild_path != os.path.join(os.path.realpath(tree_root),
2828 cp, os.path.basename(ebuild_path)):
2829 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
2830 return 0, myfavorites
2831 if mykey not in portdb.xmatch(
2832 "match-visible", portage.dep_getkey(mykey)):
2833 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
2834 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
2835 print colorize("BAD", "*** page for details.")
2836 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
2839 raise portage.exception.PackageNotFound(
2840 "%s is not in a valid portage tree hierarchy or does not exist" % x)
2841 db_keys = list(portdb._aux_cache_keys)
2842 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
2843 pkg = Package(type_name="ebuild", root_config=root_config,
2844 cpv=mykey, metadata=metadata, onlydeps=onlydeps)
2845 pkgsettings.setcpv(pkg)
2846 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
2847 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
2848 self._pkg_cache[pkg] = pkg
2849 args.append(PackageArg(arg=x, package=pkg,
2850 root_config=root_config))
# --- Absolute file paths: queue for owner lookup against the vardb.
2851 elif x.startswith(os.path.sep):
2852 if not x.startswith(myroot):
2853 portage.writemsg(("\n\n!!! '%s' does not start with" + \
2854 " $ROOT.\n") % x, noiselevel=-1)
2856 # Queue these up since it's most efficient to handle
2857 # multiple files in a single iter_owners() call.
2858 lookup_owners.append(x)
# --- Set names (system/world and @-prefixed sets): expand recursively.
2860 if x in ("system", "world"):
2862 if x.startswith(SETPREFIX):
2863 s = x[len(SETPREFIX):]
2865 raise portage.exception.PackageSetNotFound(s)
2868 # Recursively expand sets so that containment tests in
2869 # self._get_parent_sets() properly match atoms in nested
2870 # sets (like if world contains system).
2871 expanded_set = InternalPackageSet(
2872 initial_atoms=getSetAtoms(s))
2873 self._sets[s] = expanded_set
2874 args.append(SetArg(arg=x, set=expanded_set,
2875 root_config=root_config))
# --- Plain atoms: validate, expand category-less atoms, disambiguate.
2877 if not is_valid_package_atom(x):
2878 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
2880 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
2881 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
2883 # Don't expand categories or old-style virtuals here unless
2884 # necessary. Expansion of old-style virtuals here causes at
2885 # least the following problems:
2886 # 1) It's more difficult to determine which set(s) an atom
2887 # came from, if any.
2888 # 2) It takes away freedom from the resolver to choose other
2889 # possible expansions when necessary.
2891 args.append(AtomArg(arg=x, atom=x,
2892 root_config=root_config))
2894 expanded_atoms = self._dep_expand(root_config, x)
2895 installed_cp_set = set()
2896 for atom in expanded_atoms:
2897 atom_cp = portage.dep_getkey(atom)
2898 if vardb.cp_list(atom_cp):
2899 installed_cp_set.add(atom_cp)
# Prefer the non-virtual cp when installed candidates are ambiguous.
2901 if len(installed_cp_set) > 1:
2902 non_virtual_cps = set()
2903 for atom_cp in installed_cp_set:
2904 if not atom_cp.startswith("virtual/"):
2905 non_virtual_cps.add(atom_cp)
2906 if len(non_virtual_cps) == 1:
2907 installed_cp_set = non_virtual_cps
2909 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
2910 installed_cp = iter(installed_cp_set).next()
2911 expanded_atoms = [atom for atom in expanded_atoms \
2912 if portage.dep_getkey(atom) == installed_cp]
2914 if len(expanded_atoms) > 1:
2917 ambiguous_package_name(x, expanded_atoms, root_config,
2918 self.spinner, self.myopts)
2919 return False, myfavorites
2921 atom = expanded_atoms[0]
2923 null_atom = insert_category_into_atom(x, "null")
2924 null_cp = portage.dep_getkey(null_atom)
2925 cat, atom_pn = portage.catsplit(null_cp)
2926 virts_p = root_config.settings.get_virts_p().get(atom_pn)
2928 # Allow the depgraph to choose which virtual.
2929 atom = insert_category_into_atom(x, "virtual")
2931 atom = insert_category_into_atom(x, "null")
2933 args.append(AtomArg(arg=x, atom=atom,
2934 root_config=root_config))
# --- Resolve queued file paths to owning packages via iter_owners().
2938 search_for_multiple = False
2939 if len(lookup_owners) > 1:
2940 search_for_multiple = True
2942 for x in lookup_owners:
2943 if not search_for_multiple and os.path.isdir(x):
2944 search_for_multiple = True
2945 relative_paths.append(x[len(myroot):])
2948 for pkg, relative_path in \
2949 real_vardb._owners.iter_owners(relative_paths):
2950 owners.add(pkg.mycpv)
2951 if not search_for_multiple:
2955 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
2956 "by any package.\n") % lookup_owners[0], noiselevel=-1)
2960 slot = vardb.aux_get(cpv, ["SLOT"])[0]
2962 # portage now masks packages with missing slot, but it's
2963 # possible that one was installed by an older version
2964 atom = portage.cpv_getkey(cpv)
2966 atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
2967 args.append(AtomArg(arg=atom, atom=atom,
2968 root_config=root_config))
# --- --update: iterate greedy-slot expansion, with blocker lookahead on
# the second pass (see numbered plan in the comments below).
2970 if "--update" in self.myopts:
2971 # In some cases, the greedy slots behavior can pull in a slot that
2972 # the user would want to uninstall due to it being blocked by a
2973 # newer version in a different slot. Therefore, it's necessary to
2974 # detect and discard any that should be uninstalled. Each time
2975 # that arguments are updated, package selections are repeated in
2976 # order to ensure consistency with the current arguments:
2978 # 1) Initialize args
2979 # 2) Select packages and generate initial greedy atoms
2980 # 3) Update args with greedy atoms
2981 # 4) Select packages and generate greedy atoms again, while
2982 # accounting for any blockers between selected packages
2983 # 5) Update args with revised greedy atoms
2985 self._set_args(args)
2988 greedy_args.append(arg)
2989 if not isinstance(arg, AtomArg):
2991 for atom in self._greedy_slots(arg.root_config, arg.atom):
2993 AtomArg(arg=arg.arg, atom=atom,
2994 root_config=arg.root_config))
2996 self._set_args(greedy_args)
2999 # Revise greedy atoms, accounting for any blockers
3000 # between selected packages.
3001 revised_greedy_args = []
3003 revised_greedy_args.append(arg)
3004 if not isinstance(arg, AtomArg):
3006 for atom in self._greedy_slots(arg.root_config, arg.atom,
3007 blocker_lookahead=True):
3008 revised_greedy_args.append(
3009 AtomArg(arg=arg.arg, atom=atom,
3010 root_config=arg.root_config))
3011 args = revised_greedy_args
3012 del revised_greedy_args
3014 self._set_args(args)
# --- Record favorites (deduplicated) from atom/package/set arguments.
3016 myfavorites = set(myfavorites)
3018 if isinstance(arg, (AtomArg, PackageArg)):
3019 myfavorites.add(arg.atom)
3020 elif isinstance(arg, SetArg):
3021 myfavorites.add(arg.arg)
3022 myfavorites = list(myfavorites)
3024 pprovideddict = pkgsettings.pprovideddict
3026 portage.writemsg("\n", noiselevel=-1)
3027 # Order needs to be preserved since a feature of --nodeps
3028 # is to allow the user to force a specific merge order.
# --- Main selection loop: select a package for each argument atom and add
# it (and later its deps) to the graph.
3032 for atom in arg.set:
3033 self.spinner.update()
3034 dep = Dependency(atom=atom, onlydeps=onlydeps,
3035 root=myroot, parent=arg)
3036 atom_cp = portage.dep_getkey(atom)
3038 pprovided = pprovideddict.get(portage.dep_getkey(atom))
3039 if pprovided and portage.match_from_list(atom, pprovided):
3040 # A provided package has been specified on the command line.
3041 self._pprovided_args.append((arg, atom))
3043 if isinstance(arg, PackageArg):
3044 if not self._add_pkg(arg.package, dep) or \
3045 not self._create_graph():
3046 sys.stderr.write(("\n\n!!! Problem resolving " + \
3047 "dependencies for %s\n") % arg.arg)
3048 return 0, myfavorites
3051 portage.writemsg(" Arg: %s\n Atom: %s\n" % \
3052 (arg, atom), noiselevel=-1)
3053 pkg, existing_node = self._select_package(
3054 myroot, atom, onlydeps=onlydeps)
3056 if not (isinstance(arg, SetArg) and \
3057 arg.name in ("system", "world")):
3058 self._unsatisfied_deps_for_display.append(
3059 ((myroot, atom), {}))
3060 return 0, myfavorites
3061 self._missing_args.append((arg, atom))
3063 if atom_cp != pkg.cp:
3064 # For old-style virtuals, we need to repeat the
3065 # package.provided check against the selected package.
3066 expanded_atom = atom.replace(atom_cp, pkg.cp)
3067 pprovided = pprovideddict.get(pkg.cp)
3069 portage.match_from_list(expanded_atom, pprovided):
3070 # A provided package has been
3071 # specified on the command line.
3072 self._pprovided_args.append((arg, atom))
3074 if pkg.installed and "selective" not in self.myparams:
3075 self._unsatisfied_deps_for_display.append(
3076 ((myroot, atom), {}))
3077 # Previous behavior was to bail out in this case, but
3078 # since the dep is satisfied by the installed package,
3079 # it's more friendly to continue building the graph
3080 # and just show a warning message. Therefore, only bail
3081 # out here if the atom is not from either the system or
3083 if not (isinstance(arg, SetArg) and \
3084 arg.name in ("system", "world")):
3085 return 0, myfavorites
3087 # Add the selected package to the graph as soon as possible
3088 # so that later dep_check() calls can use it as feedback
3089 # for making more consistent atom selections.
3090 if not self._add_pkg(pkg, dep):
3091 if isinstance(arg, SetArg):
3092 sys.stderr.write(("\n\n!!! Problem resolving " + \
3093 "dependencies for %s from %s\n") % \
3096 sys.stderr.write(("\n\n!!! Problem resolving " + \
3097 "dependencies for %s\n") % atom)
3098 return 0, myfavorites
# --- Error reporting for signature problems and unexpected failures.
3100 except portage.exception.MissingSignature, e:
3101 portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
3102 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
3103 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
3104 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
3105 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
3106 return 0, myfavorites
3107 except portage.exception.InvalidSignature, e:
3108 portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
3109 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
3110 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
3111 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
3112 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
3113 return 0, myfavorites
3114 except SystemExit, e:
3115 raise # Needed else can't exit
3116 except Exception, e:
3117 print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
3118 print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
3121 # Now that the root packages have been added to the graph,
3122 # process the dependencies.
3123 if not self._create_graph():
3124 return 0, myfavorites
# --- --usepkgonly: verify every merge node has a binary available.
3127 if "--usepkgonly" in self.myopts:
3128 for xs in self.digraph.all_nodes():
3129 if not isinstance(xs, Package):
3131 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
3135 print "Missing binary for:",xs[2]
3139 except self._unknown_internal_error:
3140 return False, myfavorites
3142 # We're true here unless we are missing binaries.
3143 return (not missing,myfavorites)
# Rebuild the "args" package set and the atom->argument map from the given
# argument list, then invalidate the package selection cache.
# NOTE(review): embedded numbering is non-contiguous — docstring quotes,
# loop headers over args, "continue"s and the refs-list initialization
# are missing from this excerpt.
3145 def _set_args(self, args):
3147 Create the "args" package set from atoms and packages given as
3148 arguments. This method can be called multiple times if necessary.
3149 The package selection cache is automatically invalidated, since
3150 arguments influence package selections.
3152 args_set = self._sets["args"]
3155 if not isinstance(arg, (AtomArg, PackageArg)):
3158 if atom in args_set:
3162 self._set_atoms.clear()
# Rebuild the combined atom set from every registered package set.
3163 self._set_atoms.update(chain(*self._sets.itervalues()))
3164 atom_arg_map = self._atom_arg_map
3165 atom_arg_map.clear()
3167 for atom in arg.set:
3168 atom_key = (atom, arg.root_config.root)
3169 refs = atom_arg_map.get(atom_key)
3172 atom_arg_map[atom_key] = refs
3176 # Invalidate the package selection cache, since
3177 # arguments influence package selections.
3178 self._highest_pkg_cache.clear()
3179 for trees in self._filtered_trees.itervalues():
3180 trees["porttree"].dbapi._clear_cache()
# Produce slot atoms for installed slots other than the highest visible
# match's slot, optionally dropping those that would trigger blockers.
# NOTE(review): embedded numbering is non-contiguous — docstring quotes,
# early returns, set initializations (slots, blockers) and a
# "greedy_pkgs = []" style line are missing from this excerpt.
3182 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
3184 Return a list of slot atoms corresponding to installed slots that
3185 differ from the slot of the highest visible match. When
3186 blocker_lookahead is True, slot atoms that would trigger a blocker
3187 conflict are automatically discarded, potentially allowing automatic
3188 uninstallation of older slots when appropriate.
3190 highest_pkg, in_graph = self._select_package(root_config.root, atom)
3191 if highest_pkg is None:
3193 vardb = root_config.trees["vartree"].dbapi
# Collect installed SLOTs for the same cp as the highest visible match.
3195 for cpv in vardb.match(atom):
3196 # don't mix new virtuals with old virtuals
3197 if portage.cpv_getkey(cpv) == highest_pkg.cp:
3198 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
3200 slots.add(highest_pkg.metadata["SLOT"])
3204 slots.remove(highest_pkg.metadata["SLOT"])
3207 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
3208 pkg, in_graph = self._select_package(root_config.root, slot_atom)
3209 if pkg is not None and \
3210 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
3211 greedy_pkgs.append(pkg)
3214 if not blocker_lookahead:
3215 return [pkg.slot_atom for pkg in greedy_pkgs]
# Blocker lookahead: compute each candidate's blocker atoms and discard
# candidates that conflict with the highest visible match or each other.
3218 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
3219 for pkg in greedy_pkgs + [highest_pkg]:
3220 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
3222 atoms = self._select_atoms(
3223 pkg.root, dep_str, pkg.use.enabled,
3224 parent=pkg, strict=True)
3225 except portage.exception.InvalidDependString:
3227 blocker_atoms = (x for x in atoms if x.blocker)
3228 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
3230 if highest_pkg not in blockers:
3233 # filter packages with invalid deps
3234 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
3236 # filter packages that conflict with highest_pkg
3237 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
3238 (blockers[highest_pkg].findAtomForPackage(pkg) or \
3239 blockers[pkg].findAtomForPackage(highest_pkg))]
3244 # If two packages conflict, discard the lower version.
3245 discard_pkgs = set()
3246 greedy_pkgs.sort(reverse=True)
3247 for i in xrange(len(greedy_pkgs) - 1):
3248 pkg1 = greedy_pkgs[i]
3249 if pkg1 in discard_pkgs:
3251 for j in xrange(i + 1, len(greedy_pkgs)):
3252 pkg2 = greedy_pkgs[j]
3253 if pkg2 in discard_pkgs:
3255 if blockers[pkg1].findAtomForPackage(pkg2) or \
3256 blockers[pkg2].findAtomForPackage(pkg1):
3258 discard_pkgs.add(pkg2)
3260 return [pkg.slot_atom for pkg in greedy_pkgs \
3261 if pkg not in discard_pkgs]
3263 def _select_atoms_from_graph(self, *pargs, **kwargs):
# Thin wrapper: forces dep resolution to use self._graph_trees so that
# atom selection prefers packages already in the graph (or installed
# and not scheduled for replacement), then delegates to
# _select_atoms_highest_available.
3265 Prefer atoms matching packages that have already been
3266 added to the graph or those that are installed and have
3267 not been scheduled for replacement.
3269 kwargs["trees"] = self._graph_trees
3270 return self._select_atoms_highest_available(*pargs, **kwargs)
3272 def _select_atoms_highest_available(self, root, depstring,
3273 myuse=None, parent=None, strict=True, trees=None, priority=None):
3274 """This will raise InvalidDependString if necessary. If trees is
3275 None then self._filtered_trees is used."""
# Runs portage.dep_check() on depstring and returns the selected atoms.
# The parent package is temporarily attached to trees[root] only for
# buildtime deps (see comment below), and the module-global
# _dep_check_strict flag is toggled around the call — the restore on
# line 3299 is order-critical.
# NOTE(review): some lines are elided in this listing, including the
# branch that assigns trees and the parent attachment for buildtime
# priorities; structure shown here is partial.
3276 pkgsettings = self.pkgsettings[root]
3278 trees = self._filtered_trees
3279 if not getattr(priority, "buildtime", False):
3280 # The parent should only be passed to dep_check() for buildtime
3281 # dependencies since that's the only case when it's appropriate
3282 # to trigger the circular dependency avoidance code which uses it.
3283 # It's important not to trigger the same circular dependency
3284 # avoidance code for runtime dependencies since it's not needed
3285 # and it can promote an incorrect package choice.
3289 if parent is not None:
3290 trees[root]["parent"] = parent
3292 portage.dep._dep_check_strict = False
3293 mycheck = portage.dep_check(depstring, None,
3294 pkgsettings, myuse=myuse,
3295 myroot=root, trees=trees)
3297 if parent is not None:
3298 trees[root].pop("parent")
3299 portage.dep._dep_check_strict = True
# dep_check returns (success, result); on failure result is the error
# message, on success it is the list of selected atoms.
3301 raise portage.exception.InvalidDependString(mycheck[1])
3302 selected_atoms = mycheck[1]
3303 return selected_atoms
3305 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
# Print a human-readable explanation of why `atom` could not be
# satisfied on `root`: missing USE flags / IUSE, masked packages
# (including EAPI masks), or simply no matching ebuilds. Also walks
# self.digraph upward from myparent to show which argument pulled
# the failing dependency in. Output goes straight to stdout via
# Python 2 print statements.
3306 atom = portage.dep.Atom(atom)
3307 atom_set = InternalPackageSet(initial_atoms=(atom,))
# Match without USE deps (but keep the slot) so that packages which
# exist but lack the requested USE configuration are still found.
3308 atom_without_use = atom
3310 atom_without_use = portage.dep.remove_slot(atom)
3312 atom_without_use += ":" + atom.slot
3313 atom_without_use = portage.dep.Atom(atom_without_use)
3314 xinfo = '"%s"' % atom
3317 # Discard null/ from failed cpv_expand category expansion.
3318 xinfo = xinfo.replace("null/", "")
3319 masked_packages = []
3321 masked_pkg_instances = set()
3322 missing_licenses = []
3323 have_eapi_mask = False
3324 pkgsettings = self.pkgsettings[root]
3325 implicit_iuse = pkgsettings._get_implicit_iuse()
3326 root_config = self.roots[root]
3327 portdb = self.roots[root].trees["porttree"].dbapi
3328 dbs = self._filtered_trees[root]["dbs"]
3329 for db, pkg_type, built, installed, db_keys in dbs:
3333 if hasattr(db, "xmatch"):
3334 cpv_list = db.xmatch("match-all", atom_without_use)
3336 cpv_list = db.match(atom_without_use)
3339 for cpv in cpv_list:
3340 metadata, mreasons = get_mask_info(root_config, cpv,
3341 pkgsettings, db, pkg_type, built, installed, db_keys)
3342 if metadata is not None:
3343 pkg = Package(built=built, cpv=cpv,
3344 installed=installed, metadata=metadata,
3345 root_config=root_config)
3346 if pkg.cp != atom.cp:
3347 # A cpv can be returned from dbapi.match() as an
3348 # old-style virtual match even in cases when the
3349 # package does not actually PROVIDE the virtual.
3350 # Filter out any such false matches here.
3351 if not atom_set.findAtomForPackage(pkg):
3354 masked_pkg_instances.add(pkg)
3356 missing_use.append(pkg)
3359 masked_packages.append(
3360 (root_config, pkgsettings, cpv, metadata, mreasons))
# For each candidate lacking the requested USE configuration, work
# out whether the flags are missing from IUSE entirely or merely
# need to be toggled, and build colorized reason strings.
3362 missing_use_reasons = []
3363 missing_iuse_reasons = []
3364 for pkg in missing_use:
3365 use = pkg.use.enabled
3366 iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
3367 iuse_re = re.compile("^(%s)$" % "|".join(iuse))
3369 for x in atom.use.required:
3370 if iuse_re.match(x) is None:
3371 missing_iuse.append(x)
3374 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
3375 missing_iuse_reasons.append((pkg, mreasons))
3377 need_enable = sorted(atom.use.enabled.difference(use))
3378 need_disable = sorted(atom.use.disabled.intersection(use))
3379 if need_enable or need_disable:
3381 changes.extend(colorize("red", "+" + x) \
3382 for x in need_enable)
3383 changes.extend(colorize("blue", "-" + x) \
3384 for x in need_disable)
3385 mreasons.append("Change USE: %s" % " ".join(changes))
3386 missing_use_reasons.append((pkg, mreasons))
3388 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
3389 in missing_use_reasons if pkg not in masked_pkg_instances]
3391 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
3392 in missing_iuse_reasons if pkg not in masked_pkg_instances]
3394 show_missing_use = False
3395 if unmasked_use_reasons:
3396 # Only show the latest version.
3397 show_missing_use = unmasked_use_reasons[:1]
3398 elif unmasked_iuse_reasons:
3399 if missing_use_reasons:
3400 # All packages with required IUSE are masked,
3401 # so display a normal masking message.
3404 show_missing_use = unmasked_iuse_reasons
# Report, in priority order: USE-flag problems, then masks, then
# "no ebuilds at all".
3406 if show_missing_use:
3407 print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
3408 print "!!! One of the following packages is required to complete your request:"
3409 for pkg, mreasons in show_missing_use:
3410 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
3412 elif masked_packages:
3414 colorize("BAD", "All ebuilds that could satisfy ") + \
3415 colorize("INFORM", xinfo) + \
3416 colorize("BAD", " have been masked.")
3417 print "!!! One of the following masked packages is required to complete your request:"
3418 have_eapi_mask = show_masked_packages(masked_packages)
3421 msg = ("The current version of portage supports " + \
3422 "EAPI '%s'. You must upgrade to a newer version" + \
3423 " of portage before EAPI masked packages can" + \
3424 " be installed.") % portage.const.EAPI
3425 from textwrap import wrap
3426 for line in wrap(msg, 75):
3431 print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
3433 # Show parent nodes and the argument that pulled them in.
3434 traversed_nodes = set()
3437 while node is not None:
3438 traversed_nodes.add(node)
3439 msg.append('(dependency required by "%s" [%s])' % \
3440 (colorize('INFORM', str(node.cpv)), node.type_name))
3441 # When traversing to parents, prefer arguments over packages
3442 # since arguments are root nodes. Never traverse the same
3443 # package twice, in order to prevent an infinite loop.
3444 selected_parent = None
3445 for parent in self.digraph.parent_nodes(node):
3446 if isinstance(parent, DependencyArg):
3447 msg.append('(dependency required by "%s" [argument])' % \
3448 (colorize('INFORM', str(parent))))
3449 selected_parent = None
3451 if parent not in traversed_nodes:
3452 selected_parent = parent
3453 node = selected_parent
3459 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
# Memoizing front-end for _select_pkg_highest_available_imp().
# Results are cached per (root, atom, onlydeps); on a cache hit whose
# package has since been added to the graph, the cached entry is
# refreshed with the in-graph instance. Newly computed visible
# packages are also injected into root_config.visible_pkgs.
# NOTE(review): the early-return on cache hit and the tuple unpacking
# of `ret` into (pkg, existing) are among the lines elided from this
# listing.
3460 cache_key = (root, atom, onlydeps)
3461 ret = self._highest_pkg_cache.get(cache_key)
3464 if pkg and not existing:
3465 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
3466 if existing and existing == pkg:
3467 # Update the cache to reflect that the
3468 # package has been added to the graph.
3470 self._highest_pkg_cache[cache_key] = ret
3472 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
3473 self._highest_pkg_cache[cache_key] = ret
3476 settings = pkg.root_config.settings
3477 if visible(settings, pkg) and not (pkg.installed and \
3478 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
3479 pkg.root_config.visible_pkgs.cpv_inject(pkg)
3482 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
# Core package-selection routine: scan every configured db (ebuild,
# binary, installed) for matches of `atom`, apply visibility, USE,
# --noreplace/--newuse/--reinstall and old-vs-new-style-virtual
# filtering, and return (best_package, existing_graph_node).
# The outer two-pass loop first looks for a node already in the graph
# (find_existing_node=True), then falls back to a fresh selection.
# NOTE(review): this listing elides many interior lines (continue /
# break / try statements), so control flow shown here is partial;
# code left byte-identical rather than restructured.
3483 root_config = self.roots[root]
3484 pkgsettings = self.pkgsettings[root]
3485 dbs = self._filtered_trees[root]["dbs"]
3486 vardb = self.roots[root].trees["vartree"].dbapi
3487 portdb = self.roots[root].trees["porttree"].dbapi
3488 # List of acceptable packages, ordered by type preference.
3489 matched_packages = []
3490 highest_version = None
3491 if not isinstance(atom, portage.dep.Atom):
3492 atom = portage.dep.Atom(atom)
3494 atom_set = InternalPackageSet(initial_atoms=(atom,))
3495 existing_node = None
3497 usepkgonly = "--usepkgonly" in self.myopts
3498 empty = "empty" in self.myparams
3499 selective = "selective" in self.myparams
3501 noreplace = "--noreplace" in self.myopts
3502 # Behavior of the "selective" parameter depends on
3503 # whether or not a package matches an argument atom.
3504 # If an installed package provides an old-style
3505 # virtual that is no longer provided by an available
3506 # package, the installed package may match an argument
3507 # atom even though none of the available packages do.
3508 # Therefore, "selective" logic does not consider
3509 # whether or not an installed package matches an
3510 # argument atom. It only considers whether or not
3511 # available packages match argument atoms, which is
3512 # represented by the found_available_arg flag.
3513 found_available_arg = False
3514 for find_existing_node in True, False:
3517 for db, pkg_type, built, installed, db_keys in dbs:
3520 if installed and not find_existing_node:
3521 want_reinstall = reinstall or empty or \
3522 (found_available_arg and not selective)
3523 if want_reinstall and matched_packages:
3525 if hasattr(db, "xmatch"):
3526 cpv_list = db.xmatch("match-all", atom)
3528 cpv_list = db.match(atom)
3530 # USE=multislot can make an installed package appear as if
3531 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
3532 # won't do any good as long as USE=multislot is enabled since
3533 # the newly built package still won't have the expected slot.
3534 # Therefore, assume that such SLOT dependencies are already
3535 # satisfied rather than forcing a rebuild.
3536 if installed and not cpv_list and atom.slot:
3537 for cpv in db.match(atom.cp):
3538 slot_available = False
3539 for other_db, other_type, other_built, \
3540 other_installed, other_keys in dbs:
3543 other_db.aux_get(cpv, ["SLOT"])[0]:
3544 slot_available = True
3548 if not slot_available:
3550 inst_pkg = self._pkg(cpv, "installed",
3551 root_config, installed=installed)
3552 # Remove the slot from the atom and verify that
3553 # the package matches the resulting atom.
3554 atom_without_slot = portage.dep.remove_slot(atom)
3556 atom_without_slot += str(atom.use)
3557 atom_without_slot = portage.dep.Atom(atom_without_slot)
3558 if portage.match_from_list(
3559 atom_without_slot, [inst_pkg]):
3560 cpv_list = [inst_pkg.cpv]
3565 pkg_status = "merge"
3566 if installed or onlydeps:
3567 pkg_status = "nomerge"
3570 for cpv in cpv_list:
3571 # Make --noreplace take precedence over --newuse.
3572 if not installed and noreplace and \
3573 cpv in vardb.match(atom):
3574 # If the installed version is masked, it may
3575 # be necessary to look at lower versions,
3576 # in case there is a visible downgrade.
3578 reinstall_for_flags = None
3579 cache_key = (pkg_type, root, cpv, pkg_status)
3580 calculated_use = True
3581 pkg = self._pkg_cache.get(cache_key)
3583 calculated_use = False
3585 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
3588 pkg = Package(built=built, cpv=cpv,
3589 installed=installed, metadata=metadata,
3590 onlydeps=onlydeps, root_config=root_config,
3592 metadata = pkg.metadata
3594 metadata['CHOST'] = pkgsettings.get('CHOST', '')
3595 if not built and ("?" in metadata["LICENSE"] or \
3596 "?" in metadata["PROVIDE"]):
3597 # This is avoided whenever possible because
3598 # it's expensive. It only needs to be done here
3599 # if it has an effect on visibility.
3600 pkgsettings.setcpv(pkg)
3601 metadata["USE"] = pkgsettings["PORTAGE_USE"]
3602 calculated_use = True
3603 self._pkg_cache[pkg] = pkg
3605 if not installed or (built and matched_packages):
3606 # Only enforce visibility on installed packages
3607 # if there is at least one other visible package
3608 # available. By filtering installed masked packages
3609 # here, packages that have been masked since they
3610 # were installed can be automatically downgraded
3611 # to an unmasked version.
3613 if not visible(pkgsettings, pkg):
3615 except portage.exception.InvalidDependString:
3619 # Enable upgrade or downgrade to a version
3620 # with visible KEYWORDS when the installed
3621 # version is masked by KEYWORDS, but never
3622 # reinstall the same exact version only due
3623 # to a KEYWORDS mask.
3624 if built and matched_packages:
3626 different_version = None
3627 for avail_pkg in matched_packages:
3628 if not portage.dep.cpvequal(
3629 pkg.cpv, avail_pkg.cpv):
3630 different_version = avail_pkg
3632 if different_version is not None:
3635 pkgsettings._getMissingKeywords(
3636 pkg.cpv, pkg.metadata):
3639 # If the ebuild no longer exists or it's
3640 # keywords have been dropped, reject built
3641 # instances (installed or binary).
3642 # If --usepkgonly is enabled, assume that
3643 # the ebuild status should be ignored.
3647 pkg.cpv, "ebuild", root_config)
3648 except portage.exception.PackageNotFound:
3651 if not visible(pkgsettings, pkg_eb):
3654 if not pkg.built and not calculated_use:
3655 # This is avoided whenever possible because
3657 pkgsettings.setcpv(pkg)
3658 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
3660 if pkg.cp != atom.cp:
3661 # A cpv can be returned from dbapi.match() as an
3662 # old-style virtual match even in cases when the
3663 # package does not actually PROVIDE the virtual.
3664 # Filter out any such false matches here.
3665 if not atom_set.findAtomForPackage(pkg):
3669 if root == self.target_root:
3671 # Ebuild USE must have been calculated prior
3672 # to this point, in case atoms have USE deps.
3673 myarg = self._iter_atoms_for_pkg(pkg).next()
3674 except StopIteration:
3676 except portage.exception.InvalidDependString:
3678 # masked by corruption
3680 if not installed and myarg:
3681 found_available_arg = True
3683 if atom.use and not pkg.built:
3684 use = pkg.use.enabled
3685 if atom.use.enabled.difference(use):
3687 if atom.use.disabled.intersection(use):
3689 if pkg.cp == atom_cp:
3690 if highest_version is None:
3691 highest_version = pkg
3692 elif pkg > highest_version:
3693 highest_version = pkg
3694 # At this point, we've found the highest visible
3695 # match from the current repo. Any lower versions
3696 # from this repo are ignored, so this so the loop
3697 # will always end with a break statement below
3699 if find_existing_node:
3700 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
3703 if portage.dep.match_from_list(atom, [e_pkg]):
3704 if highest_version and \
3705 e_pkg.cp == atom_cp and \
3706 e_pkg < highest_version and \
3707 e_pkg.slot_atom != highest_version.slot_atom:
3708 # There is a higher version available in a
3709 # different slot, so this existing node is
3713 matched_packages.append(e_pkg)
3714 existing_node = e_pkg
3716 # Compare built package to current config and
3717 # reject the built package if necessary.
3718 if built and not installed and \
3719 ("--newuse" in self.myopts or \
3720 "--reinstall" in self.myopts):
3721 iuses = pkg.iuse.all
3722 old_use = pkg.use.enabled
3724 pkgsettings.setcpv(myeb)
3726 pkgsettings.setcpv(pkg)
3727 now_use = pkgsettings["PORTAGE_USE"].split()
3728 forced_flags = set()
3729 forced_flags.update(pkgsettings.useforce)
3730 forced_flags.update(pkgsettings.usemask)
3732 if myeb and not usepkgonly:
3733 cur_iuse = myeb.iuse.all
3734 if self._reinstall_for_flags(forced_flags,
3738 # Compare current config to installed package
3739 # and do not reinstall if possible.
3740 if not installed and \
3741 ("--newuse" in self.myopts or \
3742 "--reinstall" in self.myopts) and \
3743 cpv in vardb.match(atom):
3744 pkgsettings.setcpv(pkg)
3745 forced_flags = set()
3746 forced_flags.update(pkgsettings.useforce)
3747 forced_flags.update(pkgsettings.usemask)
3748 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
3749 old_iuse = set(filter_iuse_defaults(
3750 vardb.aux_get(cpv, ["IUSE"])[0].split()))
3751 cur_use = pkg.use.enabled
3752 cur_iuse = pkg.iuse.all
3753 reinstall_for_flags = \
3754 self._reinstall_for_flags(
3755 forced_flags, old_use, old_iuse,
3757 if reinstall_for_flags:
3761 matched_packages.append(pkg)
3762 if reinstall_for_flags:
3763 self._reinstall_nodes[pkg] = \
3767 if not matched_packages:
3770 if "--debug" in self.myopts:
3771 for pkg in matched_packages:
3772 portage.writemsg("%s %s\n" % \
3773 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
3775 # Filter out any old-style virtual matches if they are
3776 # mixed with new-style virtual matches.
3777 cp = portage.dep_getkey(atom)
3778 if len(matched_packages) > 1 and \
3779 "virtual" == portage.catsplit(cp)[0]:
3780 for pkg in matched_packages:
3783 # Got a new-style virtual, so filter
3784 # out any old-style virtuals.
3785 matched_packages = [pkg for pkg in matched_packages \
3789 if len(matched_packages) > 1:
3790 bestmatch = portage.best(
3791 [pkg.cpv for pkg in matched_packages])
3792 matched_packages = [pkg for pkg in matched_packages \
3793 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
3795 # ordered by type preference ("ebuild" type is the last resort)
3796 return matched_packages[-1], existing_node
3798 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
# Graph-restricted counterpart of _select_pkg_highest_available():
# matches only against self._graph_trees (packages already in the
# graph, or installed and not scheduled for replacement) and returns
# (pkg, in_graph_node). The empty-matches early return is elided
# from this listing.
3800 Select packages that have already been added to the graph or
3801 those that are installed and have not been scheduled for
3804 graph_db = self._graph_trees[root]["porttree"].dbapi
3805 matches = graph_db.match_pkgs(atom)
3808 pkg = matches[-1] # highest match
3809 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
3810 return pkg, in_graph
3812 def _complete_graph(self):
# Pull the deep dependencies of the required sets (args/system/world)
# into the graph so that upgrades cannot silently break initially
# satisfied deps. Temporarily swaps _select_atoms/_select_package for
# their graph-restricted variants and forces "deep" mode. Only active
# under --complete-graph ("complete" in myparams).
3814 Add any deep dependencies of required sets (args, system, world) that
3815 have not been pulled into the graph yet. This ensures that the graph
3816 is consistent such that initially satisfied deep dependencies are not
3817 broken in the new graph. Initially unsatisfied dependencies are
3818 irrelevant since we only want to avoid breaking dependencies that are
3821 Since this method can consume enough time to disturb users, it is
3822 currently only enabled by the --complete-graph option.
3824 if "--buildpkgonly" in self.myopts or \
3825 "recurse" not in self.myparams:
3828 if "complete" not in self.myparams:
3829 # Skip this to avoid consuming enough time to disturb users.
3832 # Put the depgraph into a mode that causes it to only
3833 # select packages that have already been added to the
3834 # graph or those that are installed and have not been
3835 # scheduled for replacement. Also, toggle the "deep"
3836 # parameter so that all dependencies are traversed and
3838 self._select_atoms = self._select_atoms_from_graph
3839 self._select_package = self._select_pkg_from_graph
3840 already_deep = "deep" in self.myparams
3841 if not already_deep:
3842 self.myparams.add("deep")
3844 for root in self.roots:
3845 required_set_names = self._required_set_names.copy()
3846 if root == self.target_root and \
3847 (already_deep or "empty" in self.myparams):
3848 required_set_names.difference_update(self._sets)
3849 if not required_set_names and not self._ignored_deps:
3851 root_config = self.roots[root]
3852 setconfig = root_config.setconfig
3854 # Reuse existing SetArg instances when available.
3855 for arg in self.digraph.root_nodes():
3856 if not isinstance(arg, SetArg):
3858 if arg.root_config != root_config:
3860 if arg.name in required_set_names:
3862 required_set_names.remove(arg.name)
3863 # Create new SetArg instances only when necessary.
3864 for s in required_set_names:
3865 expanded_set = InternalPackageSet(
3866 initial_atoms=setconfig.getSetAtoms(s))
3867 atom = SETPREFIX + s
3868 args.append(SetArg(arg=atom, set=expanded_set,
3869 root_config=root_config))
3870 vardb = root_config.trees["vartree"].dbapi
# Queue every atom of every set arg for dependency traversal.
3872 for atom in arg.set:
3873 self._dep_stack.append(
3874 Dependency(atom=atom, root=root, parent=arg))
3875 if self._ignored_deps:
3876 self._dep_stack.extend(self._ignored_deps)
3877 self._ignored_deps = []
3878 if not self._create_graph(allow_unsatisfied=True):
3880 # Check the unsatisfied deps to see if any initially satisfied deps
3881 # will become unsatisfied due to an upgrade. Initially unsatisfied
3882 # deps are irrelevant since we only want to avoid breaking deps
3883 # that are initially satisfied.
3884 while self._unsatisfied_deps:
3885 dep = self._unsatisfied_deps.pop()
3886 matches = vardb.match_pkgs(dep.atom)
3888 self._initially_unsatisfied_deps.append(dep)
3890 # An scheduled installation broke a deep dependency.
3891 # Add the installed package to the graph so that it
3892 # will be appropriately reported as a slot collision
3893 # (possibly solvable via backtracking).
3894 pkg = matches[-1] # highest match
3895 if not self._add_pkg(pkg, dep):
3897 if not self._create_graph(allow_unsatisfied=True):
3901 def _pkg(self, cpv, type_name, root_config, installed=False):
# Fetch a Package instance for (type_name, root, cpv, "nomerge") from
# self._pkg_cache, constructing and caching one from the appropriate
# dbapi's metadata when absent. For ebuilds, USE and CHOST are
# resolved against the root's config before caching. A failed
# aux_get is converted to PackageNotFound.
3903 Get a package instance from the cache, or create a new
3904 one if necessary. Raises KeyError from aux_get if it
3905 failures for some reason (package does not exist or is
3910 operation = "nomerge"
3911 pkg = self._pkg_cache.get(
3912 (type_name, root_config.root, cpv, operation))
3914 tree_type = self.pkg_tree_map[type_name]
3915 db = root_config.trees[tree_type].dbapi
3916 db_keys = list(self._trees_orig[root_config.root][
3917 tree_type].dbapi._aux_cache_keys)
3919 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
3921 raise portage.exception.PackageNotFound(cpv)
3922 pkg = Package(cpv=cpv, metadata=metadata,
3923 root_config=root_config, installed=installed)
3924 if type_name == "ebuild":
3925 settings = self.pkgsettings[root_config.root]
3926 settings.setcpv(pkg)
3927 pkg.metadata["USE"] = settings["PORTAGE_USE"]
3928 pkg.metadata['CHOST'] = settings.get('CHOST', '')
3929 self._pkg_cache[pkg] = pkg
3932 def validate_blockers(self):
# Two-phase blocker processing:
#   1) For every installed package, compute its blocker atoms (using a
#      persistent BlockerCache validated against COUNTER) and register
#      them as Blocker nodes in self._blocker_parents.
#   2) For each blocker leaf, match it against the initial (installed)
#      and final (post-merge) dbs, drop irrelevant blockers, schedule
#      "uninstall" tasks with hard merge-order deps where a block can
#      be resolved by unmerging, and record the rest as unsolvable.
# NOTE(review): many interior lines (try/else/continue/return) are
# elided from this listing; code left byte-identical.
3933 """Remove any blockers from the digraph that do not match any of the
3934 packages within the graph. If necessary, create hard deps to ensure
3935 correct merge order such that mutually blocking packages are never
3936 installed simultaneously."""
3938 if "--buildpkgonly" in self.myopts or \
3939 "--nodeps" in self.myopts:
3942 #if "deep" in self.myparams:
3944 # Pull in blockers from all installed packages that haven't already
3945 # been pulled into the depgraph. This is not enabled by default
3946 # due to the performance penalty that is incurred by all the
3947 # additional dep_check calls that are required.
3949 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
3950 for myroot in self.trees:
3951 vardb = self.trees[myroot]["vartree"].dbapi
3952 portdb = self.trees[myroot]["porttree"].dbapi
3953 pkgsettings = self.pkgsettings[myroot]
3954 final_db = self.mydbapi[myroot]
3956 blocker_cache = BlockerCache(myroot, vardb)
3957 stale_cache = set(blocker_cache)
3960 stale_cache.discard(cpv)
3961 pkg_in_graph = self.digraph.contains(pkg)
3963 # Check for masked installed packages. Only warn about
3964 # packages that are in the graph in order to avoid warning
3965 # about those that will be automatically uninstalled during
3966 # the merge process or by --depclean.
3968 if pkg_in_graph and not visible(pkgsettings, pkg):
3969 self._masked_installed.add(pkg)
3971 blocker_atoms = None
3977 self._blocker_parents.child_nodes(pkg))
3982 self._irrelevant_blockers.child_nodes(pkg))
3985 if blockers is not None:
3986 blockers = set(str(blocker.atom) \
3987 for blocker in blockers)
3989 # If this node has any blockers, create a "nomerge"
3990 # node for it so that they can be enforced.
3991 self.spinner.update()
# COUNTER mismatch means the installed package changed since the
# cache entry was written, so the entry cannot be trusted.
3992 blocker_data = blocker_cache.get(cpv)
3993 if blocker_data is not None and \
3994 blocker_data.counter != long(pkg.metadata["COUNTER"]):
3997 # If blocker data from the graph is available, use
3998 # it to validate the cache and update the cache if
4000 if blocker_data is not None and \
4001 blockers is not None:
4002 if not blockers.symmetric_difference(
4003 blocker_data.atoms):
4007 if blocker_data is None and \
4008 blockers is not None:
4009 # Re-use the blockers from the graph.
4010 blocker_atoms = sorted(blockers)
4011 counter = long(pkg.metadata["COUNTER"])
4013 blocker_cache.BlockerData(counter, blocker_atoms)
4014 blocker_cache[pkg.cpv] = blocker_data
4018 blocker_atoms = blocker_data.atoms
4020 # Use aux_get() to trigger FakeVartree global
4021 # updates on *DEPEND when appropriate.
4022 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
4023 # It is crucial to pass in final_db here in order to
4024 # optimize dep_check calls by eliminating atoms via
4025 # dep_wordreduce and dep_eval calls.
4027 portage.dep._dep_check_strict = False
4029 success, atoms = portage.dep_check(depstr,
4030 final_db, pkgsettings, myuse=pkg.use.enabled,
4031 trees=self._graph_trees, myroot=myroot)
4032 except Exception, e:
4033 if isinstance(e, SystemExit):
4035 # This is helpful, for example, if a ValueError
4036 # is thrown from cpv_expand due to multiple
4037 # matches (this can happen if an atom lacks a
4039 show_invalid_depstring_notice(
4040 pkg, depstr, str(e))
4044 portage.dep._dep_check_strict = True
4046 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
4047 if replacement_pkg and \
4048 replacement_pkg[0].operation == "merge":
4049 # This package is being replaced anyway, so
4050 # ignore invalid dependencies so as not to
4051 # annoy the user too much (otherwise they'd be
4052 # forced to manually unmerge it first).
4054 show_invalid_depstring_notice(pkg, depstr, atoms)
4056 blocker_atoms = [myatom for myatom in atoms \
4057 if myatom.startswith("!")]
4058 blocker_atoms.sort()
4059 counter = long(pkg.metadata["COUNTER"])
4060 blocker_cache[cpv] = \
4061 blocker_cache.BlockerData(counter, blocker_atoms)
4064 for atom in blocker_atoms:
4065 blocker = Blocker(atom=portage.dep.Atom(atom),
4066 eapi=pkg.metadata["EAPI"], root=myroot)
4067 self._blocker_parents.add(blocker, pkg)
4068 except portage.exception.InvalidAtom, e:
4069 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
4070 show_invalid_depstring_notice(
4071 pkg, depstr, "Invalid Atom: %s" % (e,))
4073 for cpv in stale_cache:
4074 del blocker_cache[cpv]
4075 blocker_cache.flush()
4078 # Discard any "uninstall" tasks scheduled by previous calls
4079 # to this method, since those tasks may not make sense given
4080 # the current graph state.
4081 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
4082 if previous_uninstall_tasks:
4083 self._blocker_uninstalls = digraph()
4084 self.digraph.difference_update(previous_uninstall_tasks)
4086 for blocker in self._blocker_parents.leaf_nodes():
4087 self.spinner.update()
4088 root_config = self.roots[blocker.root]
4089 virtuals = root_config.settings.getvirtuals()
4090 myroot = blocker.root
4091 initial_db = self.trees[myroot]["vartree"].dbapi
4092 final_db = self.mydbapi[myroot]
4094 provider_virtual = False
4095 if blocker.cp in virtuals and \
4096 not self._have_new_virt(blocker.root, blocker.cp):
4097 provider_virtual = True
4099 # Use this to check PROVIDE for each matched package
4101 atom_set = InternalPackageSet(
4102 initial_atoms=[blocker.atom])
# Old-style virtual blockers expand to one atom per provider.
4104 if provider_virtual:
4106 for provider_entry in virtuals[blocker.cp]:
4108 portage.dep_getkey(provider_entry)
4109 atoms.append(blocker.atom.replace(
4110 blocker.cp, provider_cp))
4112 atoms = [blocker.atom]
4114 blocked_initial = set()
4116 for pkg in initial_db.match_pkgs(atom):
4117 if atom_set.findAtomForPackage(pkg):
4118 blocked_initial.add(pkg)
4120 blocked_final = set()
4122 for pkg in final_db.match_pkgs(atom):
4123 if atom_set.findAtomForPackage(pkg):
4124 blocked_final.add(pkg)
4126 if not blocked_initial and not blocked_final:
4127 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
4128 self._blocker_parents.remove(blocker)
4129 # Discard any parents that don't have any more blockers.
4130 for pkg in parent_pkgs:
4131 self._irrelevant_blockers.add(blocker, pkg)
4132 if not self._blocker_parents.child_nodes(pkg):
4133 self._blocker_parents.remove(pkg)
4135 for parent in self._blocker_parents.parent_nodes(blocker):
4136 unresolved_blocks = False
4137 depends_on_order = set()
4138 for pkg in blocked_initial:
4139 if pkg.slot_atom == parent.slot_atom:
4140 # TODO: Support blocks within slots in cases where it
4141 # might make sense. For example, a new version might
4142 # require that the old version be uninstalled at build
4145 if parent.installed:
4146 # Two currently installed packages conflict with
4147 # eachother. Ignore this case since the damage
4148 # is already done and this would be likely to
4149 # confuse users if displayed like a normal blocker.
4152 self._blocked_pkgs.add(pkg, blocker)
4154 if parent.operation == "merge":
4155 # Maybe the blocked package can be replaced or simply
4156 # unmerged to resolve this block.
4157 depends_on_order.add((pkg, parent))
4159 # None of the above blocker resolutions techniques apply,
4160 # so apparently this one is unresolvable.
4161 unresolved_blocks = True
4162 for pkg in blocked_final:
4163 if pkg.slot_atom == parent.slot_atom:
4164 # TODO: Support blocks within slots.
4166 if parent.operation == "nomerge" and \
4167 pkg.operation == "nomerge":
4168 # This blocker will be handled the next time that a
4169 # merge of either package is triggered.
4172 self._blocked_pkgs.add(pkg, blocker)
4174 # Maybe the blocking package can be
4175 # unmerged to resolve this block.
4176 if parent.operation == "merge" and pkg.installed:
4177 depends_on_order.add((pkg, parent))
4179 elif parent.operation == "nomerge":
4180 depends_on_order.add((parent, pkg))
4182 # None of the above blocker resolutions techniques apply,
4183 # so apparently this one is unresolvable.
4184 unresolved_blocks = True
4186 # Make sure we don't unmerge any package that have been pulled
4188 if not unresolved_blocks and depends_on_order:
4189 for inst_pkg, inst_task in depends_on_order:
4190 if self.digraph.contains(inst_pkg) and \
4191 self.digraph.parent_nodes(inst_pkg):
4192 unresolved_blocks = True
4195 if not unresolved_blocks and depends_on_order:
4196 for inst_pkg, inst_task in depends_on_order:
4197 uninst_task = Package(built=inst_pkg.built,
4198 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
4199 metadata=inst_pkg.metadata,
4200 operation="uninstall",
4201 root_config=inst_pkg.root_config,
4202 type_name=inst_pkg.type_name)
4203 self._pkg_cache[uninst_task] = uninst_task
4204 # Enforce correct merge order with a hard dep.
4205 self.digraph.addnode(uninst_task, inst_task,
4206 priority=BlockerDepPriority.instance)
4207 # Count references to this blocker so that it can be
4208 # invalidated after nodes referencing it have been
4210 self._blocker_uninstalls.addnode(uninst_task, blocker)
4211 if not unresolved_blocks and not depends_on_order:
4212 self._irrelevant_blockers.add(blocker, parent)
4213 self._blocker_parents.remove_edge(blocker, parent)
4214 if not self._blocker_parents.parent_nodes(blocker):
4215 self._blocker_parents.remove(blocker)
4216 if not self._blocker_parents.child_nodes(parent):
4217 self._blocker_parents.remove(parent)
4218 if unresolved_blocks:
4219 self._unsolvable_blockers.add(blocker, parent)
# Return whether blocker conflicts may be tolerated for this run.
# NOTE(review): the embedded numbering (4223..4227) is not contiguous, so
# this listing elides lines (presumably the accumulator initialization and
# the final return) -- confirm against the complete file.
4223 def _accept_blocker_conflicts(self):
# With any of these options nothing is actually merged to the live root
# (or dependencies are ignored entirely), so co-installed blocking
# packages cannot really collide.
4225 for x in ("--buildpkgonly", "--fetchonly",
4226 "--fetch-all-uri", "--nodeps"):
4227 if x in self.myopts:
4232 def _merge_order_bias(self, mygraph):
# Existing docstring body follows; the triple-quote lines are elided
# from this listing.
4234 For optimal leaf node selection, promote deep system runtime deps and
4235 order nodes from highest to lowest overall reference count.
# node_info maps each node to its reference count (number of parents).
# NOTE(review): the dict initialization line is elided here (4236-4238).
4239 for node in mygraph.order:
4240 node_info[node] = len(mygraph.parent_nodes(node))
4241 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
# cmp-style comparator (Python 2): a negative result sorts node1 earlier.
4243 def cmp_merge_preference(node1, node2):
# Uninstall operations are ordered specially relative to other nodes;
# the return statements of these branches are elided from this listing.
4245 if node1.operation == 'uninstall':
4246 if node2.operation == 'uninstall':
4250 if node2.operation == 'uninstall':
4251 if node1.operation == 'uninstall':
# Deep system runtime deps get priority over ordinary nodes (the
# branch's return lines are elided here).
4255 node1_sys = node1 in deep_system_deps
4256 node2_sys = node2 in deep_system_deps
4257 if node1_sys != node2_sys:
# Otherwise order by reference count, highest first.
4262 return node_info[node2] - node_info[node1]
4264 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
# Return a copy of the serialized merge/uninstall task list, computing
# and caching it on first use.
# NOTE(review): the `reversed` parameter shadows the builtin of the same
# name; kept as-is for API compatibility.
4266 def altlist(self, reversed=False):
4268 while self._serialized_tasks_cache is None:
4269 self._resolve_conflicts()
# The `try:` line is elided from this listing (numbering jumps
# 4269 -> 4271); _serialize_tasks_retry restarts the loop so that
# tasks are re-serialized after the graph has been completed.
4271 self._serialized_tasks_cache, self._scheduler_graph = \
4272 self._serialize_tasks()
4273 except self._serialize_tasks_retry:
# Copy so callers cannot mutate the cache; the optional reversal and
# the return statement are elided from this listing.
4276 retlist = self._serialized_tasks_cache[:]
4281 def schedulerGraph(self):
# Existing docstring body follows; the opening/closing quote lines are
# elided from this listing.
4283 The scheduler graph is identical to the normal one except that
4284 uninstall edges are reversed in specific cases that require
4285 conflicting packages to be temporarily installed simultaneously.
4286 This is intended for use by the Scheduler in it's parallelization
4287 logic. It ensures that temporary simultaneous installation of
4288 conflicting packages is avoided when appropriate (especially for
4289 !!atom blockers), but allowed in specific cases that require it.
4291 Note that this method calls break_refs() which alters the state of
4292 internal Package instances such that this depgraph instance should
4293 not be used to perform any more calculations.
4295 if self._scheduler_graph is None:
# NOTE(review): the statement that populates _scheduler_graph on demand
# is elided here (numbering jumps 4295 -> 4297) -- presumably a call
# that triggers task serialization; verify against the full file.
4297 self.break_refs(self._scheduler_graph.order)
4298 return self._scheduler_graph
4300 def break_refs(self, nodes):
# Existing docstring body follows; the quote lines are elided from this
# listing.
4302 Take a mergelist like that returned from self.altlist() and
4303 break any references that lead back to the depgraph. This is
4304 useful if you want to hold references to packages without
4305 also holding the depgraph on the heap.
# NOTE(review): the `for node in nodes:` loop header is elided from this
# listing (numbering jumps 4305 -> 4308); the hasattr test below guards
# against non-Package entries such as Blocker instances.
4308 if hasattr(node, "root_config"):
4309 # The FakeVartree references the _package_cache which
4310 # references the depgraph. So that Package instances don't
4311 # hold the depgraph and FakeVartree on the heap, replace
4312 # the RootConfig that references the FakeVartree with the
4313 # original RootConfig instance which references the actual
# (the final line of the original comment above is elided here)
4315 node.root_config = \
4316 self._trees_orig[node.root_config.root]["root_config"]
def _resolve_conflicts(self):
	"""
	Prepare the dependency graph for task serialization: complete the
	graph, validate blockers, and process any slot collisions.

	Raises self._unknown_internal_error when graph completion or
	blocker validation fails.
	"""
	# Both checks must pass, in this order, before slot conflicts
	# are considered.
	for check_passes in (self._complete_graph, self.validate_blockers):
		if not check_passes():
			raise self._unknown_internal_error()
	if self._slot_collision_info:
		self._process_slot_conflicts()
# Compute a valid merge ordering from self.digraph, returning a tuple of
# (retlist, scheduler_graph): the ordered task list (Package and Blocker
# instances) plus a graph for the Scheduler's parallelization logic.
# NOTE(review): the embedded line numbering (4328..4937) is not
# contiguous -- try/except headers, returns, initializations (asap_nodes,
# retlist, uninst_task, etc.) and break/continue statements are elided
# from this listing. Treat this text as an excerpt of the real method and
# confirm details against the complete file.
4328 def _serialize_tasks(self):
4330 if "--debug" in self.myopts:
4331 writemsg("\ndigraph:\n\n", noiselevel=-1)
4332 self.digraph.debug_print()
4333 writemsg("\n", noiselevel=-1)
4335 scheduler_graph = self.digraph.copy()
4337 if '--nodeps' in self.myopts:
4338 # Preserve the package order given on the command line.
4339 return ([node for node in scheduler_graph \
4340 if isinstance(node, Package) \
4341 and node.operation == 'merge'], scheduler_graph)
4343 mygraph=self.digraph.copy()
4344 # Prune "nomerge" root nodes if nothing depends on them, since
4345 # otherwise they slow down merge order calculation. Don't remove
4346 # non-root nodes since they help optimize merge order in some cases
4347 # such as revdep-rebuild.
4348 removed_nodes = set()
# Pruning loop: repeats until a pass removes nothing (the loop header
# and break are elided from this listing).
4350 for node in mygraph.root_nodes():
4351 if not isinstance(node, Package) or \
4352 node.installed or node.onlydeps:
4353 removed_nodes.add(node)
4355 self.spinner.update()
4356 mygraph.difference_update(removed_nodes)
4357 if not removed_nodes:
4359 removed_nodes.clear()
4360 self._merge_order_bias(mygraph)
# cmp-style comparator used to order members of a circular-dependency
# group; its return statements are elided from this listing.
4361 def cmp_circular_bias(n1, n2):
4363 RDEPEND is stronger than PDEPEND and this function
4364 measures such a strength bias within a circular
4365 dependency relationship.
4367 n1_n2_medium = n2 in mygraph.child_nodes(n1,
4368 ignore_priority=priority_range.ignore_medium_soft)
4369 n2_n1_medium = n1 in mygraph.child_nodes(n2,
4370 ignore_priority=priority_range.ignore_medium_soft)
4371 if n1_n2_medium == n2_n1_medium:
4376 myblocker_uninstalls = self._blocker_uninstalls.copy()
4378 # Contains uninstall tasks that have been scheduled to
4379 # occur after overlapping blockers have been installed.
4380 scheduled_uninstalls = set()
4381 # Contains any Uninstall tasks that have been ignored
4382 # in order to avoid the circular deps code path. These
4383 # correspond to blocker conflicts that could not be
4385 ignored_uninstall_tasks = set()
4386 have_uninstall_task = False
4387 complete = "complete" in self.myparams
4390 def get_nodes(**kwargs):
4392 Returns leaf nodes excluding Uninstall instances
4393 since those should be executed as late as possible.
4395 return [node for node in mygraph.leaf_nodes(**kwargs) \
4396 if isinstance(node, Package) and \
4397 (node.operation != "uninstall" or \
4398 node in scheduled_uninstalls)]
4400 # sys-apps/portage needs special treatment if ROOT="/"
4401 running_root = self._running_root.root
4402 from portage.const import PORTAGE_PACKAGE_ATOM
4403 runtime_deps = InternalPackageSet(
4404 initial_atoms=[PORTAGE_PACKAGE_ATOM])
4405 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
4406 PORTAGE_PACKAGE_ATOM)
4407 replacement_portage = self.mydbapi[running_root].match_pkgs(
4408 PORTAGE_PACKAGE_ATOM)
4411 running_portage = running_portage[0]
4413 running_portage = None
4415 if replacement_portage:
4416 replacement_portage = replacement_portage[0]
4418 replacement_portage = None
4420 if replacement_portage == running_portage:
4421 replacement_portage = None
4423 if replacement_portage is not None:
4424 # update from running_portage to replacement_portage asap
4425 asap_nodes.append(replacement_portage)
4427 if running_portage is not None:
# The `try:` header for this RDEPEND evaluation is elided here.
4429 portage_rdepend = self._select_atoms_highest_available(
4430 running_root, running_portage.metadata["RDEPEND"],
4431 myuse=running_portage.use.enabled,
4432 parent=running_portage, strict=False)
4433 except portage.exception.InvalidDependString, e:
4434 portage.writemsg("!!! Invalid RDEPEND in " + \
4435 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
4436 (running_root, running_portage.cpv, e), noiselevel=-1)
4438 portage_rdepend = []
4439 runtime_deps.update(atom for atom in portage_rdepend \
4440 if not atom.startswith("!"))
# Recursive group collector; its boolean return statements are elided
# from this listing.
4442 def gather_deps(ignore_priority, mergeable_nodes,
4443 selected_nodes, node):
4445 Recursively gather a group of nodes that RDEPEND on
4446 eachother. This ensures that they are merged as a group
4447 and get their RDEPENDs satisfied as soon as possible.
4449 if node in selected_nodes:
4451 if node not in mergeable_nodes:
4453 if node == replacement_portage and \
4454 mygraph.child_nodes(node,
4455 ignore_priority=priority_range.ignore_medium_soft):
4456 # Make sure that portage always has all of it's
4457 # RDEPENDs installed first.
4459 selected_nodes.add(node)
4460 for child in mygraph.child_nodes(node,
4461 ignore_priority=ignore_priority):
4462 if not gather_deps(ignore_priority,
4463 mergeable_nodes, selected_nodes, child):
# Priority filters that additionally never ignore hard blocker-uninstall
# edges; their early-return values are elided from this listing.
4467 def ignore_uninst_or_med(priority):
4468 if priority is BlockerDepPriority.instance:
4470 return priority_range.ignore_medium(priority)
4472 def ignore_uninst_or_med_soft(priority):
4473 if priority is BlockerDepPriority.instance:
4475 return priority_range.ignore_medium_soft(priority)
4477 tree_mode = "--tree" in self.myopts
4478 # Tracks whether or not the current iteration should prefer asap_nodes
4479 # if available. This is set to False when the previous iteration
4480 # failed to select any nodes. It is reset whenever nodes are
4481 # successfully selected.
4484 # Controls whether or not the current iteration should drop edges that
4485 # are "satisfied" by installed packages, in order to solve circular
4486 # dependencies. The deep runtime dependencies of installed packages are
4487 # not checked in this case (bug #199856), so it must be avoided
4488 # whenever possible.
4489 drop_satisfied = False
4491 # State of variables for successive iterations that loosen the
4492 # criteria for node selection.
4494 # iteration prefer_asap drop_satisfied
4499 # If no nodes are selected on the last iteration, it is due to
4500 # unresolved blockers or circular dependencies.
# Main selection loop: each pass picks leaf nodes, progressively
# loosening the edge-priority criteria until the graph is drained.
4502 while not mygraph.empty():
4503 self.spinner.update()
4504 selected_nodes = None
4505 ignore_priority = None
4506 if drop_satisfied or (prefer_asap and asap_nodes):
4507 priority_range = DepPrioritySatisfiedRange
4509 priority_range = DepPriorityNormalRange
4510 if prefer_asap and asap_nodes:
4511 # ASAP nodes are merged before their soft deps. Go ahead and
4512 # select root nodes here if necessary, since it's typical for
4513 # the parent to have been removed from the graph already.
4514 asap_nodes = [node for node in asap_nodes \
4515 if mygraph.contains(node)]
4516 for node in asap_nodes:
4517 if not mygraph.child_nodes(node,
4518 ignore_priority=priority_range.ignore_soft):
4519 selected_nodes = [node]
4520 asap_nodes.remove(node)
4522 if not selected_nodes and \
4523 not (prefer_asap and asap_nodes):
4524 for i in xrange(priority_range.NONE,
4525 priority_range.MEDIUM_SOFT + 1):
4526 ignore_priority = priority_range.ignore_priority[i]
4527 nodes = get_nodes(ignore_priority=ignore_priority)
4529 # If there is a mix of uninstall nodes with other
4530 # types, save the uninstall nodes for later since
4531 # sometimes a merge node will render an uninstall
4532 # node unnecessary (due to occupying the same slot),
4533 # and we want to avoid executing a separate uninstall
4534 # task in that case.
4536 good_uninstalls = []
4537 with_some_uninstalls_excluded = []
# The loop header iterating `nodes` is elided from this listing.
4539 if node.operation == "uninstall":
4540 slot_node = self.mydbapi[node.root
4541 ].match_pkgs(node.slot_atom)
4543 slot_node[0].operation == "merge":
4545 good_uninstalls.append(node)
4546 with_some_uninstalls_excluded.append(node)
4548 nodes = good_uninstalls
4549 elif with_some_uninstalls_excluded:
4550 nodes = with_some_uninstalls_excluded
4554 if ignore_priority is None and not tree_mode:
4555 # Greedily pop all of these nodes since no
4556 # relationship has been ignored. This optimization
4557 # destroys --tree output, so it's disabled in tree
4559 selected_nodes = nodes
4561 # For optimal merge order:
4562 # * Only pop one node.
4563 # * Removing a root node (node without a parent)
4564 # will not produce a leaf node, so avoid it.
4565 # * It's normal for a selected uninstall to be a
4566 # root node, so don't check them for parents.
4568 if node.operation == "uninstall" or \
4569 mygraph.parent_nodes(node):
4570 selected_nodes = [node]
# Fall back to group selection: gather mutually RDEPENDent nodes so
# they can be merged as a unit.
4576 if not selected_nodes:
4577 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
4579 mergeable_nodes = set(nodes)
4580 if prefer_asap and asap_nodes:
4582 for i in xrange(priority_range.SOFT,
4583 priority_range.MEDIUM_SOFT + 1):
4584 ignore_priority = priority_range.ignore_priority[i]
4586 if not mygraph.parent_nodes(node):
4588 selected_nodes = set()
4589 if gather_deps(ignore_priority,
4590 mergeable_nodes, selected_nodes, node):
4593 selected_nodes = None
4597 if prefer_asap and asap_nodes and not selected_nodes:
4598 # We failed to find any asap nodes to merge, so ignore
4599 # them for the next iteration.
4603 if selected_nodes and ignore_priority is not None:
4604 # Try to merge ignored medium_soft deps as soon as possible
4605 # if they're not satisfied by installed packages.
4606 for node in selected_nodes:
4607 children = set(mygraph.child_nodes(node))
4608 soft = children.difference(
4609 mygraph.child_nodes(node,
4610 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
4611 medium_soft = children.difference(
4612 mygraph.child_nodes(node,
4614 DepPrioritySatisfiedRange.ignore_medium_soft))
4615 medium_soft.difference_update(soft)
4616 for child in medium_soft:
4617 if child in selected_nodes:
4619 if child in asap_nodes:
4621 asap_nodes.append(child)
4623 if selected_nodes and len(selected_nodes) > 1:
4624 if not isinstance(selected_nodes, list):
4625 selected_nodes = list(selected_nodes)
4626 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
4628 if not selected_nodes and not myblocker_uninstalls.is_empty():
4629 # An Uninstall task needs to be executed in order to
4630 # avoid conflict if possible.
4633 priority_range = DepPrioritySatisfiedRange
4635 priority_range = DepPriorityNormalRange
4637 mergeable_nodes = get_nodes(
4638 ignore_priority=ignore_uninst_or_med)
4640 min_parent_deps = None
# Candidate loop: pick the safest uninstall task (fewest remaining
# parent deps); many guard continue/break statements are elided below.
4642 for task in myblocker_uninstalls.leaf_nodes():
4643 # Do some sanity checks so that system or world packages
4644 # don't get uninstalled inappropriately here (only really
4645 # necessary when --complete-graph has not been enabled).
4647 if task in ignored_uninstall_tasks:
4650 if task in scheduled_uninstalls:
4651 # It's been scheduled but it hasn't
4652 # been executed yet due to dependence
4653 # on installation of blocking packages.
4656 root_config = self.roots[task.root]
4657 inst_pkg = self._pkg_cache[
4658 ("installed", task.root, task.cpv, "nomerge")]
4660 if self.digraph.contains(inst_pkg):
4663 forbid_overlap = False
4664 heuristic_overlap = False
4665 for blocker in myblocker_uninstalls.parent_nodes(task):
4666 if blocker.eapi in ("0", "1"):
4667 heuristic_overlap = True
4668 elif blocker.atom.blocker.overlap.forbid:
4669 forbid_overlap = True
4671 if forbid_overlap and running_root == task.root:
4674 if heuristic_overlap and running_root == task.root:
4675 # Never uninstall sys-apps/portage or it's essential
4676 # dependencies, except through replacement.
4678 runtime_dep_atoms = \
4679 list(runtime_deps.iterAtomsForPackage(task))
4680 except portage.exception.InvalidDependString, e:
4681 portage.writemsg("!!! Invalid PROVIDE in " + \
4682 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
4683 (task.root, task.cpv, e), noiselevel=-1)
4687 # Don't uninstall a runtime dep if it appears
4688 # to be the only suitable one installed.
4690 vardb = root_config.trees["vartree"].dbapi
4691 for atom in runtime_dep_atoms:
4692 other_version = None
4693 for pkg in vardb.match_pkgs(atom):
4694 if pkg.cpv == task.cpv and \
4695 pkg.metadata["COUNTER"] == \
4696 task.metadata["COUNTER"]:
4700 if other_version is None:
4706 # For packages in the system set, don't take
4707 # any chances. If the conflict can't be resolved
4708 # by a normal replacement operation then abort.
4711 for atom in root_config.sets[
4712 "system"].iterAtomsForPackage(task):
4715 except portage.exception.InvalidDependString, e:
4716 portage.writemsg("!!! Invalid PROVIDE in " + \
4717 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
4718 (task.root, task.cpv, e), noiselevel=-1)
4724 # Note that the world check isn't always
4725 # necessary since self._complete_graph() will
4726 # add all packages from the system and world sets to the
4727 # graph. This just allows unresolved conflicts to be
4728 # detected as early as possible, which makes it possible
4729 # to avoid calling self._complete_graph() when it is
4730 # unnecessary due to blockers triggering an abortion.
4732 # For packages in the world set, go ahead an uninstall
4733 # when necessary, as long as the atom will be satisfied
4734 # in the final state.
4735 graph_db = self.mydbapi[task.root]
4738 for atom in root_config.sets[
4739 "world"].iterAtomsForPackage(task):
4741 for pkg in graph_db.match_pkgs(atom):
4748 self._blocked_world_pkgs[inst_pkg] = atom
4750 except portage.exception.InvalidDependString, e:
4751 portage.writemsg("!!! Invalid PROVIDE in " + \
4752 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
4753 (task.root, task.cpv, e), noiselevel=-1)
4759 # Check the deps of parent nodes to ensure that
4760 # the chosen task produces a leaf node. Maybe
4761 # this can be optimized some more to make the
4762 # best possible choice, but the current algorithm
4763 # is simple and should be near optimal for most
4765 mergeable_parent = False
4767 for parent in mygraph.parent_nodes(task):
4768 parent_deps.update(mygraph.child_nodes(parent,
4769 ignore_priority=priority_range.ignore_medium_soft))
4770 if parent in mergeable_nodes and \
4771 gather_deps(ignore_uninst_or_med_soft,
4772 mergeable_nodes, set(), parent):
4773 mergeable_parent = True
4775 if not mergeable_parent:
4778 parent_deps.remove(task)
4779 if min_parent_deps is None or \
4780 len(parent_deps) < min_parent_deps:
4781 min_parent_deps = len(parent_deps)
4784 if uninst_task is not None:
4785 # The uninstall is performed only after blocking
4786 # packages have been merged on top of it. File
4787 # collisions between blocking packages are detected
4788 # and removed from the list of files to be uninstalled.
4789 scheduled_uninstalls.add(uninst_task)
4790 parent_nodes = mygraph.parent_nodes(uninst_task)
4792 # Reverse the parent -> uninstall edges since we want
4793 # to do the uninstall after blocking packages have
4794 # been merged on top of it.
4795 mygraph.remove(uninst_task)
4796 for blocked_pkg in parent_nodes:
4797 mygraph.add(blocked_pkg, uninst_task,
4798 priority=BlockerDepPriority.instance)
4799 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
4800 scheduler_graph.add(blocked_pkg, uninst_task,
4801 priority=BlockerDepPriority.instance)
4803 # Reset the state variables for leaf node selection and
4804 # continue trying to select leaf nodes.
4806 drop_satisfied = False
4809 if not selected_nodes:
4810 # Only select root nodes as a last resort. This case should
4811 # only trigger when the graph is nearly empty and the only
4812 # remaining nodes are isolated (no parents or children). Since
4813 # the nodes must be isolated, ignore_priority is not needed.
4814 selected_nodes = get_nodes()
4816 if not selected_nodes and not drop_satisfied:
4817 drop_satisfied = True
4820 if not selected_nodes and not myblocker_uninstalls.is_empty():
4821 # If possible, drop an uninstall task here in order to avoid
4822 # the circular deps code path. The corresponding blocker will
4823 # still be counted as an unresolved conflict.
4825 for node in myblocker_uninstalls.leaf_nodes():
4827 mygraph.remove(node)
4832 ignored_uninstall_tasks.add(node)
4835 if uninst_task is not None:
4836 # Reset the state variables for leaf node selection and
4837 # continue trying to select leaf nodes.
4839 drop_satisfied = False
4842 if not selected_nodes:
4843 self._circular_deps_for_display = mygraph
4844 raise self._unknown_internal_error()
4846 # At this point, we've succeeded in selecting one or more nodes, so
4847 # reset state variables for leaf node selection.
4849 drop_satisfied = False
# Commit the selection: remove the chosen nodes from the working graph
# and append them (plus any solved blockers) to the ordered task list.
4851 mygraph.difference_update(selected_nodes)
4853 for node in selected_nodes:
4854 if isinstance(node, Package) and \
4855 node.operation == "nomerge":
4858 # Handle interactions between blockers
4859 # and uninstallation tasks.
4860 solved_blockers = set()
4862 if isinstance(node, Package) and \
4863 "uninstall" == node.operation:
4864 have_uninstall_task = True
4867 vardb = self.trees[node.root]["vartree"].dbapi
4868 previous_cpv = vardb.match(node.slot_atom)
4870 # The package will be replaced by this one, so remove
4871 # the corresponding Uninstall task if necessary.
4872 previous_cpv = previous_cpv[0]
4874 ("installed", node.root, previous_cpv, "uninstall")
4876 mygraph.remove(uninst_task)
4880 if uninst_task is not None and \
4881 uninst_task not in ignored_uninstall_tasks and \
4882 myblocker_uninstalls.contains(uninst_task):
4883 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
4884 myblocker_uninstalls.remove(uninst_task)
4885 # Discard any blockers that this Uninstall solves.
4886 for blocker in blocker_nodes:
4887 if not myblocker_uninstalls.child_nodes(blocker):
4888 myblocker_uninstalls.remove(blocker)
4889 solved_blockers.add(blocker)
4891 retlist.append(node)
4893 if (isinstance(node, Package) and \
4894 "uninstall" == node.operation) or \
4895 (uninst_task is not None and \
4896 uninst_task in scheduled_uninstalls):
4897 # Include satisfied blockers in the merge list
4898 # since the user might be interested and also
4899 # it serves as an indicator that blocking packages
4900 # will be temporarily installed simultaneously.
4901 for blocker in solved_blockers:
4902 retlist.append(Blocker(atom=blocker.atom,
4903 root=blocker.root, eapi=blocker.eapi,
# Anything still in the blocker-uninstall graph here is unresolvable;
# surface those blockers at the end of the returned list.
4906 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
4907 for node in myblocker_uninstalls.root_nodes():
4908 unsolvable_blockers.add(node)
4910 for blocker in unsolvable_blockers:
4911 retlist.append(blocker)
4913 # If any Uninstall tasks need to be executed in order
4914 # to avoid a conflict, complete the graph with any
4915 # dependencies that may have been initially
4916 # neglected (to ensure that unsafe Uninstall tasks
4917 # are properly identified and blocked from execution).
4918 if have_uninstall_task and \
4920 not unsolvable_blockers:
4921 self.myparams.add("complete")
4922 raise self._serialize_tasks_retry("")
4924 if unsolvable_blockers and \
4925 not self._accept_blocker_conflicts():
4926 self._unsatisfied_blockers_for_display = unsolvable_blockers
4927 self._serialized_tasks_cache = retlist[:]
4928 self._scheduler_graph = scheduler_graph
4929 raise self._unknown_internal_error()
4931 if self._slot_collision_info and \
4932 not self._accept_blocker_conflicts():
4933 self._serialized_tasks_cache = retlist[:]
4934 self._scheduler_graph = scheduler_graph
4935 raise self._unknown_internal_error()
4937 return retlist, scheduler_graph
# Print a diagnostic for a circular-dependency deadlock: a --tree style
# display of the cycle members followed by a debug dump and advice.
# NOTE(review): the embedded numbering (4939..4979) is not contiguous;
# loop headers and guards are elided from this listing.
4939 def _show_circular_deps(self, mygraph):
4940 # No leaf nodes are available, so we have a circular
4941 # dependency panic situation. Reduce the noise level to a
4942 # minimum via repeated elimination of root nodes since they
4943 # have no parents and thus can not be part of a cycle.
4945 root_nodes = mygraph.root_nodes(
4946 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
4949 mygraph.difference_update(root_nodes)
4950 # Display the USE flags that are enabled on nodes that are part
4951 # of dependency cycles in case that helps the user decide to
4952 # disable some of them.
4954 tempgraph = mygraph.copy()
4955 while not tempgraph.empty():
4956 nodes = tempgraph.leaf_nodes()
# Fallback when the remaining cycle has no leaves: pick the first
# node in insertion order (guard lines elided here).
4958 node = tempgraph.order[0]
4961 display_order.append(node)
4962 tempgraph.remove(node)
4963 display_order.reverse()
# Force a verbose tree display regardless of the user's options.
4964 self.myopts.pop("--quiet", None)
4965 self.myopts.pop("--verbose", None)
4966 self.myopts["--tree"] = True
4967 portage.writemsg("\n\n", noiselevel=-1)
4968 self.display(display_order)
4969 prefix = colorize("BAD", " * ")
4970 portage.writemsg("\n", noiselevel=-1)
4971 portage.writemsg(prefix + "Error: circular dependencies:\n",
4973 portage.writemsg("\n", noiselevel=-1)
4974 mygraph.debug_print()
4975 portage.writemsg("\n", noiselevel=-1)
4976 portage.writemsg(prefix + "Note that circular dependencies " + \
4977 "can often be avoided by temporarily\n", noiselevel=-1)
4978 portage.writemsg(prefix + "disabling USE flags that trigger " + \
4979 "optional dependencies.\n", noiselevel=-1)
def _show_merge_list(self):
	"""
	Display the cached serialized task list, unless that exact list
	(in either normal or reversed --tree order) has already been
	displayed.
	"""
	cached = self._serialized_tasks_cache
	if cached is None:
		return
	already_shown = self._displayed_list
	if already_shown and (
		already_shown == cached or
		already_shown == list(reversed(cached))):
		# Identical list was displayed previously; avoid repeating it.
		return
	shown = cached[:]
	if "--tree" in self.myopts:
		# Tree mode prints dependencies before their dependents.
		shown.reverse()
	self.display(shown)
# Report unsatisfied blocker conflicts: show the merge list, then for
# each blocker list the conflicting packages and the parents/atoms that
# pulled them in (pruned to keep the output readable).
# NOTE(review): the embedded numbering (4992..5086) is not contiguous;
# several initializations (msg list, conflict_pkgs dict, pruned sets,
# max_parents, indent) and break statements are elided from this listing.
4992 def _show_unsatisfied_blockers(self, blockers):
4993 self._show_merge_list()
4994 msg = "Error: The above package list contains " + \
4995 "packages which cannot be installed " + \
4996 "at the same time on the same system."
4997 prefix = colorize("BAD", " * ")
4998 from textwrap import wrap
4999 portage.writemsg("\n", noiselevel=-1)
5000 for line in wrap(msg, 70):
5001 portage.writemsg(prefix + line + "\n", noiselevel=-1)
5003 # Display the conflicting packages along with the packages
5004 # that pulled them in. This is helpful for troubleshooting
5005 # cases in which blockers don't solve automatically and
5006 # the reasons are not apparent from the normal merge list
# Collect each conflicting package together with its parent atoms.
5010 for blocker in blockers:
5011 for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
5012 self._blocker_parents.parent_nodes(blocker)):
5013 parent_atoms = self._parent_atoms.get(pkg)
5014 if not parent_atoms:
# Fall back to the world-set atom that blocked the package, if any.
5015 atom = self._blocked_world_pkgs.get(pkg)
5016 if atom is not None:
5017 parent_atoms = set([("@world", atom)])
5019 conflict_pkgs[pkg] = parent_atoms
5022 # Reduce noise by pruning packages that are only
5023 # pulled in by other conflict packages.
5025 for pkg, parent_atoms in conflict_pkgs.iteritems():
5026 relevant_parent = False
5027 for parent, atom in parent_atoms:
5028 if parent not in conflict_pkgs:
5029 relevant_parent = True
5031 if not relevant_parent:
5032 pruned_pkgs.add(pkg)
5033 for pkg in pruned_pkgs:
5034 del conflict_pkgs[pkg]
5040 # Max number of parents shown, to avoid flooding the display.
5042 for pkg, parent_atoms in conflict_pkgs.iteritems():
5046 # Prefer packages that are not directly involved in a conflict.
5047 for parent_atom in parent_atoms:
5048 if len(pruned_list) >= max_parents:
5050 parent, atom = parent_atom
5051 if parent not in conflict_pkgs:
5052 pruned_list.add(parent_atom)
# Second pass fills remaining display slots with any parents.
5054 for parent_atom in parent_atoms:
5055 if len(pruned_list) >= max_parents:
5057 pruned_list.add(parent_atom)
5059 omitted_parents = len(parent_atoms) - len(pruned_list)
5060 msg.append(indent + "%s pulled in by\n" % pkg)
5062 for parent_atom in pruned_list:
5063 parent, atom = parent_atom
5064 msg.append(2*indent)
5065 if isinstance(parent,
5066 (PackageArg, AtomArg)):
5067 # For PackageArg and AtomArg types, it's
5068 # redundant to display the atom attribute.
5069 msg.append(str(parent))
5071 # Display the specific atom from SetArg or
5073 msg.append("%s required by %s" % (atom, parent))
5077 msg.append(2*indent)
5078 msg.append("(and %d more)\n" % omitted_parents)
5082 sys.stderr.write("".join(msg))
5085 if "--quiet" not in self.myopts:
5086 show_blocker_docs_link()
5088 def display(self, mylist, favorites=[], verbosity=None):
5090 # This is used to prevent display_problems() from
5091 # redundantly displaying this exact same merge list
5092 # again via _show_merge_list().
5093 self._displayed_list = mylist
5095 if verbosity is None:
5096 verbosity = ("--quiet" in self.myopts and 1 or \
5097 "--verbose" in self.myopts and 3 or 2)
5098 favorites_set = InternalPackageSet(favorites)
5099 oneshot = "--oneshot" in self.myopts or \
5100 "--onlydeps" in self.myopts
5101 columns = "--columns" in self.myopts
5106 counters = PackageCounters()
5108 if verbosity == 1 and "--verbose" not in self.myopts:
5109 def create_use_string(*args):
5112 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
5114 is_new, reinst_flags,
5115 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
5116 alphabetical=("--alphabetical" in self.myopts)):
5124 cur_iuse = set(cur_iuse)
5125 enabled_flags = cur_iuse.intersection(cur_use)
5126 removed_iuse = set(old_iuse).difference(cur_iuse)
5127 any_iuse = cur_iuse.union(old_iuse)
5128 any_iuse = list(any_iuse)
5130 for flag in any_iuse:
5133 reinst_flag = reinst_flags and flag in reinst_flags
5134 if flag in enabled_flags:
5136 if is_new or flag in old_use and \
5137 (all_flags or reinst_flag):
5138 flag_str = red(flag)
5139 elif flag not in old_iuse:
5140 flag_str = yellow(flag) + "%*"
5141 elif flag not in old_use:
5142 flag_str = green(flag) + "*"
5143 elif flag in removed_iuse:
5144 if all_flags or reinst_flag:
5145 flag_str = yellow("-" + flag) + "%"
5148 flag_str = "(" + flag_str + ")"
5149 removed.append(flag_str)
5152 if is_new or flag in old_iuse and \
5153 flag not in old_use and \
5154 (all_flags or reinst_flag):
5155 flag_str = blue("-" + flag)
5156 elif flag not in old_iuse:
5157 flag_str = yellow("-" + flag)
5158 if flag not in iuse_forced:
5160 elif flag in old_use:
5161 flag_str = green("-" + flag) + "*"
5163 if flag in iuse_forced:
5164 flag_str = "(" + flag_str + ")"
5166 enabled.append(flag_str)
5168 disabled.append(flag_str)
5171 ret = " ".join(enabled)
5173 ret = " ".join(enabled + disabled + removed)
5175 ret = '%s="%s" ' % (name, ret)
5178 repo_display = RepoDisplay(self.roots)
5182 mygraph = self.digraph.copy()
5184 # If there are any Uninstall instances, add the corresponding
5185 # blockers to the digraph (useful for --tree display).
5187 executed_uninstalls = set(node for node in mylist \
5188 if isinstance(node, Package) and node.operation == "unmerge")
5190 for uninstall in self._blocker_uninstalls.leaf_nodes():
5191 uninstall_parents = \
5192 self._blocker_uninstalls.parent_nodes(uninstall)
5193 if not uninstall_parents:
5196 # Remove the corresponding "nomerge" node and substitute
5197 # the Uninstall node.
5198 inst_pkg = self._pkg_cache[
5199 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
5201 mygraph.remove(inst_pkg)
5206 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
5208 inst_pkg_blockers = []
5210 # Break the Package -> Uninstall edges.
5211 mygraph.remove(uninstall)
5213 # Resolution of a package's blockers
5214 # depend on it's own uninstallation.
5215 for blocker in inst_pkg_blockers:
5216 mygraph.add(uninstall, blocker)
5218 # Expand Package -> Uninstall edges into
5219 # Package -> Blocker -> Uninstall edges.
5220 for blocker in uninstall_parents:
5221 mygraph.add(uninstall, blocker)
5222 for parent in self._blocker_parents.parent_nodes(blocker):
5223 if parent != inst_pkg:
5224 mygraph.add(blocker, parent)
5226 # If the uninstall task did not need to be executed because
5227 # of an upgrade, display Blocker -> Upgrade edges since the
5228 # corresponding Blocker -> Uninstall edges will not be shown.
5230 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
5231 if upgrade_node is not None and \
5232 uninstall not in executed_uninstalls:
5233 for blocker in uninstall_parents:
5234 mygraph.add(upgrade_node, blocker)
5236 unsatisfied_blockers = []
5241 if isinstance(x, Blocker) and not x.satisfied:
5242 unsatisfied_blockers.append(x)
5245 if "--tree" in self.myopts:
5246 depth = len(tree_nodes)
5247 while depth and graph_key not in \
5248 mygraph.child_nodes(tree_nodes[depth-1]):
5251 tree_nodes = tree_nodes[:depth]
5252 tree_nodes.append(graph_key)
5253 display_list.append((x, depth, True))
5254 shown_edges.add((graph_key, tree_nodes[depth-1]))
5256 traversed_nodes = set() # prevent endless circles
5257 traversed_nodes.add(graph_key)
5258 def add_parents(current_node, ordered):
5260 # Do not traverse to parents if this node is an
5261 # an argument or a direct member of a set that has
5262 # been specified as an argument (system or world).
5263 if current_node not in self._set_nodes:
5264 parent_nodes = mygraph.parent_nodes(current_node)
5266 child_nodes = set(mygraph.child_nodes(current_node))
5267 selected_parent = None
5268 # First, try to avoid a direct cycle.
5269 for node in parent_nodes:
5270 if not isinstance(node, (Blocker, Package)):
5272 if node not in traversed_nodes and \
5273 node not in child_nodes:
5274 edge = (current_node, node)
5275 if edge in shown_edges:
5277 selected_parent = node
5279 if not selected_parent:
5280 # A direct cycle is unavoidable.
5281 for node in parent_nodes:
5282 if not isinstance(node, (Blocker, Package)):
5284 if node not in traversed_nodes:
5285 edge = (current_node, node)
5286 if edge in shown_edges:
5288 selected_parent = node
5291 shown_edges.add((current_node, selected_parent))
5292 traversed_nodes.add(selected_parent)
5293 add_parents(selected_parent, False)
5294 display_list.append((current_node,
5295 len(tree_nodes), ordered))
5296 tree_nodes.append(current_node)
5298 add_parents(graph_key, True)
5300 display_list.append((x, depth, True))
5301 mylist = display_list
5302 for x in unsatisfied_blockers:
5303 mylist.append((x, 0, True))
5305 last_merge_depth = 0
5306 for i in xrange(len(mylist)-1,-1,-1):
5307 graph_key, depth, ordered = mylist[i]
5308 if not ordered and depth == 0 and i > 0 \
5309 and graph_key == mylist[i-1][0] and \
5310 mylist[i-1][1] == 0:
5311 # An ordered node got a consecutive duplicate when the tree was
5315 if ordered and graph_key[-1] != "nomerge":
5316 last_merge_depth = depth
5318 if depth >= last_merge_depth or \
5319 i < len(mylist) - 1 and \
5320 depth >= mylist[i+1][1]:
5323 from portage import flatten
5324 from portage.dep import use_reduce, paren_reduce
5325 # files to fetch list - avoids counting a same file twice
5326 # in size display (verbose mode)
5329 # Use this set to detect when all the "repoadd" strings are "[0]"
5330 # and disable the entire repo display in this case.
5333 for mylist_index in xrange(len(mylist)):
5334 x, depth, ordered = mylist[mylist_index]
5338 portdb = self.trees[myroot]["porttree"].dbapi
5339 bindb = self.trees[myroot]["bintree"].dbapi
5340 vardb = self.trees[myroot]["vartree"].dbapi
5341 vartree = self.trees[myroot]["vartree"]
5342 pkgsettings = self.pkgsettings[myroot]
5345 indent = " " * depth
5347 if isinstance(x, Blocker):
5349 blocker_style = "PKG_BLOCKER_SATISFIED"
5350 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
5352 blocker_style = "PKG_BLOCKER"
5353 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
5355 counters.blocks += 1
5357 counters.blocks_satisfied += 1
5358 resolved = portage.key_expand(
5359 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
5360 if "--columns" in self.myopts and "--quiet" in self.myopts:
5361 addl += " " + colorize(blocker_style, resolved)
5363 addl = "[%s %s] %s%s" % \
5364 (colorize(blocker_style, "blocks"),
5365 addl, indent, colorize(blocker_style, resolved))
5366 block_parents = self._blocker_parents.parent_nodes(x)
5367 block_parents = set([pnode[2] for pnode in block_parents])
5368 block_parents = ", ".join(block_parents)
5370 addl += colorize(blocker_style,
5371 " (\"%s\" is blocking %s)") % \
5372 (str(x.atom).lstrip("!"), block_parents)
5374 addl += colorize(blocker_style,
5375 " (is blocking %s)") % block_parents
5376 if isinstance(x, Blocker) and x.satisfied:
5381 blockers.append(addl)
5384 pkg_merge = ordered and pkg_status == "merge"
5385 if not pkg_merge and pkg_status == "merge":
5386 pkg_status = "nomerge"
5387 built = pkg_type != "ebuild"
5388 installed = pkg_type == "installed"
5390 metadata = pkg.metadata
5392 repo_name = metadata["repository"]
5393 if pkg_type == "ebuild":
5394 ebuild_path = portdb.findname(pkg_key)
5395 if not ebuild_path: # shouldn't happen
5396 raise portage.exception.PackageNotFound(pkg_key)
5397 repo_path_real = os.path.dirname(os.path.dirname(
5398 os.path.dirname(ebuild_path)))
5400 repo_path_real = portdb.getRepositoryPath(repo_name)
5401 pkg_use = list(pkg.use.enabled)
5403 restrict = flatten(use_reduce(paren_reduce(
5404 pkg.metadata["RESTRICT"]), uselist=pkg_use))
5405 except portage.exception.InvalidDependString, e:
5406 if not pkg.installed:
5407 show_invalid_depstring_notice(x,
5408 pkg.metadata["RESTRICT"], str(e))
5412 if "ebuild" == pkg_type and x[3] != "nomerge" and \
5413 "fetch" in restrict:
5416 counters.restrict_fetch += 1
5417 if portdb.fetch_check(pkg_key, pkg_use):
5420 counters.restrict_fetch_satisfied += 1
5422 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
5423 #param is used for -u, where you still *do* want to see when something is being upgraded.
5426 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
5427 if vardb.cpv_exists(pkg_key):
5428 addl=" "+yellow("R")+fetch+" "
5431 counters.reinst += 1
5432 elif pkg_status == "uninstall":
5433 counters.uninst += 1
5434 # filter out old-style virtual matches
5435 elif installed_versions and \
5436 portage.cpv_getkey(installed_versions[0]) == \
5437 portage.cpv_getkey(pkg_key):
5438 myinslotlist = vardb.match(pkg.slot_atom)
5439 # If this is the first install of a new-style virtual, we
5440 # need to filter out old-style virtual matches.
5441 if myinslotlist and \
5442 portage.cpv_getkey(myinslotlist[0]) != \
5443 portage.cpv_getkey(pkg_key):
5446 myoldbest = myinslotlist[:]
5448 if not portage.dep.cpvequal(pkg_key,
5449 portage.best([pkg_key] + myoldbest)):
5451 addl += turquoise("U")+blue("D")
5453 counters.downgrades += 1
5456 addl += turquoise("U") + " "
5458 counters.upgrades += 1
5460 # New slot, mark it new.
5461 addl = " " + green("NS") + fetch + " "
5462 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
5464 counters.newslot += 1
5466 if "--changelog" in self.myopts:
5467 inst_matches = vardb.match(pkg.slot_atom)
5469 changelogs.extend(self.calc_changelog(
5470 portdb.findname(pkg_key),
5471 inst_matches[0], pkg_key))
5473 addl = " " + green("N") + " " + fetch + " "
5482 forced_flags = set()
5483 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
5484 forced_flags.update(pkgsettings.useforce)
5485 forced_flags.update(pkgsettings.usemask)
5487 cur_use = [flag for flag in pkg.use.enabled \
5488 if flag in pkg.iuse.all]
5489 cur_iuse = sorted(pkg.iuse.all)
5491 if myoldbest and myinslotlist:
5492 previous_cpv = myoldbest[0]
5494 previous_cpv = pkg.cpv
5495 if vardb.cpv_exists(previous_cpv):
5496 old_iuse, old_use = vardb.aux_get(
5497 previous_cpv, ["IUSE", "USE"])
5498 old_iuse = list(set(
5499 filter_iuse_defaults(old_iuse.split())))
5501 old_use = old_use.split()
5508 old_use = [flag for flag in old_use if flag in old_iuse]
5510 use_expand = pkgsettings["USE_EXPAND"].lower().split()
5512 use_expand.reverse()
5513 use_expand_hidden = \
5514 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
5516 def map_to_use_expand(myvals, forcedFlags=False,
5520 for exp in use_expand:
5523 for val in myvals[:]:
5524 if val.startswith(exp.lower()+"_"):
5525 if val in forced_flags:
5526 forced[exp].add(val[len(exp)+1:])
5527 ret[exp].append(val[len(exp)+1:])
5530 forced["USE"] = [val for val in myvals \
5531 if val in forced_flags]
5533 for exp in use_expand_hidden:
5539 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
5540 # are the only thing that triggered reinstallation.
5541 reinst_flags_map = {}
5542 reinstall_for_flags = self._reinstall_nodes.get(pkg)
5543 reinst_expand_map = None
5544 if reinstall_for_flags:
5545 reinst_flags_map = map_to_use_expand(
5546 list(reinstall_for_flags), removeHidden=False)
5547 for k in list(reinst_flags_map):
5548 if not reinst_flags_map[k]:
5549 del reinst_flags_map[k]
5550 if not reinst_flags_map.get("USE"):
5551 reinst_expand_map = reinst_flags_map.copy()
5552 reinst_expand_map.pop("USE", None)
5553 if reinst_expand_map and \
5554 not set(reinst_expand_map).difference(
5556 use_expand_hidden = \
5557 set(use_expand_hidden).difference(
5560 cur_iuse_map, iuse_forced = \
5561 map_to_use_expand(cur_iuse, forcedFlags=True)
5562 cur_use_map = map_to_use_expand(cur_use)
5563 old_iuse_map = map_to_use_expand(old_iuse)
5564 old_use_map = map_to_use_expand(old_use)
5567 use_expand.insert(0, "USE")
5569 for key in use_expand:
5570 if key in use_expand_hidden:
5572 verboseadd += create_use_string(key.upper(),
5573 cur_iuse_map[key], iuse_forced[key],
5574 cur_use_map[key], old_iuse_map[key],
5575 old_use_map[key], is_new,
5576 reinst_flags_map.get(key))
5581 if pkg_type == "ebuild" and pkg_merge:
5583 myfilesdict = portdb.getfetchsizes(pkg_key,
5584 useflags=pkg_use, debug=self.edebug)
5585 except portage.exception.InvalidDependString, e:
5586 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
5587 show_invalid_depstring_notice(x, src_uri, str(e))
5590 if myfilesdict is None:
5591 myfilesdict="[empty/missing/bad digest]"
5593 for myfetchfile in myfilesdict:
5594 if myfetchfile not in myfetchlist:
5595 mysize+=myfilesdict[myfetchfile]
5596 myfetchlist.append(myfetchfile)
5598 counters.totalsize += mysize
5599 verboseadd += format_size(mysize)
5602 # assign index for a previous version in the same slot
5603 has_previous = False
5604 repo_name_prev = None
5605 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
5607 slot_matches = vardb.match(slot_atom)
5610 repo_name_prev = vardb.aux_get(slot_matches[0],
5613 # now use the data to generate output
5614 if pkg.installed or not has_previous:
5615 repoadd = repo_display.repoStr(repo_path_real)
5617 repo_path_prev = None
5619 repo_path_prev = portdb.getRepositoryPath(
5621 if repo_path_prev == repo_path_real:
5622 repoadd = repo_display.repoStr(repo_path_real)
5624 repoadd = "%s=>%s" % (
5625 repo_display.repoStr(repo_path_prev),
5626 repo_display.repoStr(repo_path_real))
5628 repoadd_set.add(repoadd)
5630 xs = [portage.cpv_getkey(pkg_key)] + \
5631 list(portage.catpkgsplit(pkg_key)[2:])
5638 if "COLUMNWIDTH" in self.settings:
5640 mywidth = int(self.settings["COLUMNWIDTH"])
5641 except ValueError, e:
5642 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
5644 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
5645 self.settings["COLUMNWIDTH"], noiselevel=-1)
5647 oldlp = mywidth - 30
5650 # Convert myoldbest from a list to a string.
5654 for pos, key in enumerate(myoldbest):
5655 key = portage.catpkgsplit(key)[2] + \
5656 "-" + portage.catpkgsplit(key)[3]
5657 if key[-3:] == "-r0":
5659 myoldbest[pos] = key
5660 myoldbest = blue("["+", ".join(myoldbest)+"]")
5663 root_config = self.roots[myroot]
5664 system_set = root_config.sets["system"]
5665 world_set = root_config.sets["world"]
5670 pkg_system = system_set.findAtomForPackage(pkg)
5671 pkg_world = world_set.findAtomForPackage(pkg)
5672 if not (oneshot or pkg_world) and \
5673 myroot == self.target_root and \
5674 favorites_set.findAtomForPackage(pkg):
5675 # Maybe it will be added to world now.
5676 if create_world_atom(pkg, favorites_set, root_config):
5678 except portage.exception.InvalidDependString:
5679 # This is reported elsewhere if relevant.
5682 def pkgprint(pkg_str):
5685 return colorize("PKG_MERGE_SYSTEM", pkg_str)
5687 return colorize("PKG_MERGE_WORLD", pkg_str)
5689 return colorize("PKG_MERGE", pkg_str)
5690 elif pkg_status == "uninstall":
5691 return colorize("PKG_UNINSTALL", pkg_str)
5694 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
5696 return colorize("PKG_NOMERGE_WORLD", pkg_str)
5698 return colorize("PKG_NOMERGE", pkg_str)
5701 properties = flatten(use_reduce(paren_reduce(
5702 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
5703 except portage.exception.InvalidDependString, e:
5704 if not pkg.installed:
5705 show_invalid_depstring_notice(pkg,
5706 pkg.metadata["PROPERTIES"], str(e))
5710 interactive = "interactive" in properties
5711 if interactive and pkg.operation == "merge":
5712 addl = colorize("WARN", "I") + addl[1:]
5714 counters.interactive += 1
5719 if "--columns" in self.myopts:
5720 if "--quiet" in self.myopts:
5721 myprint=addl+" "+indent+pkgprint(pkg_cp)
5722 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
5723 myprint=myprint+myoldbest
5724 myprint=myprint+darkgreen("to "+x[1])
5728 myprint = "[%s] %s%s" % \
5729 (pkgprint(pkg_status.ljust(13)),
5730 indent, pkgprint(pkg.cp))
5732 myprint = "[%s %s] %s%s" % \
5733 (pkgprint(pkg.type_name), addl,
5734 indent, pkgprint(pkg.cp))
5735 if (newlp-nc_len(myprint)) > 0:
5736 myprint=myprint+(" "*(newlp-nc_len(myprint)))
5737 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
5738 if (oldlp-nc_len(myprint)) > 0:
5739 myprint=myprint+" "*(oldlp-nc_len(myprint))
5740 myprint=myprint+myoldbest
5741 myprint += darkgreen("to " + pkg.root)
5744 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
5746 myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
5747 myprint += indent + pkgprint(pkg_key) + " " + \
5748 myoldbest + darkgreen("to " + myroot)
5750 if "--columns" in self.myopts:
5751 if "--quiet" in self.myopts:
5752 myprint=addl+" "+indent+pkgprint(pkg_cp)
5753 myprint=myprint+" "+green(xs[1]+xs[2])+" "
5754 myprint=myprint+myoldbest
5758 myprint = "[%s] %s%s" % \
5759 (pkgprint(pkg_status.ljust(13)),
5760 indent, pkgprint(pkg.cp))
5762 myprint = "[%s %s] %s%s" % \
5763 (pkgprint(pkg.type_name), addl,
5764 indent, pkgprint(pkg.cp))
5765 if (newlp-nc_len(myprint)) > 0:
5766 myprint=myprint+(" "*(newlp-nc_len(myprint)))
5767 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
5768 if (oldlp-nc_len(myprint)) > 0:
5769 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
5770 myprint += myoldbest
5773 myprint = "[%s] %s%s %s" % \
5774 (pkgprint(pkg_status.ljust(13)),
5775 indent, pkgprint(pkg.cpv),
5778 myprint = "[%s %s] %s%s %s" % \
5779 (pkgprint(pkg_type), addl, indent,
5780 pkgprint(pkg.cpv), myoldbest)
5782 if columns and pkg.operation == "uninstall":
5784 p.append((myprint, verboseadd, repoadd))
5786 if "--tree" not in self.myopts and \
5787 "--quiet" not in self.myopts and \
5788 not self._opts_no_restart.intersection(self.myopts) and \
5789 pkg.root == self._running_root.root and \
5790 portage.match_from_list(
5791 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
5792 not vardb.cpv_exists(pkg.cpv) and \
5793 "--quiet" not in self.myopts:
5794 if mylist_index < len(mylist) - 1:
5795 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
5796 p.append(colorize("WARN", " then resume the merge."))
5799 show_repos = repoadd_set and repoadd_set != set(["0"])
5802 if isinstance(x, basestring):
5803 out.write("%s\n" % (x,))
5806 myprint, verboseadd, repoadd = x
5809 myprint += " " + verboseadd
5811 if show_repos and repoadd:
5812 myprint += " " + teal("[%s]" % repoadd)
5814 out.write("%s\n" % (myprint,))
5823 sys.stdout.write(str(repo_display))
5825 if "--changelog" in self.myopts:
5827 for revision,text in changelogs:
5828 print bold('*'+revision)
5829 sys.stdout.write(text)
def display_problems(self):
	"""
	Display problems with the dependency graph such as slot collisions.
	This is called internally by display() to show the problems _after_
	the merge list where it is most likely to be seen, but if display()
	is not going to be called then this method should be called explicitly
	to ensure that the user is notified of problems with the graph.

	All output goes to stderr, except for unsatisfied dependencies which
	go to stdout for parsing by programs such as autounmask.
	"""

	# Note that show_masked_packages() sends it's output to
	# stdout, and some programs such as autounmask parse the
	# output in cases when emerge bails out. However, when
	# show_masked_packages() is called for installed packages
	# here, the message is a warning that is more appropriate
	# to send to stderr, so temporarily redirect stdout to
	# stderr. TODO: Fix output code so there's a cleaner way
	# to redirect everything to stderr.
	sys.stdout = sys.stderr
	self._display_problems()
	# NOTE(review): sys.stdout is presumably restored to its original
	# value once _display_problems() returns — confirm the try/finally
	# in the full source.

	# This goes to stdout for parsing by programs like autounmask.
	for pargs, kwargs in self._unsatisfied_deps_for_display:
		self._show_unsatisfied_dep(*pargs, **kwargs)
def _display_problems(self):
	# Report graph problems to stderr: circular deps, unsatisfied
	# blockers or slot conflicts, missing arguments, package.provided
	# conflicts, and masked installed packages, in that order.
	if self._circular_deps_for_display is not None:
		self._show_circular_deps(
			self._circular_deps_for_display)

	# The user is only notified of a slot conflict if
	# there are no unresolvable blocker conflicts.
	if self._unsatisfied_blockers_for_display is not None:
		self._show_unsatisfied_blockers(
			self._unsatisfied_blockers_for_display)
	self._show_slot_collision_notice()

	# TODO: Add generic support for "set problem" handlers so that
	# the below warnings aren't special cases for world only.

	if self._missing_args:
		world_problems = False
		if "world" in self._sets:
			# Filter out indirect members of world (from nested sets)
			# since only direct members of world are desired here.
			world_set = self.roots[self.target_root].sets["world"]
			for arg, atom in self._missing_args:
				if arg.name == "world" and atom in world_set:
					world_problems = True

		sys.stderr.write("\n!!! Problems have been " + \
			"detected with your world file\n")
		sys.stderr.write("!!! Please run " + \
			green("emaint --check world")+"\n\n")

	if self._missing_args:
		sys.stderr.write("\n" + colorize("BAD", "!!!") + \
			" Ebuilds for the following packages are either all\n")
		sys.stderr.write(colorize("BAD", "!!!") + \
			" masked or don't exist:\n")
		sys.stderr.write(" ".join(str(atom) for arg, atom in \
			self._missing_args) + "\n")

	if self._pprovided_args:
		# Group offending atoms and record which sets pulled each one
		# in, so the report below can show "pulled in by ...".
		for arg, atom in self._pprovided_args:
			if isinstance(arg, SetArg):
				arg_atom = (atom, atom)
			arg_atom = (arg.arg, atom)
			refs = arg_refs.setdefault(arg_atom, [])
			if parent not in refs:
		msg.append(bad("\nWARNING: "))
		if len(self._pprovided_args) > 1:
			msg.append("Requested packages will not be " + \
				"merged because they are listed in\n")
			msg.append("A requested package will not be " + \
				"merged because it is listed in\n")
		msg.append("package.provided:\n\n")
		problems_sets = set()
		for (arg, atom), refs in arg_refs.iteritems():
			problems_sets.update(refs)
			ref_string = ", ".join(["'%s'" % name for name in refs])
			ref_string = " pulled in by " + ref_string
			msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
		if "world" in problems_sets:
			# Give concrete remediation advice when world is involved.
			msg.append("This problem can be solved in one of the following ways:\n\n")
			msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
			msg.append(" B) Uninstall offending packages (cleans them from world).\n")
			msg.append(" C) Remove offending entries from package.provided.\n\n")
			msg.append("The best course of action depends on the reason that an offending\n")
			msg.append("package.provided entry exists.\n\n")
		sys.stderr.write("".join(msg))

	# Installed packages that have since become masked get a warning.
	masked_packages = []
	for pkg in self._masked_installed:
		root_config = pkg.root_config
		pkgsettings = self.pkgsettings[pkg.root]
		mreasons = get_masking_status(pkg, pkgsettings, root_config)
		masked_packages.append((root_config, pkgsettings,
			pkg.cpv, pkg.metadata, mreasons))
	sys.stderr.write("\n" + colorize("BAD", "!!!") + \
		" The following installed packages are masked:\n")
	show_masked_packages(masked_packages)
def calc_changelog(self,ebuildpath,current,next):
	"""
	Return the ChangeLog entries that apply when upgrading from
	version ``current`` to version ``next`` of a package.

	@param ebuildpath: path to the ebuild that is about to be merged
	@param current: installed cpv (category/package-version)
	@param next: cpv that is about to be merged
	@rtype: list
	@return: (version, text) tuples in ChangeLog order (newest first),
		or [] when no ChangeLog is available or the span between the
		two versions cannot be determined
	"""
	if ebuildpath is None or not os.path.exists(ebuildpath):
		return []
	# Strip the category and any "-r0" suffix so the versions compare
	# equal to the tags produced by find_changelog_tags().
	current = '-'.join(portage.catpkgsplit(current)[1:])
	if current.endswith('-r0'):
		current = current[:-3]
	next = '-'.join(portage.catpkgsplit(next)[1:])
	if next.endswith('-r0'):
		next = next[:-3]
	changelogpath = os.path.join(os.path.split(ebuildpath)[0], 'ChangeLog')
	try:
		# Close the file explicitly instead of leaking the descriptor;
		# a missing or unreadable ChangeLog is not an error here.
		f = open(changelogpath)
		try:
			changelog = f.read()
		finally:
			f.close()
	except EnvironmentError:
		return []
	divisions = self.find_changelog_tags(changelog)
	# Skip entries for all revisions above the one we are about to emerge.
	for i in range(len(divisions)):
		if divisions[i][0] == next:
			divisions = divisions[i:]
			break
	# Find out how many entries we are going to display.
	for i in range(len(divisions)):
		if divisions[i][0] == current:
			divisions = divisions[:i]
			break
	else:
		# Couldn't find the current revision in the list; display nothing.
		return []
	return divisions
def find_changelog_tags(self,changelog):
	"""
	Split a ChangeLog file into per-version sections.

	@param changelog: the full text of a ChangeLog file
	@rtype: list
	@return: (version, text) tuples in file order, where version has
		any '.ebuild' or '-r0' suffix stripped; text that precedes
		the first version header is discarded
	"""
	divs = []
	release = None
	# Hoist the compiled pattern out of the loop; it matches version
	# headers of the form "*pkg-1.0 (date)" at the start of a line.
	header_re = re.compile(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n', re.M)
	while True:
		match = header_re.search(changelog)
		if match is None:
			# No more headers: the remaining text belongs to the
			# last release seen (if any).
			if release is not None:
				divs.append((release, changelog))
			return divs
		if release is not None:
			# Text up to the next header belongs to the previous release.
			divs.append((release, changelog[:match.start()]))
		changelog = changelog[match.end():]
		release = match.group(1)
		# Normalize the tag so it compares equal to cpv-derived
		# versions used by calc_changelog().
		if release.endswith('.ebuild'):
			release = release[:-7]
		if release.endswith('-r0'):
			release = release[:-3]
def saveNomergeFavorites(self):
	"""Find atoms in favorites that are not in the mergelist and add them
	to the world file if necessary."""
	# Under any of these options the world file must not be modified.
	for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
		"--oneshot", "--onlydeps", "--pretend"):
		if x in self.myopts:
	root_config = self.roots[self.target_root]
	world_set = root_config.sets["world"]

	# Lock the world set while updating, when the backend supports it.
	world_locked = False
	if hasattr(world_set, "lock"):

	if hasattr(world_set, "load"):
		world_set.load() # maybe it's changed on disk

	args_set = self._sets["args"]
	portdb = self.trees[self.target_root]["porttree"].dbapi
	added_favorites = set()
	# Collect world atoms for requested packages that ended up as
	# "nomerge" nodes (already satisfied, so display() won't add them).
	for x in self._set_nodes:
		pkg_type, root, pkg_key, pkg_status = x
		if pkg_status != "nomerge":
		myfavkey = create_world_atom(x, args_set, root_config)
		if myfavkey in added_favorites:
		added_favorites.add(myfavkey)
	except portage.exception.InvalidDependString, e:
		writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
			(pkg_key, str(e)), noiselevel=-1)
		writemsg("!!! see '%s'\n\n" % os.path.join(
			root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
	# Also record any requested sets (with SETPREFIX) that qualify as
	# world candidates, then write everything in one update.
	for k in self._sets:
		if k in ("args", "world") or not root_config.sets[k].world_candidate:
		all_added.append(SETPREFIX + k)
	all_added.extend(added_favorites)
	print ">>> Recording %s in \"world\" favorites file..." % \
		colorize("INFORM", str(a))
	world_set.update(all_added)
def loadResumeCommand(self, resume_data, skip_masked=True,
	"""
	Add a resume command to the graph and validate it in the process. This
	will raise a PackageNotFound exception if a package is not available.
	"""

	# Defensive validation: resume_data comes from mtimedb on disk and
	# may be missing or malformed.
	if not isinstance(resume_data, dict):
	mergelist = resume_data.get("mergelist")
	if not isinstance(mergelist, list):
	fakedb = self.mydbapi
	serialized_tasks = []
	# Recreate a Package instance for each well-formed mergelist entry.
	if not (isinstance(x, list) and len(x) == 4):
	pkg_type, myroot, pkg_key, action = x
	if pkg_type not in self.pkg_tree_map:
	if action != "merge":
	tree_type = self.pkg_tree_map[pkg_type]
	mydb = trees[myroot][tree_type].dbapi
	db_keys = list(self._trees_orig[myroot][
		tree_type].dbapi._aux_cache_keys)
	metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
	# It does no exist or it is corrupt.
	if action == "uninstall":
	# TODO: log these somewhere
	raise portage.exception.PackageNotFound(pkg_key)
	installed = action == "uninstall"
	built = pkg_type != "ebuild"
	root_config = self.roots[myroot]
	pkg = Package(built=built, cpv=pkg_key,
		installed=installed, metadata=metadata,
		operation=action, root_config=root_config,
	if pkg_type == "ebuild":
		# Ebuilds need their USE/CHOST recomputed from current config.
		pkgsettings = self.pkgsettings[myroot]
		pkgsettings.setcpv(pkg)
		pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
		pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
	self._pkg_cache[pkg] = pkg

	root_config = self.roots[pkg.root]
	# A scheduled merge that is no longer visible is either deferred
	# as a masked task or queued for an unsatisfied-dep message.
	if "merge" == pkg.operation and \
		not visible(root_config.settings, pkg):
		masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
		self._unsatisfied_deps_for_display.append(
			((pkg.root, "="+pkg.cpv), {"myparent":None}))

	fakedb[myroot].cpv_inject(pkg)
	serialized_tasks.append(pkg)
	self.spinner.update()

	if self._unsatisfied_deps_for_display:

	if not serialized_tasks or "--nodeps" in self.myopts:
		self._serialized_tasks_cache = serialized_tasks
		self._scheduler_graph = self.digraph
		self._select_package = self._select_pkg_from_graph
		self.myparams.add("selective")
		# Always traverse deep dependencies in order to account for
		# potentially unsatisfied dependencies of installed packages.
		# This is necessary for correct --keep-going or --resume operation
		# in case a package from a group of circularly dependent packages
		# fails. In this case, a package which has recently been installed
		# may have an unsatisfied circular dependency (pulled in by
		# PDEPEND, for example). So, even though a package is already
		# installed, it may not have all of it's dependencies satisfied, so
		# it may not be usable. If such a package is in the subgraph of
		# deep depenedencies of a scheduled build, that build needs to
		# be cancelled. In order for this type of situation to be
		# recognized, deep traversal of dependencies is required.
		self.myparams.add("deep")

		favorites = resume_data.get("favorites")
		args_set = self._sets["args"]
		if isinstance(favorites, list):
			args = self._load_favorites(favorites)

		for task in serialized_tasks:
			if isinstance(task, Package) and \
				task.operation == "merge":
				if not self._add_pkg(task, None):

		# Packages for argument atoms need to be explicitly
		# added via _add_pkg() so that they are included in the
		# digraph (needed at least for --tree display).
		for atom in arg.set:
			pkg, existing_node = self._select_package(
				arg.root_config.root, atom)
			if existing_node is None and \
				if not self._add_pkg(pkg, Dependency(atom=atom,
					root=pkg.root, parent=arg)):

		# Allow unsatisfied deps here to avoid showing a masking
		# message for an unsatisfied dep that isn't necessarily
		if not self._create_graph(allow_unsatisfied=True):

		unsatisfied_deps = []
		for dep in self._unsatisfied_deps:
			if not isinstance(dep.parent, Package):
			if dep.parent.operation == "merge":
				unsatisfied_deps.append(dep)

		# For unsatisfied deps of installed packages, only account for
		# them if they are in the subgraph of dependencies of a package
		# which is scheduled to be installed.
		unsatisfied_install = False
		dep_stack = self.digraph.parent_nodes(dep.parent)
		node = dep_stack.pop()
		if not isinstance(node, Package):
		if node.operation == "merge":
			unsatisfied_install = True
		if node in traversed:
		dep_stack.extend(self.digraph.parent_nodes(node))

		if unsatisfied_install:
			unsatisfied_deps.append(dep)

		if masked_tasks or unsatisfied_deps:
			# This probably means that a required package
			# was dropped via --skipfirst. It makes the
			# resume list invalid, so convert it to a
			# UnsatisfiedResumeDep exception.
			raise self.UnsatisfiedResumeDep(self,
				masked_tasks + unsatisfied_deps)
		self._serialized_tasks_cache = None

	except self._unknown_internal_error:
def _load_favorites(self, favorites):
	"""
	Use a list of favorites to resume state from a
	previous select_files() call. This creates similar
	DependencyArg instances to those that would have
	been created by the original select_files() call.
	This allows Package instances to be matched with
	DependencyArg instances during graph creation.
	"""
	root_config = self.roots[self.target_root]
	getSetAtoms = root_config.setconfig.getSetAtoms
	sets = root_config.sets
	# Ignore anything that is not a string; favorites come from
	# serialized resume data.
	if not isinstance(x, basestring):
	if x in ("system", "world"):
	if x.startswith(SETPREFIX):
		s = x[len(SETPREFIX):]
		# Recursively expand sets so that containment tests in
		# self._get_parent_sets() properly match atoms in nested
		# sets (like if world contains system).
		expanded_set = InternalPackageSet(
			initial_atoms=getSetAtoms(s))
		self._sets[s] = expanded_set
		args.append(SetArg(arg=x, set=expanded_set,
			root_config=root_config))
		# Plain atoms become AtomArg instances; skip invalid ones.
		if not portage.isvalidatom(x):
		args.append(AtomArg(arg=x, atom=x,
			root_config=root_config))

	self._set_args(args)
class UnsatisfiedResumeDep(portage.exception.PortageException):
	"""
	A dependency of a resume list is not installed. This
	can occur when a required package is dropped from the
	merge list via --skipfirst.
	"""
	def __init__(self, depgraph, value):
		portage.exception.PortageException.__init__(self, value)
		# Keep a reference to the depgraph so the handler can inspect
		# the dropped dependencies carried in `value`.
		self.depgraph = depgraph
class _internal_exception(portage.exception.PortageException):
	"""Base class for exceptions used internally by the depgraph
	(see the subclasses for the specific control-flow cases)."""
	def __init__(self, value=""):
		portage.exception.PortageException.__init__(self, value)
class _unknown_internal_error(_internal_exception):
	"""
	Used by the depgraph internally to terminate graph creation.
	The specific reason for the failure should have been dumped
	to stderr, unfortunately, the exact reason for the failure
	may not be known.
	"""
class _serialize_tasks_retry(_internal_exception):
	"""
	This is raised by the _serialize_tasks() method when it needs to
	be called again for some reason. The only case that it's currently
	used for is when neglected dependencies need to be added to the
	graph in order to avoid making a potentially unsafe decision.
	"""
6310 class _dep_check_composite_db(portage.dbapi):
6312 A dbapi-like interface that is optimized for use in dep_check() calls.
6313 This is built on top of the existing depgraph package selection logic.
6314 Some packages that have been added to the graph may be masked from this
6315 view in order to influence the atom preference selection that occurs
6318 def __init__(self, depgraph, root):
6319 portage.dbapi.__init__(self)
6320 self._depgraph = depgraph
6322 self._match_cache = {}
6323 self._cpv_pkg_map = {}
6325 def _clear_cache(self):
6326 self._match_cache.clear()
6327 self._cpv_pkg_map.clear()
6329 def match(self, atom):
6330 ret = self._match_cache.get(atom)
6335 atom = self._dep_expand(atom)
6336 pkg, existing = self._depgraph._select_package(self._root, atom)
6340 # Return the highest available from select_package() as well as
6341 # any matching slots in the graph db.
6343 slots.add(pkg.metadata["SLOT"])
6344 atom_cp = portage.dep_getkey(atom)
6345 if pkg.cp.startswith("virtual/"):
6346 # For new-style virtual lookahead that occurs inside
6347 # dep_check(), examine all slots. This is needed
6348 # so that newer slots will not unnecessarily be pulled in
6349 # when a satisfying lower slot is already installed. For
6350 # example, if virtual/jdk-1.4 is satisfied via kaffe then
6351 # there's no need to pull in a newer slot to satisfy a
6352 # virtual/jdk dependency.
6353 for db, pkg_type, built, installed, db_keys in \
6354 self._depgraph._filtered_trees[self._root]["dbs"]:
6355 for cpv in db.match(atom):
6356 if portage.cpv_getkey(cpv) != pkg.cp:
6358 slots.add(db.aux_get(cpv, ["SLOT"])[0])
6360 if self._visible(pkg):
6361 self._cpv_pkg_map[pkg.cpv] = pkg
6363 slots.remove(pkg.metadata["SLOT"])
6365 slot_atom = "%s:%s" % (atom_cp, slots.pop())
6366 pkg, existing = self._depgraph._select_package(
6367 self._root, slot_atom)
6370 if not self._visible(pkg):
6372 self._cpv_pkg_map[pkg.cpv] = pkg
6375 self._cpv_sort_ascending(ret)
6376 self._match_cache[orig_atom] = ret
6379 def _visible(self, pkg):
6380 if pkg.installed and "selective" not in self._depgraph.myparams:
6382 arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
6383 except (StopIteration, portage.exception.InvalidDependString):
6390 self._depgraph.pkgsettings[pkg.root], pkg):
6392 except portage.exception.InvalidDependString:
6394 in_graph = self._depgraph._slot_pkg_map[
6395 self._root].get(pkg.slot_atom)
6396 if in_graph is None:
6397 # Mask choices for packages which are not the highest visible
6398 # version within their slot (since they usually trigger slot
6400 highest_visible, in_graph = self._depgraph._select_package(
6401 self._root, pkg.slot_atom)
6402 if pkg != highest_visible:
6404 elif in_graph != pkg:
6405 # Mask choices for packages that would trigger a slot
6406 # conflict with a previously selected package.
6410 def _dep_expand(self, atom):
6412 This is only needed for old installed packages that may
6413 contain atoms that are not fully qualified with a specific
6414 category. Emulate the cpv_expand() function that's used by
6415 dbapi.match() in cases like this. If there are multiple
6416 matches, it's often due to a new-style virtual that has
6417 been added, so try to filter those out to avoid raising
6420 root_config = self._depgraph.roots[self._root]
6422 expanded_atoms = self._depgraph._dep_expand(root_config, atom)
6423 if len(expanded_atoms) > 1:
6424 non_virtual_atoms = []
6425 for x in expanded_atoms:
6426 if not portage.dep_getkey(x).startswith("virtual/"):
6427 non_virtual_atoms.append(x)
6428 if len(non_virtual_atoms) == 1:
6429 expanded_atoms = non_virtual_atoms
6430 if len(expanded_atoms) > 1:
6431 # compatible with portage.cpv_expand()
6432 raise portage.exception.AmbiguousPackageName(
6433 [portage.dep_getkey(x) for x in expanded_atoms])
6435 atom = expanded_atoms[0]
6437 null_atom = insert_category_into_atom(atom, "null")
6438 null_cp = portage.dep_getkey(null_atom)
6439 cat, atom_pn = portage.catsplit(null_cp)
6440 virts_p = root_config.settings.get_virts_p().get(atom_pn)
6442 # Allow the resolver to choose which virtual.
6443 atom = insert_category_into_atom(atom, "virtual")
6445 atom = insert_category_into_atom(atom, "null")
def aux_get(self, cpv, wants):
	"""
	Return metadata values for cpv, one per entry in wants.
	Keys that are absent from the package's metadata yield
	empty strings, mirroring dbapi.aux_get() behavior.
	"""
	pkg_metadata = self._cpv_pkg_map[cpv].metadata
	values = []
	for key in wants:
		values.append(pkg_metadata.get(key, ""))
	return values
6452 class PackageCounters(object):
6462 self.blocks_satisfied = 0
6464 self.restrict_fetch = 0
6465 self.restrict_fetch_satisfied = 0
6466 self.interactive = 0
6469 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
6472 myoutput.append("Total: %s package" % total_installs)
6473 if total_installs != 1:
6474 myoutput.append("s")
6475 if total_installs != 0:
6476 myoutput.append(" (")
6477 if self.upgrades > 0:
6478 details.append("%s upgrade" % self.upgrades)
6479 if self.upgrades > 1:
6481 if self.downgrades > 0:
6482 details.append("%s downgrade" % self.downgrades)
6483 if self.downgrades > 1:
6486 details.append("%s new" % self.new)
6487 if self.newslot > 0:
6488 details.append("%s in new slot" % self.newslot)
6489 if self.newslot > 1:
6492 details.append("%s reinstall" % self.reinst)
6496 details.append("%s uninstall" % self.uninst)
6499 if self.interactive > 0:
6500 details.append("%s %s" % (self.interactive,
6501 colorize("WARN", "interactive")))
6502 myoutput.append(", ".join(details))
6503 if total_installs != 0:
6504 myoutput.append(")")
6505 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
6506 if self.restrict_fetch:
6507 myoutput.append("\nFetch Restriction: %s package" % \
6508 self.restrict_fetch)
6509 if self.restrict_fetch > 1:
6510 myoutput.append("s")
6511 if self.restrict_fetch_satisfied < self.restrict_fetch:
6512 myoutput.append(bad(" (%s unsatisfied)") % \
6513 (self.restrict_fetch - self.restrict_fetch_satisfied))
6515 myoutput.append("\nConflict: %s block" % \
6518 myoutput.append("s")
6519 if self.blocks_satisfied < self.blocks:
6520 myoutput.append(bad(" (%s unsatisfied)") % \
6521 (self.blocks - self.blocks_satisfied))
6522 return "".join(myoutput)
6525 _can_poll_device = None
6527 def can_poll_device():
6529 Test if it's possible to use poll() on a device such as a pty. This
6530 is known to fail on Darwin.
6532 @returns: True if poll() on a device succeeds, False otherwise.
6535 global _can_poll_device
6536 if _can_poll_device is not None:
6537 return _can_poll_device
6539 if not hasattr(select, "poll"):
6540 _can_poll_device = False
6541 return _can_poll_device
6544 dev_null = open('/dev/null', 'rb')
6546 _can_poll_device = False
6547 return _can_poll_device
6550 p.register(dev_null.fileno(), PollConstants.POLLIN)
6552 invalid_request = False
6553 for f, event in p.poll():
6554 if event & PollConstants.POLLNVAL:
6555 invalid_request = True
6559 _can_poll_device = not invalid_request
6560 return _can_poll_device
def create_poll_instance():
	"""
	Return a poll implementation: select.poll() when polling a device
	works on this platform, otherwise a PollSelectAdapter (poll() is
	known to be missing or broken on some platforms, e.g. Darwin).
	"""
	if not can_poll_device():
		return PollSelectAdapter()
	return select.poll()
6572 getloadavg = getattr(os, "getloadavg", None)
6573 if getloadavg is None:
6576 Uses /proc/loadavg to emulate os.getloadavg().
6577 Raises OSError if the load average was unobtainable.
6580 loadavg_str = open('/proc/loadavg').readline()
6582 # getloadavg() is only supposed to raise OSError, so convert
6583 raise OSError('unknown')
6584 loadavg_split = loadavg_str.split()
6585 if len(loadavg_split) < 3:
6586 raise OSError('unknown')
6590 loadavg_floats.append(float(loadavg_split[i]))
6592 raise OSError('unknown')
6593 return tuple(loadavg_floats)
6595 class PollScheduler(object):
6597 class _sched_iface_class(SlotObject):
6598 __slots__ = ("register", "schedule", "unregister")
6602 self._max_load = None
6604 self._poll_event_queue = []
6605 self._poll_event_handlers = {}
6606 self._poll_event_handler_ids = {}
6607 # Increment id for each new handler.
6608 self._event_handler_id = 0
6609 self._poll_obj = create_poll_instance()
6610 self._scheduling = False
6612 def _schedule(self):
6614 Calls _schedule_tasks() and automatically returns early from
6615 any recursive calls to this method that the _schedule_tasks()
6616 call might trigger. This makes _schedule() safe to call from
6617 inside exit listeners.
6619 if self._scheduling:
6621 self._scheduling = True
6623 return self._schedule_tasks()
6625 self._scheduling = False
6627 def _running_job_count(self):
6630 def _can_add_job(self):
6631 max_jobs = self._max_jobs
6632 max_load = self._max_load
6634 if self._max_jobs is not True and \
6635 self._running_job_count() >= self._max_jobs:
6638 if max_load is not None and \
6639 (max_jobs is True or max_jobs > 1) and \
6640 self._running_job_count() >= 1:
6642 avg1, avg5, avg15 = getloadavg()
6646 if avg1 >= max_load:
6651 def _poll(self, timeout=None):
6653 All poll() calls pass through here. The poll events
6654 are added directly to self._poll_event_queue.
6655 In order to avoid endless blocking, this raises
6656 StopIteration if timeout is None and there are
6657 no file descriptors to poll.
6659 if not self._poll_event_handlers:
6661 if timeout is None and \
6662 not self._poll_event_handlers:
6663 raise StopIteration(
6664 "timeout is None and there are no poll() event handlers")
6666 # The following error is known to occur with Linux kernel versions
6669 # select.error: (4, 'Interrupted system call')
6671 # This error has been observed after a SIGSTOP, followed by SIGCONT.
6672 # Treat it similar to EAGAIN if timeout is None, otherwise just return
6673 # without any events.
6676 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
6678 except select.error, e:
6679 writemsg_level("\n!!! select error: %s\n" % (e,),
6680 level=logging.ERROR, noiselevel=-1)
6682 if timeout is not None:
6685 def _next_poll_event(self, timeout=None):
6687 Since the _schedule_wait() loop is called by event
6688 handlers from _poll_loop(), maintain a central event
6689 queue for both of them to share events from a single
6690 poll() call. In order to avoid endless blocking, this
6691 raises StopIteration if timeout is None and there are
6692 no file descriptors to poll.
6694 if not self._poll_event_queue:
6696 return self._poll_event_queue.pop()
6698 def _poll_loop(self):
6700 event_handlers = self._poll_event_handlers
6701 event_handled = False
6704 while event_handlers:
6705 f, event = self._next_poll_event()
6706 handler, reg_id = event_handlers[f]
6708 event_handled = True
6709 except StopIteration:
6710 event_handled = True
6712 if not event_handled:
6713 raise AssertionError("tight loop")
6715 def _schedule_yield(self):
6717 Schedule for a short period of time chosen by the scheduler based
6718 on internal state. Synchronous tasks should call this periodically
6719 in order to allow the scheduler to service pending poll events. The
6720 scheduler will call poll() exactly once, without blocking, and any
6721 resulting poll events will be serviced.
6723 event_handlers = self._poll_event_handlers
6726 if not event_handlers:
6727 return bool(events_handled)
6729 if not self._poll_event_queue:
6733 while event_handlers and self._poll_event_queue:
6734 f, event = self._next_poll_event()
6735 handler, reg_id = event_handlers[f]
6738 except StopIteration:
6741 return bool(events_handled)
6743 def _register(self, f, eventmask, handler):
6746 @return: A unique registration id, for use in schedule() or
6749 if f in self._poll_event_handlers:
6750 raise AssertionError("fd %d is already registered" % f)
6751 self._event_handler_id += 1
6752 reg_id = self._event_handler_id
6753 self._poll_event_handler_ids[reg_id] = f
6754 self._poll_event_handlers[f] = (handler, reg_id)
6755 self._poll_obj.register(f, eventmask)
6758 def _unregister(self, reg_id):
6759 f = self._poll_event_handler_ids[reg_id]
6760 self._poll_obj.unregister(f)
6761 del self._poll_event_handlers[f]
6762 del self._poll_event_handler_ids[reg_id]
6764 def _schedule_wait(self, wait_ids):
6766 Schedule until wait_id is not longer registered
6769 @param wait_id: a task id to wait for
6771 event_handlers = self._poll_event_handlers
6772 handler_ids = self._poll_event_handler_ids
6773 event_handled = False
6775 if isinstance(wait_ids, int):
6776 wait_ids = frozenset([wait_ids])
6779 while wait_ids.intersection(handler_ids):
6780 f, event = self._next_poll_event()
6781 handler, reg_id = event_handlers[f]
6783 event_handled = True
6784 except StopIteration:
6785 event_handled = True
6787 return event_handled
6789 class QueueScheduler(PollScheduler):
6792 Add instances of SequentialTaskQueue and then call run(). The
6793 run() method returns when no tasks remain.
6796 def __init__(self, max_jobs=None, max_load=None):
6797 PollScheduler.__init__(self)
6799 if max_jobs is None:
6802 self._max_jobs = max_jobs
6803 self._max_load = max_load
6804 self.sched_iface = self._sched_iface_class(
6805 register=self._register,
6806 schedule=self._schedule_wait,
6807 unregister=self._unregister)
6810 self._schedule_listeners = []
6813 self._queues.append(q)
def remove(self, q):
	"""Detach a previously added task queue from this scheduler."""
	self._queues.remove(q)
6820 while self._schedule():
6823 while self._running_job_count():
6826 def _schedule_tasks(self):
6829 @returns: True if there may be remaining tasks to schedule,
6832 while self._can_add_job():
6833 n = self._max_jobs - self._running_job_count()
6837 if not self._start_next_job(n):
6840 for q in self._queues:
6845 def _running_job_count(self):
6847 for q in self._queues:
6848 job_count += len(q.running_tasks)
6849 self._jobs = job_count
6852 def _start_next_job(self, n=1):
6854 for q in self._queues:
6855 initial_job_count = len(q.running_tasks)
6857 final_job_count = len(q.running_tasks)
6858 if final_job_count > initial_job_count:
6859 started_count += (final_job_count - initial_job_count)
6860 if started_count >= n:
6862 return started_count
6864 class TaskScheduler(object):
6867 A simple way to handle scheduling of AsynchrousTask instances. Simply
6868 add tasks and call run(). The run() method returns when no tasks remain.
def __init__(self, max_jobs=None, max_load=None):
	"""
	Wire a single SequentialTaskQueue into a QueueScheduler and
	expose the scheduler's run() and sched_iface directly on this
	object, so callers only interact with the TaskScheduler.
	"""
	self._scheduler = QueueScheduler(
		max_jobs=max_jobs, max_load=max_load)
	self._queue = SequentialTaskQueue(max_jobs=max_jobs)
	self._scheduler.add(self._queue)
	# Delegate scheduling services straight to the wrapped scheduler.
	self.sched_iface = self._scheduler.sched_iface
	self.run = self._scheduler.run
def add(self, task):
	"""Enqueue a task on the internal queue, to be executed by run()."""
	self._queue.add(task)
6882 class JobStatusDisplay(object):
6884 _bound_properties = ("curval", "failed", "running")
6885 _jobs_column_width = 48
6887 # Don't update the display unless at least this much
6888 # time has passed, in units of seconds.
6889 _min_display_latency = 2
6891 _default_term_codes = {
6897 _termcap_name_map = {
6898 'carriage_return' : 'cr',
6903 def __init__(self, out=sys.stdout, quiet=False, xterm_titles=True):
6904 object.__setattr__(self, "out", out)
6905 object.__setattr__(self, "quiet", quiet)
6906 object.__setattr__(self, "xterm_titles", xterm_titles)
6907 object.__setattr__(self, "maxval", 0)
6908 object.__setattr__(self, "merges", 0)
6909 object.__setattr__(self, "_changed", False)
6910 object.__setattr__(self, "_displayed", False)
6911 object.__setattr__(self, "_last_display_time", 0)
6912 object.__setattr__(self, "width", 80)
6915 isatty = hasattr(out, "isatty") and out.isatty()
6916 object.__setattr__(self, "_isatty", isatty)
6917 if not isatty or not self._init_term():
6919 for k, capname in self._termcap_name_map.iteritems():
6920 term_codes[k] = self._default_term_codes[capname]
6921 object.__setattr__(self, "_term_codes", term_codes)
6922 encoding = sys.getdefaultencoding()
6923 for k, v in self._term_codes.items():
6924 if not isinstance(v, basestring):
6925 self._term_codes[k] = v.decode(encoding, 'replace')
6927 def _init_term(self):
6929 Initialize term control codes.
6931 @returns: True if term codes were successfully initialized,
6935 term_type = os.environ.get("TERM", "vt100")
6941 curses.setupterm(term_type, self.out.fileno())
6942 tigetstr = curses.tigetstr
6943 except curses.error:
6948 if tigetstr is None:
6952 for k, capname in self._termcap_name_map.iteritems():
6953 code = tigetstr(capname)
6955 code = self._default_term_codes[capname]
6956 term_codes[k] = code
6957 object.__setattr__(self, "_term_codes", term_codes)
6960 def _format_msg(self, msg):
6961 return ">>> %s" % msg
6965 self._term_codes['carriage_return'] + \
6966 self._term_codes['clr_eol'])
6968 self._displayed = False
6970 def _display(self, line):
6971 self.out.write(line)
6973 self._displayed = True
6975 def _update(self, msg):
6978 if not self._isatty:
6979 out.write(self._format_msg(msg) + self._term_codes['newline'])
6981 self._displayed = True
6987 self._display(self._format_msg(msg))
6989 def displayMessage(self, msg):
6991 was_displayed = self._displayed
6993 if self._isatty and self._displayed:
6996 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
6998 self._displayed = False
7001 self._changed = True
7007 for name in self._bound_properties:
7008 object.__setattr__(self, name, 0)
7011 self.out.write(self._term_codes['newline'])
7013 self._displayed = False
7015 def __setattr__(self, name, value):
7016 old_value = getattr(self, name)
7017 if value == old_value:
7019 object.__setattr__(self, name, value)
7020 if name in self._bound_properties:
7021 self._property_change(name, old_value, value)
7023 def _property_change(self, name, old_value, new_value):
7024 self._changed = True
7027 def _load_avg_str(self):
7042 return ", ".join(("%%.%df" % digits ) % x for x in avg)
7046 Display status on stdout, but only if something has
7047 changed since the last call.
7053 current_time = time.time()
7054 time_delta = current_time - self._last_display_time
7055 if self._displayed and \
7057 if not self._isatty:
7059 if time_delta < self._min_display_latency:
7062 self._last_display_time = current_time
7063 self._changed = False
7064 self._display_status()
7066 def _display_status(self):
7067 # Don't use len(self._completed_tasks) here since that also
7068 # can include uninstall tasks.
7069 curval_str = str(self.curval)
7070 maxval_str = str(self.maxval)
7071 running_str = str(self.running)
7072 failed_str = str(self.failed)
7073 load_avg_str = self._load_avg_str()
7075 color_output = StringIO()
7076 plain_output = StringIO()
7077 style_file = portage.output.ConsoleStyleFile(color_output)
7078 style_file.write_listener = plain_output
7079 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
7080 style_writer.style_listener = style_file.new_styles
7081 f = formatter.AbstractFormatter(style_writer)
7083 number_style = "INFORM"
7084 f.add_literal_data("Jobs: ")
7085 f.push_style(number_style)
7086 f.add_literal_data(curval_str)
7088 f.add_literal_data(" of ")
7089 f.push_style(number_style)
7090 f.add_literal_data(maxval_str)
7092 f.add_literal_data(" complete")
7095 f.add_literal_data(", ")
7096 f.push_style(number_style)
7097 f.add_literal_data(running_str)
7099 f.add_literal_data(" running")
7102 f.add_literal_data(", ")
7103 f.push_style(number_style)
7104 f.add_literal_data(failed_str)
7106 f.add_literal_data(" failed")
7108 padding = self._jobs_column_width - len(plain_output.getvalue())
7110 f.add_literal_data(padding * " ")
7112 f.add_literal_data("Load avg: ")
7113 f.add_literal_data(load_avg_str)
7115 # Truncate to fit width, to avoid making the terminal scroll if the
7116 # line overflows (happens when the load average is large).
7117 plain_output = plain_output.getvalue()
7118 if self._isatty and len(plain_output) > self.width:
7119 # Use plain_output here since it's easier to truncate
7120 # properly than the color output which contains console
7122 self._update(plain_output[:self.width])
7124 self._update(color_output.getvalue())
7126 if self.xterm_titles:
7127 xtermTitle(" ".join(plain_output.split()))
7129 class Scheduler(PollScheduler):
7131 _opts_ignore_blockers = \
7132 frozenset(["--buildpkgonly",
7133 "--fetchonly", "--fetch-all-uri",
7134 "--nodeps", "--pretend"])
7136 _opts_no_background = \
7137 frozenset(["--pretend",
7138 "--fetchonly", "--fetch-all-uri"])
7140 _opts_no_restart = frozenset(["--buildpkgonly",
7141 "--fetchonly", "--fetch-all-uri", "--pretend"])
7143 _bad_resume_opts = set(["--ask", "--changelog",
7144 "--resume", "--skipfirst"])
7146 _fetch_log = os.path.join(_emerge_log_dir, 'emerge-fetch.log')
7148 class _iface_class(SlotObject):
7149 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
7150 "dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
7151 "scheduleSetup", "scheduleUnpack", "scheduleYield",
7154 class _fetch_iface_class(SlotObject):
7155 __slots__ = ("log_file", "schedule")
7157 _task_queues_class = slot_dict_class(
7158 ("merge", "jobs", "fetch", "unpack"), prefix="")
7160 class _build_opts_class(SlotObject):
7161 __slots__ = ("buildpkg", "buildpkgonly",
7162 "fetch_all_uri", "fetchonly", "pretend")
7164 class _binpkg_opts_class(SlotObject):
7165 __slots__ = ("fetchonly", "getbinpkg", "pretend")
7167 class _pkg_count_class(SlotObject):
7168 __slots__ = ("curval", "maxval")
7170 class _emerge_log_class(SlotObject):
7171 __slots__ = ("xterm_titles",)
def log(self, *pargs, **kwargs):
	"""
	Forward to emergelog(). When xterm titles are disabled the
	short_msg keyword is dropped first, so the message does not
	interfere with the scheduler's status display.
	"""
	if not self.xterm_titles and "short_msg" in kwargs:
		del kwargs["short_msg"]
	emergelog(self.xterm_titles, *pargs, **kwargs)
7179 class _failed_pkg(SlotObject):
7180 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
7182 class _ConfigPool(object):
7183 """Interface for a task to temporarily allocate a config
7184 instance from a pool. This allows a task to be constructed
7185 long before the config instance actually becomes needed, like
7186 when prefetchers are constructed for the whole merge list."""
7187 __slots__ = ("_root", "_allocate", "_deallocate")
7188 def __init__(self, root, allocate, deallocate):
7190 self._allocate = allocate
7191 self._deallocate = deallocate
7193 return self._allocate(self._root)
7194 def deallocate(self, settings):
7195 self._deallocate(settings)
class _unknown_internal_error(portage.exception.PortageException):
	"""
	Raised internally to terminate scheduling after the specific
	reason for the failure has already been dumped to stderr.
	"""
	def __init__(self, value=""):
		portage.exception.PortageException.__init__(self, value)
7205 def __init__(self, settings, trees, mtimedb, myopts,
7206 spinner, mergelist, favorites, digraph):
7207 PollScheduler.__init__(self)
7208 self.settings = settings
7209 self.target_root = settings["ROOT"]
7211 self.myopts = myopts
7212 self._spinner = spinner
7213 self._mtimedb = mtimedb
7214 self._mergelist = mergelist
7215 self._favorites = favorites
7216 self._args_set = InternalPackageSet(favorites)
7217 self._build_opts = self._build_opts_class()
7218 for k in self._build_opts.__slots__:
7219 setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
7220 self._binpkg_opts = self._binpkg_opts_class()
7221 for k in self._binpkg_opts.__slots__:
7222 setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
7225 self._logger = self._emerge_log_class()
7226 self._task_queues = self._task_queues_class()
7227 for k in self._task_queues.allowed_keys:
7228 setattr(self._task_queues, k,
7229 SequentialTaskQueue())
7231 # Holds merges that will wait to be executed when no builds are
7232 # executing. This is useful for system packages since dependencies
7233 # on system packages are frequently unspecified.
7234 self._merge_wait_queue = []
7235 # Holds merges that have been transfered from the merge_wait_queue to
7236 # the actual merge queue. They are removed from this list upon
7237 # completion. Other packages can start building only when this list is
7239 self._merge_wait_scheduled = []
7241 # Holds system packages and their deep runtime dependencies. Before
7242 # being merged, these packages go to merge_wait_queue, to be merged
7243 # when no other packages are building.
7244 self._deep_system_deps = set()
7246 # Holds packages to merge which will satisfy currently unsatisfied
7247 # deep runtime dependencies of system packages. If this is not empty
7248 # then no parallel builds will be spawned until it is empty. This
7249 # minimizes the possibility that a build will fail due to the system
7250 # being in a fragile state. For example, see bug #259954.
7251 self._unsatisfied_system_deps = set()
7253 self._status_display = JobStatusDisplay(
7254 xterm_titles=('notitles' not in settings.features))
7255 self._max_load = myopts.get("--load-average")
7256 max_jobs = myopts.get("--jobs")
7257 if max_jobs is None:
7259 self._set_max_jobs(max_jobs)
7261 # The root where the currently running
7262 # portage instance is installed.
7263 self._running_root = trees["/"]["root_config"]
7265 if settings.get("PORTAGE_DEBUG", "") == "1":
7267 self.pkgsettings = {}
7268 self._config_pool = {}
7269 self._blocker_db = {}
7271 self._config_pool[root] = []
7272 self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
7274 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
7275 schedule=self._schedule_fetch)
7276 self._sched_iface = self._iface_class(
7277 dblinkEbuildPhase=self._dblink_ebuild_phase,
7278 dblinkDisplayMerge=self._dblink_display_merge,
7279 dblinkElog=self._dblink_elog,
7280 dblinkEmergeLog=self._dblink_emerge_log,
7281 fetch=fetch_iface, register=self._register,
7282 schedule=self._schedule_wait,
7283 scheduleSetup=self._schedule_setup,
7284 scheduleUnpack=self._schedule_unpack,
7285 scheduleYield=self._schedule_yield,
7286 unregister=self._unregister)
7288 self._prefetchers = weakref.WeakValueDictionary()
7289 self._pkg_queue = []
7290 self._completed_tasks = set()
7292 self._failed_pkgs = []
7293 self._failed_pkgs_all = []
7294 self._failed_pkgs_die_msgs = []
7295 self._post_mod_echo_msgs = []
7296 self._parallel_fetch = False
7297 merge_count = len([x for x in mergelist \
7298 if isinstance(x, Package) and x.operation == "merge"])
7299 self._pkg_count = self._pkg_count_class(
7300 curval=0, maxval=merge_count)
7301 self._status_display.maxval = self._pkg_count.maxval
7303 # The load average takes some time to respond when new
7304 # jobs are added, so we need to limit the rate of adding
7306 self._job_delay_max = 10
7307 self._job_delay_factor = 1.0
7308 self._job_delay_exp = 1.5
7309 self._previous_job_start_time = None
7311 self._set_digraph(digraph)
7313 # This is used to memoize the _choose_pkg() result when
7314 # no packages can be chosen until one of the existing
7316 self._choose_pkg_return_early = False
7318 features = self.settings.features
7319 if "parallel-fetch" in features and \
7320 not ("--pretend" in self.myopts or \
7321 "--fetch-all-uri" in self.myopts or \
7322 "--fetchonly" in self.myopts):
7323 if "distlocks" not in features:
7324 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
7325 portage.writemsg(red("!!!")+" parallel-fetching " + \
7326 "requires the distlocks feature enabled"+"\n",
7328 portage.writemsg(red("!!!")+" you have it disabled, " + \
7329 "thus parallel-fetching is being disabled"+"\n",
7331 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
7332 elif len(mergelist) > 1:
7333 self._parallel_fetch = True
7335 if self._parallel_fetch:
7336 # clear out existing fetch log if it exists
7338 open(self._fetch_log, 'w')
7339 except EnvironmentError:
7342 self._running_portage = None
7343 portage_match = self._running_root.trees["vartree"].dbapi.match(
7344 portage.const.PORTAGE_PACKAGE_ATOM)
7346 cpv = portage_match.pop()
7347 self._running_portage = self._pkg(cpv, "installed",
7348 self._running_root, installed=True)
7350 def _poll(self, timeout=None):
7352 PollScheduler._poll(self, timeout=timeout)
7354 def _set_max_jobs(self, max_jobs):
7355 self._max_jobs = max_jobs
7356 self._task_queues.jobs.max_jobs = max_jobs
7358 def _background_mode(self):
7360 Check if background mode is enabled and adjust states as necessary.
7363 @returns: True if background mode is enabled, False otherwise.
7365 background = (self._max_jobs is True or \
7366 self._max_jobs > 1 or "--quiet" in self.myopts) and \
7367 not bool(self._opts_no_background.intersection(self.myopts))
7370 interactive_tasks = self._get_interactive_tasks()
7371 if interactive_tasks:
7373 writemsg_level(">>> Sending package output to stdio due " + \
7374 "to interactive package(s):\n",
7375 level=logging.INFO, noiselevel=-1)
7377 for pkg in interactive_tasks:
7378 pkg_str = " " + colorize("INFORM", str(pkg.cpv))
7380 pkg_str += " for " + pkg.root
7383 writemsg_level("".join("%s\n" % (l,) for l in msg),
7384 level=logging.INFO, noiselevel=-1)
7385 if self._max_jobs is True or self._max_jobs > 1:
7386 self._set_max_jobs(1)
7387 writemsg_level(">>> Setting --jobs=1 due " + \
7388 "to the above interactive package(s)\n",
7389 level=logging.INFO, noiselevel=-1)
7391 self._status_display.quiet = \
7393 ("--quiet" in self.myopts and \
7394 "--verbose" not in self.myopts)
7396 self._logger.xterm_titles = \
7397 "notitles" not in self.settings.features and \
7398 self._status_display.quiet
7402 def _get_interactive_tasks(self):
7403 from portage import flatten
7404 from portage.dep import use_reduce, paren_reduce
7405 interactive_tasks = []
7406 for task in self._mergelist:
7407 if not (isinstance(task, Package) and \
7408 task.operation == "merge"):
7411 properties = flatten(use_reduce(paren_reduce(
7412 task.metadata["PROPERTIES"]), uselist=task.use.enabled))
7413 except portage.exception.InvalidDependString, e:
7414 show_invalid_depstring_notice(task,
7415 task.metadata["PROPERTIES"], str(e))
7416 raise self._unknown_internal_error()
7417 if "interactive" in properties:
7418 interactive_tasks.append(task)
7419 return interactive_tasks
7421 def _set_digraph(self, digraph):
7422 if "--nodeps" in self.myopts or \
7423 (self._max_jobs is not True and self._max_jobs < 2):
7425 self._digraph = None
7428 self._digraph = digraph
7429 self._find_system_deps()
7430 self._prune_digraph()
7431 self._prevent_builddir_collisions()
def _find_system_deps(self):
	"""
	Collect system packages and their deep runtime dependencies into
	self._deep_system_deps, keeping only packages that will actually
	be merged. These packages are later routed through the
	merge_wait_queue so they merge while nothing else is building.
	"""
	found = _find_deep_system_runtime_deps(self._digraph)
	# Only packages scheduled for merge are relevant here.
	merges = set(pkg for pkg in found if pkg.operation == "merge")
	# Mutate the existing set in place rather than rebinding it.
	self._deep_system_deps.clear()
	self._deep_system_deps.update(merges)
7446 def _prune_digraph(self):
7448 Prune any root nodes that are irrelevant.
7451 graph = self._digraph
7452 completed_tasks = self._completed_tasks
7453 removed_nodes = set()
7455 for node in graph.root_nodes():
7456 if not isinstance(node, Package) or \
7457 (node.installed and node.operation == "nomerge") or \
7459 node in completed_tasks:
7460 removed_nodes.add(node)
7462 graph.difference_update(removed_nodes)
7463 if not removed_nodes:
7465 removed_nodes.clear()
7467 def _prevent_builddir_collisions(self):
7469 When building stages, sometimes the same exact cpv needs to be merged
7470 to both $ROOTs. Add edges to the digraph in order to avoid collisions
7471 in the builddir. Currently, normal file locks would be inappropriate
7472 for this purpose since emerge holds all of it's build dir locks from
7476 for pkg in self._mergelist:
7477 if not isinstance(pkg, Package):
7478 # a satisfied blocker
7482 if pkg.cpv not in cpv_map:
7483 cpv_map[pkg.cpv] = [pkg]
7485 for earlier_pkg in cpv_map[pkg.cpv]:
7486 self._digraph.add(earlier_pkg, pkg,
7487 priority=DepPriority(buildtime=True))
7488 cpv_map[pkg.cpv].append(pkg)
7490 class _pkg_failure(portage.exception.PortageException):
7492 An instance of this class is raised by unmerge() when
7493 an uninstallation fails.
7496 def __init__(self, *pargs):
7497 portage.exception.PortageException.__init__(self, pargs)
7499 self.status = pargs[0]
7501 def _schedule_fetch(self, fetcher):
7503 Schedule a fetcher on the fetch queue, in order to
7504 serialize access to the fetch log.
7506 self._task_queues.fetch.addFront(fetcher)
7508 def _schedule_setup(self, setup_phase):
7510 Schedule a setup phase on the merge queue, in order to
7511 serialize unsandboxed access to the live filesystem.
7513 self._task_queues.merge.addFront(setup_phase)
7516 def _schedule_unpack(self, unpack_phase):
7518 Schedule an unpack phase on the unpack queue, in order
7519 to serialize $DISTDIR access for live ebuilds.
7521 self._task_queues.unpack.add(unpack_phase)
7523 def _find_blockers(self, new_pkg):
7525 Returns a callable which should be called only when
7526 the vdb lock has been acquired.
7529 return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
7532 def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
7533 if self._opts_ignore_blockers.intersection(self.myopts):
7536 # Call gc.collect() here to avoid heap overflow that
7537 # triggers 'Cannot allocate memory' errors (reported
7542 blocker_db = self._blocker_db[new_pkg.root]
7544 blocker_dblinks = []
7545 for blocking_pkg in blocker_db.findInstalledBlockers(
7546 new_pkg, acquire_lock=acquire_lock):
7547 if new_pkg.slot_atom == blocking_pkg.slot_atom:
7549 if new_pkg.cpv == blocking_pkg.cpv:
7551 blocker_dblinks.append(portage.dblink(
7552 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
7553 self.pkgsettings[blocking_pkg.root], treetype="vartree",
7554 vartree=self.trees[blocking_pkg.root]["vartree"]))
7558 return blocker_dblinks
def _dblink_pkg(self, pkg_dblink):
	"""
	Build the package object corresponding to the given dblink by
	delegating to self._pkg(), mapping the dblink's tree type to a
	package type name via RootConfig.tree_pkg_map.
	"""
	tree_type = RootConfig.tree_pkg_map[pkg_dblink.treetype]
	root_config = self.trees[pkg_dblink.myroot]["root_config"]
	return self._pkg(pkg_dblink.mycpv, tree_type, root_config,
		installed=(tree_type == "installed"))
7567 def _append_to_log_path(self, log_path, msg):
7568 f = open(log_path, 'a')
7574 def _dblink_elog(self, pkg_dblink, phase, func, msgs):
7576 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
7579 background = self._background
7581 if background and log_path is not None:
7582 log_file = open(log_path, 'a')
7587 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
7589 if log_file is not None:
7592 def _dblink_emerge_log(self, msg):
7593 self._logger.log(msg)
7595 def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
7596 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
7597 background = self._background
7599 if log_path is None:
7600 if not (background and level < logging.WARN):
7601 portage.util.writemsg_level(msg,
7602 level=level, noiselevel=noiselevel)
7605 portage.util.writemsg_level(msg,
7606 level=level, noiselevel=noiselevel)
7607 self._append_to_log_path(log_path, msg)
# NOTE(review): partial excerpt — embedded line numbers jump (docstring
# delimiters and some lines, e.g. 7627-7628, are missing).
# Purpose (from visible lines and docstring fragment): run an ebuild phase
# for a merge via an EbuildPhase task on the scheduler interface, so merge
# phases execute asynchronously under scheduler output control; returns the
# phase's returncode.
7609 def _dblink_ebuild_phase(self,
7610 pkg_dblink, pkg_dbapi, ebuild_path, phase):
7612 Using this callback for merge phases allows the scheduler
7613 to run while these phases execute asynchronously, and allows
7614 the scheduler control output handling.
7617 scheduler = self._sched_iface
7618 settings = pkg_dblink.settings
7619 pkg = self._dblink_pkg(pkg_dblink)
7620 background = self._background
7621 log_path = settings.get("PORTAGE_LOG_FILE")
7623 ebuild_phase = EbuildPhase(background=background,
7624 pkg=pkg, phase=phase, scheduler=scheduler,
7625 settings=settings, tree=pkg_dblink.treetype)
7626 ebuild_phase.start()
# presumably an ebuild_phase.wait() occurs in the missing lines — TODO confirm
7629 return ebuild_phase.returncode
# NOTE(review): partial excerpt — embedded line numbers jump in many places
# (docstring delimiters, return/continue statements, error branches missing).
# Purpose (from visible lines): before parallel operations start, generate
# Manifest digests for ebuilds in the merge list when --digest is given or
# FEATURES=digest is set; skipped entirely in --fetchonly mode. Errors are
# reported at logging.ERROR level (the writemsg_level call heads are among
# the missing lines).
7631 def _generate_digests(self):
7633 Generate digests if necessary for --digests or FEATURES=digest.
7634 In order to avoid interference, this must done before parallel
7638 if '--fetchonly' in self.myopts:
7641 digest = '--digest' in self.myopts
7643 for pkgsettings in self.pkgsettings.itervalues():
7644 if 'digest' in pkgsettings.features:
7651 for x in self._mergelist:
7652 if not isinstance(x, Package) or \
7653 x.type_name != 'ebuild' or \
7654 x.operation != 'merge':
7656 pkgsettings = self.pkgsettings[x.root]
7657 if '--digest' not in self.myopts and \
7658 'digest' not in pkgsettings.features:
7660 portdb = x.root_config.trees['porttree'].dbapi
7661 ebuild_path = portdb.findname(x.cpv)
7664 "!!! Could not locate ebuild for '%s'.\n" \
7665 % x.cpv, level=logging.ERROR, noiselevel=-1)
7667 pkgsettings['O'] = os.path.dirname(ebuild_path)
7668 if not portage.digestgen([], pkgsettings, myportdb=portdb):
7670 "!!! Unable to generate manifest for '%s'.\n" \
7671 % x.cpv, level=logging.ERROR, noiselevel=-1)
# NOTE(review): partial excerpt — embedded line numbers jump (early return,
# quiet_settings initialization, continue statements, and the failure return
# are missing).
# Purpose (from visible lines): verify Manifest digests of all ebuilds in
# the merge list up front, using per-root cloned configs with PORTAGE_QUIET
# set, so the user learns of a digestcheck failure as early as possible.
# Skipped unless FEATURES=strict, and skipped for fetch-only modes.
7676 def _check_manifests(self):
7677 # Verify all the manifests now so that the user is notified of failure
7678 # as soon as possible.
7679 if "strict" not in self.settings.features or \
7680 "--fetchonly" in self.myopts or \
7681 "--fetch-all-uri" in self.myopts:
7684 shown_verifying_msg = False
7686 for myroot, pkgsettings in self.pkgsettings.iteritems():
7687 quiet_config = portage.config(clone=pkgsettings)
7688 quiet_config["PORTAGE_QUIET"] = "1"
7689 quiet_config.backup_changes("PORTAGE_QUIET")
7690 quiet_settings[myroot] = quiet_config
7693 for x in self._mergelist:
7694 if not isinstance(x, Package) or \
7695 x.type_name != "ebuild":
7698 if not shown_verifying_msg:
7699 shown_verifying_msg = True
7700 self._status_msg("Verifying ebuild manifests")
7702 root_config = x.root_config
7703 portdb = root_config.trees["porttree"].dbapi
7704 quiet_config = quiet_settings[root_config.root]
7705 quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
7706 if not portage.digestcheck([], quiet_config, strict=True):
# NOTE(review): partial excerpt — embedded line numbers jump (an early
# return after the first check is presumably missing).
# Purpose (from visible lines): when parallel fetch is enabled, create a
# fetch prefetcher task for each package in the merge list except the
# first (avoids "waiting for lock" noise at startup), queue each on the
# fetch task queue, and remember it in self._prefetchers.
7711 def _add_prefetchers(self):
7713 if not self._parallel_fetch:
7716 if self._parallel_fetch:
7717 self._status_msg("Starting parallel fetch")
7719 prefetchers = self._prefetchers
7720 getbinpkg = "--getbinpkg" in self.myopts
7722 # In order to avoid "waiting for lock" messages
7723 # at the beginning, which annoy users, never
7724 # spawn a prefetcher for the first package.
7725 for pkg in self._mergelist[1:]:
7726 prefetcher = self._create_prefetcher(pkg)
7727 if prefetcher is not None:
7728 self._task_queues.fetch.add(prefetcher)
7729 prefetchers[pkg] = prefetcher
# NOTE(review): partial excerpt — embedded line numbers jump (the
# `prefetcher = None` default, docstring delimiters, and the final
# `return prefetcher` are presumably among the missing lines).
# Purpose (from visible lines): build a background fetch task for a
# package — an EbuildFetcher in fetchonly/prefetch mode for ebuilds, or a
# BinpkgPrefetcher for remote binary packages when --getbinpkg is active.
7731 def _create_prefetcher(self, pkg):
7733 @return: a prefetcher, or None if not applicable
7737 if not isinstance(pkg, Package):
7740 elif pkg.type_name == "ebuild":
7742 prefetcher = EbuildFetcher(background=True,
7743 config_pool=self._ConfigPool(pkg.root,
7744 self._allocate_config, self._deallocate_config),
7745 fetchonly=1, logfile=self._fetch_log,
7746 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
7748 elif pkg.type_name == "binary" and \
7749 "--getbinpkg" in self.myopts and \
7750 pkg.root_config.trees["bintree"].isremote(pkg.cpv):
7752 prefetcher = BinpkgPrefetcher(background=True,
7753 pkg=pkg, scheduler=self._sched_iface)
# NOTE(review): partial excerpt — embedded line numbers jump (docstring
# delimiters and the return statements are missing).
# Purpose (from visible lines): report whether the merge list contains a
# replacement for the running portage instance at a position other than
# last, which would trigger a restart mid-merge; disabled when any of
# self._opts_no_restart is in effect.
7757 def _is_restart_scheduled(self):
7759 Check if the merge list contains a replacement
7760 for the current running instance, that will result
7761 in restart after merge.
7763 @returns: True if a restart is scheduled, False otherwise.
7765 if self._opts_no_restart.intersection(self.myopts):
7768 mergelist = self._mergelist
7770 for i, pkg in enumerate(mergelist):
7771 if self._is_restart_necessary(pkg) and \
7772 i != len(mergelist) - 1:
# NOTE(review): partial excerpt — embedded line numbers jump (docstring
# delimiters and the fall-through return are missing).
# Purpose (from visible lines): a restart is necessary when pkg lives in
# the running root, matches the portage package atom, and differs in
# version from the currently running portage instance.
7777 def _is_restart_necessary(self, pkg):
7779 @return: True if merging the given package
7780 requires restart, False otherwise.
7783 # Figure out if we need a restart.
7784 if pkg.root == self._running_root.root and \
7785 portage.match_from_list(
7786 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
7787 if self._running_portage:
7788 return pkg.cpv != self._running_portage.cpv
# NOTE(review): partial excerpt — embedded line numbers jump (docstring
# delimiters, early returns, and an else branch before 7836 are missing).
# Purpose (from visible lines): after portage upgrades itself with more
# packages still pending, log completion, drop the merged package from the
# resume list, and re-exec emerge with --resume and
# --ignore-default-opts (keeping only options not in bad_resume_opts).
# os.execv does not return.
7792 def _restart_if_necessary(self, pkg):
7794 Use execv() to restart emerge. This happens
7795 if portage upgrades itself and there are
7796 remaining packages in the list.
7799 if self._opts_no_restart.intersection(self.myopts):
7802 if not self._is_restart_necessary(pkg):
7805 if pkg == self._mergelist[-1]:
7808 self._main_loop_cleanup()
7810 logger = self._logger
7811 pkg_count = self._pkg_count
7812 mtimedb = self._mtimedb
7813 bad_resume_opts = self._bad_resume_opts
7815 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
7816 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
7818 logger.log(" *** RESTARTING " + \
7819 "emerge via exec() after change of " + \
7822 mtimedb["resume"]["mergelist"].remove(list(pkg))
# presumably a mtimedb.commit() occurs in the missing lines — TODO confirm
7824 portage.run_exitfuncs()
7825 mynewargv = [sys.argv[0], "--resume"]
7826 resume_opts = self.myopts.copy()
7827 # For automatic resume, we need to prevent
7828 # any of bad_resume_opts from leaking in
7829 # via EMERGE_DEFAULT_OPTS.
7830 resume_opts["--ignore-default-opts"] = True
7831 for myopt, myarg in resume_opts.iteritems():
7832 if myopt not in bad_resume_opts:
7834 mynewargv.append(myopt)
7836 mynewargv.append(myopt +"="+ str(myarg))
7837 # priority only needs to be adjusted on the first run
7838 os.environ["PORTAGE_NICENESS"] = "0"
7839 os.execv(mynewargv[0], mynewargv)
# NOTE(review): partial excerpt of the top-level merge driver method — its
# `def` line itself is among the dropped lines (embedded numbering jumps
# from 7839 to 7843), and many statements inside are missing. Comments
# below describe only the visible code.
#
# Visible flow: announce/refresh resume state; validate PORTAGE_TMPDIR per
# root; propagate PORTAGE_BACKGROUND into settings when backgrounded;
# generate digests and check manifests; run self._merge() in a
# keep-going loop that prunes failed packages from the resume mergelist
# and recalculates the resume list; finally report failure logs and
# accumulated error messages.
7843 if "--resume" in self.myopts:
7845 portage.writemsg_stdout(
7846 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
7847 self._logger.log(" *** Resuming merge...")
7849 self._save_resume_list()
7852 self._background = self._background_mode()
7853 except self._unknown_internal_error:
# Per-root sanity checks and background configuration.
7856 for root in self.trees:
7857 root_config = self.trees[root]["root_config"]
7859 # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
7860 # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
7861 # for ensuring sane $PWD (bug #239560) and storing elog messages.
7862 tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
7863 if not tmpdir or not os.path.isdir(tmpdir):
7864 msg = "The directory specified in your " + \
7865 "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
7866 "does not exist. Please create this " + \
7867 "directory or correct your PORTAGE_TMPDIR setting."
7868 msg = textwrap.wrap(msg, 70)
7869 out = portage.output.EOutput()
7874 if self._background:
7875 root_config.settings.unlock()
7876 root_config.settings["PORTAGE_BACKGROUND"] = "1"
7877 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
7878 root_config.settings.lock()
7880 self.pkgsettings[root] = portage.config(
7881 clone=root_config.settings)
7883 rval = self._generate_digests()
7884 if rval != os.EX_OK:
7887 rval = self._check_manifests()
7888 if rval != os.EX_OK:
# Main merge loop with --keep-going retry handling.
7891 keep_going = "--keep-going" in self.myopts
7892 fetchonly = self._build_opts.fetchonly
7893 mtimedb = self._mtimedb
7894 failed_pkgs = self._failed_pkgs
7897 rval = self._merge()
7898 if rval == os.EX_OK or fetchonly or not keep_going:
7900 if "resume" not in mtimedb:
7902 mergelist = self._mtimedb["resume"].get("mergelist")
7909 for failed_pkg in failed_pkgs:
7910 mergelist.remove(list(failed_pkg.pkg))
7912 self._failed_pkgs_all.extend(failed_pkgs)
7918 if not self._calc_resume_list():
7921 clear_caches(self.trees)
7922 if not self._mergelist:
7925 self._save_resume_list()
7926 self._pkg_count.curval = 0
7927 self._pkg_count.maxval = len([x for x in self._mergelist \
7928 if isinstance(x, Package) and x.operation == "merge"])
7929 self._status_display.maxval = self._pkg_count.maxval
7931 self._logger.log(" *** Finished. Cleaning up...")
7934 self._failed_pkgs_all.extend(failed_pkgs)
# Failure reporting: show the single failure log in background mode.
7937 background = self._background
7938 failure_log_shown = False
7939 if background and len(self._failed_pkgs_all) == 1:
7940 # If only one package failed then just show its
7941 # whole log for easy viewing.
7942 failed_pkg = self._failed_pkgs_all[-1]
7943 build_dir = failed_pkg.build_dir
7946 log_paths = [failed_pkg.build_log]
7948 log_path = self._locate_failure_log(failed_pkg)
7949 if log_path is not None:
7951 log_file = open(log_path)
7955 if log_file is not None:
7957 for line in log_file:
7958 writemsg_level(line, noiselevel=-1)
7961 failure_log_shown = True
7963 # Dump mod_echo output now since it tends to flood the terminal.
7964 # This allows us to avoid having more important output, generated
7965 # later, from being swept away by the mod_echo output.
7966 mod_echo_output = _flush_elog_mod_echo()
7968 if background and not failure_log_shown and \
7969 self._failed_pkgs_all and \
7970 self._failed_pkgs_die_msgs and \
7971 not mod_echo_output:
7973 printer = portage.output.EOutput()
7974 for mysettings, key, logentries in self._failed_pkgs_die_msgs:
7976 if mysettings["ROOT"] != "/":
7977 root_msg = " merged to %s" % mysettings["ROOT"]
7979 printer.einfo("Error messages for package %s%s:" % \
7980 (colorize("INFORM", key), root_msg))
7982 for phase in portage.const.EBUILD_PHASES:
7983 if phase not in logentries:
7985 for msgtype, msgcontent in logentries[phase]:
7986 if isinstance(msgcontent, basestring):
7987 msgcontent = [msgcontent]
7988 for line in msgcontent:
7989 printer.eerror(line.strip("\n"))
7991 if self._post_mod_echo_msgs:
7992 for msg in self._post_mod_echo_msgs:
# Summary of all failed packages when more than one (or --keep-going).
7995 if len(self._failed_pkgs_all) > 1 or \
7996 (self._failed_pkgs_all and "--keep-going" in self.myopts):
7997 if len(self._failed_pkgs_all) > 1:
7998 msg = "The following %d packages have " % \
7999 len(self._failed_pkgs_all) + \
8000 "failed to build or install:"
8002 msg = "The following package has " + \
8003 "failed to build or install:"
8005 writemsg(prefix + "\n", noiselevel=-1)
8006 from textwrap import wrap
8007 for line in wrap(msg, 72):
8008 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
8009 writemsg(prefix + "\n", noiselevel=-1)
8010 for failed_pkg in self._failed_pkgs_all:
8011 writemsg("%s\t%s\n" % (prefix,
8012 colorize("INFORM", str(failed_pkg.pkg))),
8014 writemsg(prefix + "\n", noiselevel=-1)
# NOTE(review): partial excerpt — embedded line 8020 is missing (presumably
# a guard like `if errors:` — TODO confirm).
# Purpose (from visible lines): elog hook that collects ERROR-level log
# entries per package into self._failed_pkgs_die_msgs for later display.
8018 def _elog_listener(self, mysettings, key, logentries, fulltext):
8019 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
8021 self._failed_pkgs_die_msgs.append(
8022 (mysettings, key, errors))
# NOTE(review): heavily truncated excerpt — most of the body (8025,
# 8027-8028, 8030, 8032-8035, 8037-8046) is missing, including whatever is
# returned. Visible lines only show candidate log paths being examined by
# size via os.stat.
# Purpose (presumably): return the path of the most useful failure log for
# failed_pkg, or None — TODO confirm against the full source.
8024 def _locate_failure_log(self, failed_pkg):
8026 build_dir = failed_pkg.build_dir
8029 log_paths = [failed_pkg.build_log]
8031 for log_path in log_paths:
8036 log_size = os.stat(log_path).st_size
# NOTE(review): partial excerpt — the body of the Blocker branch (8053+)
# is missing.
# Purpose (from visible lines): seed self._pkg_queue with the Package
# entries from the merge list; Blocker entries get separate handling in
# the missing branch body.
8047 def _add_packages(self):
8048 pkg_queue = self._pkg_queue
8049 for pkg in self._mergelist:
8050 if isinstance(pkg, Package):
8051 pkg_queue.append(pkg)
8052 elif isinstance(pkg, Blocker):
# NOTE(review): partial excerpt — embedded line numbers jump (docstring
# delimiters, guards such as the $ROOT check and graph membership test,
# and the nested function's return values are missing).
# Purpose (from visible lines): when a system-package merge starts, walk
# its direct runtime dependency edges in the digraph and record any
# not-yet-completed merge children in self._unsatisfied_system_deps.
8055 def _system_merge_started(self, merge):
8057 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
8059 graph = self._digraph
8062 pkg = merge.merge.pkg
8064 # Skip this if $ROOT != / since it shouldn't matter if there
8065 # are unsatisfied system runtime deps in this case.
8069 completed_tasks = self._completed_tasks
8070 unsatisfied = self._unsatisfied_system_deps
8072 def ignore_non_runtime_or_satisfied(priority):
8074 Ignore non-runtime and satisfied runtime priorities.
8076 if isinstance(priority, DepPriority) and \
8077 not priority.satisfied and \
8078 (priority.runtime or priority.runtime_post):
8082 # When checking for unsatisfied runtime deps, only check
8083 # direct deps since indirect deps are checked when the
8084 # corresponding parent is merged.
8085 for child in graph.child_nodes(pkg,
8086 ignore_priority=ignore_non_runtime_or_satisfied):
8087 if not isinstance(child, Package) or \
8088 child.operation == 'uninstall':
8092 if child.operation == 'merge' and \
8093 child not in completed_tasks:
8094 unsatisfied.add(child)
8096 def _merge_wait_exit_handler(self, task):
8097 self._merge_wait_scheduled.remove(task)
8098 self._merge_exit(task)
# NOTE(review): partial excerpt — lines 8107-8108 are missing (presumably a
# display/schedule call at the tail — TODO confirm).
# Purpose (from visible lines): common merge exit handler — run
# _do_merge_exit, return the merge's config to the pool, bump the status
# display counter for successful merges of non-installed packages, and
# refresh the displayed merge-queue length.
8100 def _merge_exit(self, merge):
8101 self._do_merge_exit(merge)
8102 self._deallocate_config(merge.merge.settings)
8103 if merge.returncode == os.EX_OK and \
8104 not merge.merge.pkg.installed:
8105 self._status_display.curval += 1
8106 self._status_display.merges = len(self._task_queues.merge)
# NOTE(review): partial excerpt — embedded line numbers jump (an early
# return after the failure branch, the uninst_hash_key assignment head
# at 8130, a world-file update around 8133-8136, and a commit around
# 8146 are among the missing lines).
# Purpose (from visible lines): on failure, record a _failed_pkg entry and
# update the status display; on success, mark the task (and any replaced
# package's uninstall task) complete, restart emerge if portage itself was
# merged, and prune the package from the resume mergelist so --resume
# survives interruption.
8109 def _do_merge_exit(self, merge):
8110 pkg = merge.merge.pkg
8111 if merge.returncode != os.EX_OK:
8112 settings = merge.merge.settings
8113 build_dir = settings.get("PORTAGE_BUILDDIR")
8114 build_log = settings.get("PORTAGE_LOG_FILE")
8116 self._failed_pkgs.append(self._failed_pkg(
8117 build_dir=build_dir, build_log=build_log,
8119 returncode=merge.returncode))
8120 self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
8122 self._status_display.failed = len(self._failed_pkgs)
8125 self._task_complete(pkg)
8126 pkg_to_replace = merge.merge.pkg_to_replace
8127 if pkg_to_replace is not None:
8128 # When a package is replaced, mark its uninstall
8129 # task complete (if any).
8131 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
8132 self._task_complete(uninst_hash_key)
8137 self._restart_if_necessary(pkg)
8139 # Call mtimedb.commit() after each merge so that
8140 # --resume still works after being interrupted
8141 # by reboot, sigkill or similar.
8142 mtimedb = self._mtimedb
8143 mtimedb["resume"]["mergelist"].remove(list(pkg))
8144 if not mtimedb["resume"]["mergelist"]:
8145 del mtimedb["resume"]
# NOTE(review): partial excerpt — embedded line numbers jump (an else
# branch head before 8159/8163 and the job-count decrement near the tail
# are among the missing lines).
# Purpose (from visible lines): exit handler for build tasks. On success,
# wrap the build in a PackageMerge; deep system dependencies go onto the
# merge-wait queue (merged only when no builds are executing), others go
# straight to the merge task queue. On failure, record a _failed_pkg and
# update the status display; either way the build's config is returned to
# the pool.
8148 def _build_exit(self, build):
8149 if build.returncode == os.EX_OK:
8151 merge = PackageMerge(merge=build)
8152 if not build.build_opts.buildpkgonly and \
8153 build.pkg in self._deep_system_deps:
8154 # Since dependencies on system packages are frequently
8155 # unspecified, merge them only when no builds are executing.
8156 self._merge_wait_queue.append(merge)
8157 merge.addStartListener(self._system_merge_started)
8159 merge.addExitListener(self._merge_exit)
8160 self._task_queues.merge.add(merge)
8161 self._status_display.merges = len(self._task_queues.merge)
8163 settings = build.settings
8164 build_dir = settings.get("PORTAGE_BUILDDIR")
8165 build_log = settings.get("PORTAGE_LOG_FILE")
8167 self._failed_pkgs.append(self._failed_pkg(
8168 build_dir=build_dir, build_log=build_log,
8170 returncode=build.returncode))
8171 self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
8173 self._status_display.failed = len(self._failed_pkgs)
8174 self._deallocate_config(build.settings)
8176 self._status_display.running = self._jobs
8179 def _extract_exit(self, build):
8180 self._build_exit(build)
8182 def _task_complete(self, pkg):
8183 self._completed_tasks.add(pkg)
8184 self._unsatisfied_system_deps.discard(pkg)
8185 self._choose_pkg_return_early = False
# NOTE(review): partial excerpt of the _merge driver — its `def` line is
# among the dropped lines (embedded numbering jumps 8185 -> 8189), as are
# the try/finally structure and the success-path return.
# Visible flow: queue prefetchers and packages, silence lock/elog output
# while backgrounded, install the elog listener for the duration of the
# main loop, then clean up and derive the return value from the last
# failed package.
8189 self._add_prefetchers()
8190 self._add_packages()
8191 pkg_queue = self._pkg_queue
8192 failed_pkgs = self._failed_pkgs
8193 portage.locks._quiet = self._background
8194 portage.elog._emerge_elog_listener = self._elog_listener
8200 self._main_loop_cleanup()
8201 portage.locks._quiet = False
8202 portage.elog._emerge_elog_listener = None
8204 rval = failed_pkgs[-1].returncode
8208 def _main_loop_cleanup(self):
8209 del self._pkg_queue[:]
8210 self._completed_tasks.clear()
8211 self._deep_system_deps.clear()
8212 self._unsatisfied_system_deps.clear()
8213 self._choose_pkg_return_early = False
8214 self._status_display.reset()
8215 self._digraph = None
8216 self._task_queues.fetch.clear()
# NOTE(review): partial excerpt — embedded line numbers jump (docstring
# delimiters, return statements, the chosen_pkg initialization/break, and
# the installed-package handling are among the missing lines).
# Purpose (from visible lines): pick the next package whose dependencies
# are satisfied. With no digraph (--nodeps or serial fallback) the queue
# is consumed FIFO; otherwise the digraph is pruned and the first queued
# package not dependent on a scheduled merge is chosen. When nothing is
# currently eligible, _choose_pkg_return_early suppresses repeat searches
# until a job finishes.
8218 def _choose_pkg(self):
8220 Choose a task that has all its dependencies satisfied.
8223 if self._choose_pkg_return_early:
8226 if self._digraph is None:
8227 if (self._jobs or self._task_queues.merge) and \
8228 not ("--nodeps" in self.myopts and \
8229 (self._max_jobs is True or self._max_jobs > 1)):
8230 self._choose_pkg_return_early = True
8232 return self._pkg_queue.pop(0)
8234 if not (self._jobs or self._task_queues.merge):
8235 return self._pkg_queue.pop(0)
8237 self._prune_digraph()
8240 later = set(self._pkg_queue)
8241 for pkg in self._pkg_queue:
8243 if not self._dependent_on_scheduled_merges(pkg, later):
8247 if chosen_pkg is not None:
8248 self._pkg_queue.remove(chosen_pkg)
8250 if chosen_pkg is None:
8251 # There's no point in searching for a package to
8252 # choose until at least one of the existing jobs
8254 self._choose_pkg_return_early = True
# NOTE(review): partial excerpt — embedded line numbers jump (docstring
# delimiters, the `dependent` flag and while-loop head, the `later`
# membership test around 8290-8292, and the return are missing).
# Purpose (from visible lines): depth-first traversal of pkg's dependency
# subgraph to detect any node that still represents a scheduled merge —
# nodes that are installed no-ops, indirect uninstalls, already completed,
# or deferred via `later` are not counted as blocking.
8258 def _dependent_on_scheduled_merges(self, pkg, later):
8260 Traverse the subgraph of the given packages deep dependencies
8261 to see if it contains any scheduled merges.
8262 @param pkg: a package to check dependencies for
8264 @param later: packages for which dependence should be ignored
8265 since they will be merged later than pkg anyway and therefore
8266 delaying the merge of pkg will not result in a more optimal
8270 @returns: True if the package is dependent, False otherwise.
8273 graph = self._digraph
8274 completed_tasks = self._completed_tasks
8277 traversed_nodes = set([pkg])
8278 direct_deps = graph.child_nodes(pkg)
8279 node_stack = direct_deps
8280 direct_deps = frozenset(direct_deps)
8282 node = node_stack.pop()
8283 if node in traversed_nodes:
8285 traversed_nodes.add(node)
8286 if not ((node.installed and node.operation == "nomerge") or \
8287 (node.operation == "uninstall" and \
8288 node not in direct_deps) or \
8289 node in completed_tasks or \
8293 node_stack.extend(graph.child_nodes(node))
# NOTE(review): partial excerpt — embedded line numbers jump (docstring
# delimiters and the `else:` between 8303 and 8305 are missing).
# Purpose (from visible lines): hand out a config instance private to one
# task — reuse one from the per-root pool when available, otherwise clone
# the root's pkgsettings; reload()+reset() flush any state left by the
# previous package (e.g. PORTAGE_LOG_FILE).
8297 def _allocate_config(self, root):
8299 Allocate a unique config instance for a task in order
8300 to prevent interference between parallel tasks.
8302 if self._config_pool[root]:
8303 temp_settings = self._config_pool[root].pop()
8305 temp_settings = portage.config(clone=self.pkgsettings[root])
8306 # Since config.setcpv() isn't guaranteed to call config.reset() due to
8307 # performance reasons, call it here to make sure all settings from the
8308 # previous package get flushed out (such as PORTAGE_LOG_FILE).
8309 temp_settings.reload()
8310 temp_settings.reset()
8311 return temp_settings
8313 def _deallocate_config(self, settings):
8314 self._config_pool[settings["ROOT"]].append(settings)
# NOTE(review): partial excerpt — embedded line numbers jump (the poll /
# wait calls inside the loops and the trailing drain loop bodies are
# missing).
# Purpose (from visible lines): drive scheduling — cap jobs at 1 when a
# portage-update restart is pending or a no-background option is active,
# then loop on self._schedule(), servicing poll event handlers, until no
# jobs or merges remain.
8316 def _main_loop(self):
8318 # Only allow 1 job max if a restart is scheduled
8319 # due to portage update.
8320 if self._is_restart_scheduled() or \
8321 self._opts_no_background.intersection(self.myopts):
8322 self._set_max_jobs(1)
8324 merge_queue = self._task_queues.merge
8326 while self._schedule():
8327 if self._poll_event_handlers:
8332 if not (self._jobs or merge_queue):
8334 if self._poll_event_handlers:
8337 def _keep_scheduling(self):
8338 return bool(self._pkg_queue and \
8339 not (self._failed_pkgs and not self._build_opts.fetchonly))
# NOTE(review): partial excerpt — embedded line numbers jump (the loop
# head around 8354-8355, the schedule call on each queue at 8357-8359, and
# the state-change bookkeeping are among the missing lines).
# Purpose (from visible lines): one scheduling pass — flush the
# merge-wait queue into the merge task queue once no build jobs remain,
# schedule new tasks, refresh the status display, cancel prefetchers that
# are the only thing keeping the poll loop alive after a failure, and
# report whether scheduling should continue.
8341 def _schedule_tasks(self):
8343 # When the number of jobs drops to zero, process all waiting merges.
8344 if not self._jobs and self._merge_wait_queue:
8345 for task in self._merge_wait_queue:
8346 task.addExitListener(self._merge_wait_exit_handler)
8347 self._task_queues.merge.add(task)
8348 self._status_display.merges = len(self._task_queues.merge)
8349 self._merge_wait_scheduled.extend(self._merge_wait_queue)
8350 del self._merge_wait_queue[:]
8352 self._schedule_tasks_imp()
8353 self._status_display.display()
8356 for q in self._task_queues.values():
8360 # Cancel prefetchers if they're the only reason
8361 # the main poll loop is still running.
8362 if self._failed_pkgs and not self._build_opts.fetchonly and \
8363 not (self._jobs or self._task_queues.merge) and \
8364 self._task_queues.fetch:
8365 self._task_queues.fetch.clear()
8369 self._schedule_tasks_imp()
8370 self._status_display.display()
8372 return self._keep_scheduling()
# NOTE(review): partial excerpt — embedded line numbers jump (docstring
# delimiters, the load-average check around 8381/8383, and the return
# statements are missing).
# Purpose (from visible lines): throttle job starts under load — the delay
# grows with the number of running jobs (factor * jobs ** exp, capped at
# _job_delay_max) and scheduling is delayed until that much time has
# passed since the previous job start.
8374 def _job_delay(self):
8377 @returns: True if job scheduling should be delayed, False otherwise.
8380 if self._jobs and self._max_load is not None:
8382 current_time = time.time()
8384 delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
8385 if delay > self._job_delay_max:
8386 delay = self._job_delay_max
8387 if (current_time - self._previous_job_start_time) < delay:
# NOTE(review): partial excerpt — embedded line numbers jump (the
# `state_change` initialization, the while-loop head, the _job_delay()
# term in the guard, and the branch heads that distinguish merge /
# extract / build tasks are among the missing lines).
# Purpose (from visible lines): the inner scheduling step — bail out when
# nothing should be scheduled or a job cannot be added, otherwise choose
# a package, wrap it in a task, and dispatch it to the merge queue (with
# _merge_exit) or the jobs queue (with _extract_exit or _build_exit),
# stamping _previous_job_start_time for job-delay throttling.
8392 def _schedule_tasks_imp(self):
8395 @returns: True if state changed, False otherwise.
8402 if not self._keep_scheduling():
8403 return bool(state_change)
8405 if self._choose_pkg_return_early or \
8406 self._merge_wait_scheduled or \
8407 (self._jobs and self._unsatisfied_system_deps) or \
8408 not self._can_add_job() or \
8410 return bool(state_change)
8412 pkg = self._choose_pkg()
8414 return bool(state_change)
8418 if not pkg.installed:
8419 self._pkg_count.curval += 1
8421 task = self._task(pkg)
8424 merge = PackageMerge(merge=task)
8425 merge.addExitListener(self._merge_exit)
8426 self._task_queues.merge.add(merge)
8430 self._previous_job_start_time = time.time()
8431 self._status_display.running = self._jobs
8432 task.addExitListener(self._extract_exit)
8433 self._task_queues.jobs.add(task)
8437 self._previous_job_start_time = time.time()
8438 self._status_display.running = self._jobs
8439 task.addExitListener(self._build_exit)
8440 self._task_queues.jobs.add(task)
8442 return bool(state_change)
# NOTE(review): partial excerpt — embedded line numbers jump (the guard
# before previous_cpv.pop() at 8450, lines 8454 and the `return task` tail
# are missing).
# Purpose (from visible lines): build a MergeListItem task for pkg; for
# non-uninstall operations, look up the installed package in the same slot
# (if any) as pkg_to_replace so the merge can supersede it. The task gets
# its own config via _allocate_config and any prefetcher already started
# for this package.
8444 def _task(self, pkg):
8446 pkg_to_replace = None
8447 if pkg.operation != "uninstall":
8448 vardb = pkg.root_config.trees["vartree"].dbapi
8449 previous_cpv = vardb.match(pkg.slot_atom)
8451 previous_cpv = previous_cpv.pop()
8452 pkg_to_replace = self._pkg(previous_cpv,
8453 "installed", pkg.root_config, installed=True)
8455 task = MergeListItem(args_set=self._args_set,
8456 background=self._background, binpkg_opts=self._binpkg_opts,
8457 build_opts=self._build_opts,
8458 config_pool=self._ConfigPool(pkg.root,
8459 self._allocate_config, self._deallocate_config),
8460 emerge_opts=self.myopts,
8461 find_blockers=self._find_blockers(pkg), logger=self._logger,
8462 mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
8463 pkg_to_replace=pkg_to_replace,
8464 prefetcher=self._prefetchers.get(pkg),
8465 scheduler=self._sched_iface,
8466 settings=self._allocate_config(pkg.root),
8467 statusMessage=self._status_msg,
8468 world_atom=self._world_atom)
# NOTE(review): partial excerpt — embedded line numbers jump (the guard
# before appending the root, presumably `if pkg.root != "/":`, is missing
# at 8476 — TODO confirm).
# Purpose (from visible lines): emit a colorized status message for a
# failed package, including the action ("install"/"emerge"), the target
# root, and the failure log path when one can be located.
8472 def _failed_pkg_msg(self, failed_pkg, action, preposition):
8473 pkg = failed_pkg.pkg
8474 msg = "%s to %s %s" % \
8475 (bad("Failed"), action, colorize("INFORM", pkg.cpv))
8477 msg += " %s %s" % (preposition, pkg.root)
8479 log_path = self._locate_failure_log(failed_pkg)
8480 if log_path is not None:
8481 msg += ", Log file:"
8482 self._status_msg(msg)
8484 if log_path is not None:
8485 self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
# NOTE(review): partial excerpt — embedded line numbers jump (docstring
# delimiters and part of the docstring are missing).
# Purpose (from visible lines): show a one-line status message via the
# status display; in foreground mode a bare newline is written first so
# the message starts on a fresh line.
8487 def _status_msg(self, msg):
8489 Display a brief status message (no newlines) in the status display.
8490 This is called by tasks to provide feedback to the user. This
8491 delegates the responsibility of generating \r and \n control characters,
8492 to guarantee that lines are created or erased when necessary and
8496 @param msg: a brief status message (no newlines allowed)
8498 if not self._background:
8499 writemsg_level("\n")
8500 self._status_display.displayMessage(msg)
# NOTE(review): partial excerpt — embedded line numbers jump (docstring
# delimiters and the tail, presumably an mtimedb.commit(), are missing —
# TODO confirm).
# Purpose (from visible lines): store the current merge list (only real
# merge operations) in mtimedb["resume"]["mergelist"] so --resume works;
# done before Manifest verification so --skipfirst can get past a broken
# digest on a non-essential package.
8502 def _save_resume_list(self):
8504 Do this before verifying the ebuild Manifests since it might
8505 be possible for the user to use --resume --skipfirst get past
8506 a non-essential package with a broken digest.
8508 mtimedb = self._mtimedb
8509 mtimedb["resume"]["mergelist"] = [list(x) \
8510 for x in self._mergelist \
8511 if isinstance(x, Package) and x.operation == "merge"]
# NOTE(review): partial excerpt — embedded line numbers jump in many places
# (docstring delimiters, spinner handling, the exception-rename assignment
# around 8551-8552, several else branches, the success/failure returns,
# and parts of the --keep-going reporting loop are missing). Python 2
# syntax (`print` statements, `except Cls, name:`) is used throughout.
# Purpose (from visible lines): rebuild the resume dependency graph from
# mtimedb via resume_depgraph(); on UnsatisfiedResumeDep, queue a detailed
# error report for later display; on success, optionally display the
# merge list, install the new mergelist/digraph on the scheduler, and for
# --keep-going report every dropped merge task as a failed package.
8515 def _calc_resume_list(self):
8517 Use the current resume list to calculate a new one,
8518 dropping any packages with unsatisfied deps.
8520 @returns: True if successful, False otherwise.
8522 print colorize("GOOD", "*** Resuming merge...")
8524 if self._show_list():
8525 if "--tree" in self.myopts:
8526 portage.writemsg_stdout("\n" + \
8527 darkgreen("These are the packages that " + \
8528 "would be merged, in reverse order:\n\n"))
8531 portage.writemsg_stdout("\n" + \
8532 darkgreen("These are the packages that " + \
8533 "would be merged, in order:\n\n"))
8535 show_spinner = "--quiet" not in self.myopts and \
8536 "--nodeps" not in self.myopts
8539 print "Calculating dependencies ",
8541 myparams = create_depgraph_params(self.myopts, None)
8545 success, mydepgraph, dropped_tasks = resume_depgraph(
8546 self.settings, self.trees, self._mtimedb, self.myopts,
8547 myparams, self._spinner)
8548 except depgraph.UnsatisfiedResumeDep, exc:
8549 # rename variable to avoid python-3.0 error:
8550 # SyntaxError: can not delete variable 'e' referenced in nested
# NOTE(review): the assignment binding `e` from `exc` is among the
# missing lines — `e` below would otherwise be undefined.
8553 mydepgraph = e.depgraph
8554 dropped_tasks = set()
8557 print "\b\b... done!"
# Deferred error report shown after mod_echo output is flushed.
8560 def unsatisfied_resume_dep_msg():
8561 mydepgraph.display_problems()
8562 out = portage.output.EOutput()
8563 out.eerror("One or more packages are either masked or " + \
8564 "have missing dependencies:")
8567 show_parents = set()
8569 if dep.parent in show_parents:
8571 show_parents.add(dep.parent)
8572 if dep.atom is None:
8573 out.eerror(indent + "Masked package:")
8574 out.eerror(2 * indent + str(dep.parent))
8577 out.eerror(indent + str(dep.atom) + " pulled in by:")
8578 out.eerror(2 * indent + str(dep.parent))
8580 msg = "The resume list contains packages " + \
8581 "that are either masked or have " + \
8582 "unsatisfied dependencies. " + \
8583 "Please restart/continue " + \
8584 "the operation manually, or use --skipfirst " + \
8585 "to skip the first package in the list and " + \
8586 "any other packages that may be " + \
8587 "masked or have missing dependencies."
8588 for line in textwrap.wrap(msg, 72):
8590 self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
8593 if success and self._show_list():
8594 mylist = mydepgraph.altlist()
8596 if "--tree" in self.myopts:
8598 mydepgraph.display(mylist, favorites=self._favorites)
8601 self._post_mod_echo_msgs.append(mydepgraph.display_problems)
8603 mydepgraph.display_problems()
8605 mylist = mydepgraph.altlist()
8606 mydepgraph.break_refs(mylist)
8607 mydepgraph.break_refs(dropped_tasks)
8608 self._mergelist = mylist
8609 self._set_digraph(mydepgraph.schedulerGraph())
# --keep-going: report each dropped merge task as a failed package.
8612 for task in dropped_tasks:
8613 if not (isinstance(task, Package) and task.operation == "merge"):
8616 msg = "emerge --keep-going:" + \
8619 msg += " for %s" % (pkg.root,)
8620 msg += " dropped due to unsatisfied dependency."
8621 for line in textwrap.wrap(msg, msg_width):
8622 eerror(line, phase="other", key=pkg.cpv)
8623 settings = self.pkgsettings[pkg.root]
8624 # Ensure that log collection from $T is disabled inside
8625 # elog_process(), since any logs that might exist are
8627 settings.pop("T", None)
8628 portage.elog.elog_process(pkg.cpv, settings)
8629 self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
# NOTE(review): partial excerpt — the return statements (presumably
# True inside the branch, False otherwise) are among the missing lines.
# Purpose (from visible lines): decide whether the merge list should be
# displayed — only when not --quiet and one of --ask/--tree/--verbose is
# active.
8633 def _show_list(self):
8634 myopts = self.myopts
8635 if "--quiet" not in myopts and \
8636 ("--ask" in myopts or "--tree" in myopts or \
8637 "--verbose" in myopts):
# NOTE(review): partial excerpt — embedded line numbers jump (docstring
# delimiters, early returns, the world_set.lock() call, the try/finally
# that presumably unlocks the world set, the world_set.add(atom) call, and
# the branch around the failure message are among the missing lines).
# Purpose (from visible lines): record pkg in the "world" favorites file
# after a successful merge, but only when none of the one-shot/pretend
# option families is active, pkg is in the target root, and it matches an
# atom from the command-line argument set.
8641 def _world_atom(self, pkg):
8643 Add the package to the world file, but only if
8644 it's supposed to be added. Otherwise, do nothing.
8647 if set(("--buildpkgonly", "--fetchonly",
8649 "--oneshot", "--onlydeps",
8650 "--pretend")).intersection(self.myopts):
8653 if pkg.root != self.target_root:
8656 args_set = self._args_set
8657 if not args_set.findAtomForPackage(pkg):
8660 logger = self._logger
8661 pkg_count = self._pkg_count
8662 root_config = pkg.root_config
8663 world_set = root_config.sets["world"]
8664 world_locked = False
8665 if hasattr(world_set, "lock"):
8670 if hasattr(world_set, "load"):
8671 world_set.load() # maybe it's changed on disk
8673 atom = create_world_atom(pkg, args_set, root_config)
8675 if hasattr(world_set, "add"):
8676 self._status_msg(('Recording %s in "world" ' + \
8677 'favorites file...') % atom)
8678 logger.log(" === (%s of %s) Updating world file (%s)" % \
8679 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
8682 writemsg_level('\n!!! Unable to record %s in "world"\n' % \
8683 (atom,), level=logging.WARN, noiselevel=-1)
# NOTE(review): partial excerpt — embedded line numbers jump (docstring
# delimiters, the operation="uninstall" branch, the cache-hit return after
# the digraph lookup, and the final `return pkg` are among the missing
# lines).
# Purpose (from visible lines): return a Package instance for cpv — reuse
# the instance stored in the digraph when present, otherwise construct one
# from the dbapi's aux cache metadata; for ebuilds, also compute USE and
# CHOST from the root's pkgsettings.
8688 def _pkg(self, cpv, type_name, root_config, installed=False):
8690 Get a package instance from the cache, or create a new
8691 one if necessary. Raises KeyError from aux_get if it
8692 fails for some reason (package does not exist or is
8697 operation = "nomerge"
8699 if self._digraph is not None:
8700 # Reuse existing instance when available.
8701 pkg = self._digraph.get(
8702 (type_name, root_config.root, cpv, operation))
8706 tree_type = depgraph.pkg_tree_map[type_name]
8707 db = root_config.trees[tree_type].dbapi
8708 db_keys = list(self.trees[root_config.root][
8709 tree_type].dbapi._aux_cache_keys)
8710 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
8711 pkg = Package(cpv=cpv, metadata=metadata,
8712 root_config=root_config, installed=installed)
8713 if type_name == "ebuild":
8714 settings = self.pkgsettings[root_config.root]
8715 settings.setcpv(pkg)
8716 pkg.metadata["USE"] = settings["PORTAGE_USE"]
8717 pkg.metadata['CHOST'] = settings.get('CHOST', '')
8721 class MetadataRegen(PollScheduler):
8723 def __init__(self, portdb, cp_iter=None, consumer=None,
8724 max_jobs=None, max_load=None):
8725 PollScheduler.__init__(self)
8726 self._portdb = portdb
8727 self._global_cleanse = False
8729 cp_iter = self._iter_every_cp()
8730 # We can globally cleanse stale cache only if we
8731 # iterate over every single cp.
8732 self._global_cleanse = True
8733 self._cp_iter = cp_iter
8734 self._consumer = consumer
8736 if max_jobs is None:
8739 self._max_jobs = max_jobs
8740 self._max_load = max_load
8741 self._sched_iface = self._sched_iface_class(
8742 register=self._register,
8743 schedule=self._schedule_wait,
8744 unregister=self._unregister)
8746 self._valid_pkgs = set()
8747 self._cp_set = set()
8748 self._process_iter = self._iter_metadata_processes()
8749 self.returncode = os.EX_OK
8750 self._error_count = 0
8752 def _iter_every_cp(self):
8753 every_cp = self._portdb.cp_all()
8754 every_cp.sort(reverse=True)
8757 yield every_cp.pop()
8761 def _iter_metadata_processes(self):
8762 portdb = self._portdb
8763 valid_pkgs = self._valid_pkgs
8764 cp_set = self._cp_set
8765 consumer = self._consumer
8767 for cp in self._cp_iter:
8769 portage.writemsg_stdout("Processing %s\n" % cp)
8770 cpv_list = portdb.cp_list(cp)
8771 for cpv in cpv_list:
8773 ebuild_path, repo_path = portdb.findname2(cpv)
8774 metadata, st, emtime = portdb._pull_valid_cache(
8775 cpv, ebuild_path, repo_path)
8776 if metadata is not None:
8777 if consumer is not None:
8778 consumer(cpv, ebuild_path,
8779 repo_path, metadata)
8782 yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
8783 ebuild_mtime=emtime,
8784 metadata_callback=portdb._metadata_callback,
8785 portdb=portdb, repo_path=repo_path,
8786 settings=portdb.doebuild_settings)
8790 portdb = self._portdb
8791 from portage.cache.cache_errors import CacheError
8794 while self._schedule():
8800 if self._global_cleanse:
8801 for mytree in portdb.porttrees:
8803 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
8804 except CacheError, e:
8805 portage.writemsg("Error listing cache entries for " + \
8806 "'%s': %s, continuing...\n" % (mytree, e),
8812 cp_set = self._cp_set
8813 cpv_getkey = portage.cpv_getkey
8814 for mytree in portdb.porttrees:
8816 dead_nodes[mytree] = set(cpv for cpv in \
8817 portdb.auxdb[mytree].iterkeys() \
8818 if cpv_getkey(cpv) in cp_set)
8819 except CacheError, e:
8820 portage.writemsg("Error listing cache entries for " + \
8821 "'%s': %s, continuing...\n" % (mytree, e),
8828 for y in self._valid_pkgs:
8829 for mytree in portdb.porttrees:
8830 if portdb.findname2(y, mytree=mytree)[0]:
8831 dead_nodes[mytree].discard(y)
8833 for mytree, nodes in dead_nodes.iteritems():
8834 auxdb = portdb.auxdb[mytree]
8838 except (KeyError, CacheError):
	def _schedule_tasks(self):
		"""
		Start as many queued metadata subprocesses as the job limit
		allows.

		@returns: True if there may be remaining tasks to schedule,
			False otherwise.
		"""
		while self._can_add_job():
			# Pull the next EbuildMetadataPhase from the process iterator.
			metadata_process = self._process_iter.next()
			except StopIteration:

			metadata_process.scheduler = self._sched_iface
			metadata_process.addExitListener(self._metadata_exit)
			metadata_process.start()
	def _metadata_exit(self, metadata_process):
		# Exit listener for EbuildMetadataPhase subprocesses: on failure,
		# record the error, drop the cpv from the valid set, and still
		# notify the consumer (with metadata == None).
		if metadata_process.returncode != os.EX_OK:
			self._error_count += 1
			self._valid_pkgs.discard(metadata_process.cpv)
			portage.writemsg("Error processing %s, continuing...\n" % \
				(metadata_process.cpv,), noiselevel=-1)

			if self._consumer is not None:
				# On failure, still notify the consumer (in this case the metadata
				# argument is None).
				self._consumer(metadata_process.cpv,
					metadata_process.ebuild_path,
					metadata_process.repo_path,
					metadata_process.metadata)
class UninstallFailure(portage.exception.PortageException):
	"""
	An instance of this class is raised by unmerge() when
	an uninstallation fails.
	"""

	# Default exit status, used when no explicit status is supplied.
	status = 1

	def __init__(self, *pargs):
		"""
		@param pargs: optional positional arguments; when given,
			pargs[0] is the exit status of the failed unmerge command.
		"""
		portage.exception.PortageException.__init__(self, pargs)
		# Guard against zero-arg construction: the visible residue
		# indexed pargs[0] unconditionally, which raises IndexError.
		if pargs:
			self.status = pargs[0]
def unmerge(root_config, myopts, unmerge_action,
	unmerge_files, ldpath_mtimes, autoclean=0,
	clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
	scheduler=None, writemsg_level=portage.util.writemsg_level):
	"""
	Uninstall installed packages for the unmerge/prune/clean family of
	actions: resolve the arguments to installed cpvs, partition them
	into selected/protected/omitted groups per cp, display the plan,
	and finally call portage.unmerge() for each selected package.

	NOTE(review): this excerpt is elided — many control-flow lines are
	missing, so the indentation below is reconstructed and should be
	verified against the complete source.
	"""
	# --deselect=n suppresses removal of matching atoms from "world".
		clean_world = myopts.get('--deselect') != 'n'
	quiet = "--quiet" in myopts
	settings = root_config.settings
	sets = root_config.sets
	vartree = root_config.trees["vartree"]
	candidate_catpkgs=[]
	xterm_titles = "notitles" not in settings.features
	out = portage.output.EOutput()
	db_keys = list(vartree.dbapi._aux_cache_keys)
	# Cache-backed construction of installed Package instances.
		pkg = pkg_cache.get(cpv)
			pkg = Package(cpv=cpv, installed=True,
				metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
				root_config=root_config,
				type_name="installed")
			pkg_cache[cpv] = pkg

	vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
		# At least the parent needs to exist for the lock file.
		portage.util.ensure_dirs(vdb_path)
	except portage.exception.PortageException:
		# Hold the vdb lock while computing what to unmerge.
		if os.access(vdb_path, os.W_OK):
			vdb_lock = portage.locks.lockdir(vdb_path)
		realsyslist = sets["system"].getAtoms()
		# Expand system-set virtuals: a virtual with exactly one
		# installed provider protects that provider below.
		for x in realsyslist:
			mycp = portage.dep_getkey(x)
			if mycp in settings.getvirtuals():
				for provider in settings.getvirtuals()[mycp]:
					if vartree.dbapi.match(provider):
						providers.append(provider)
				if len(providers) == 1:
					syslist.extend(providers)
				syslist.append(mycp)

		mysettings = portage.config(clone=settings)

		if not unmerge_files:
			if unmerge_action == "unmerge":
				print bold("emerge unmerge") + " can only be used with specific package names"

		# process all arguments and add all
		# valid db entries to candidate_catpkgs
			if not unmerge_files:
				candidate_catpkgs.extend(vartree.dbapi.cp_all())
			#we've got command-line arguments
			if not unmerge_files:
				print "\nNo packages to unmerge have been provided.\n"
			for x in unmerge_files:
				arg_parts = x.split('/')
				if x[0] not in [".","/"] and \
					arg_parts[-1][-7:] != ".ebuild":
					#possible cat/pkg or dep; treat as such
					candidate_catpkgs.append(x)
				elif unmerge_action in ["prune","clean"]:
					print "\n!!! Prune and clean do not accept individual" + \
						" ebuilds as arguments;\n skipping.\n"
					# it appears that the user is specifying an installed
					# ebuild and we're in "unmerge" mode, so it's ok.
					if not os.path.exists(x):
						print "\n!!! The path '"+x+"' doesn't exist.\n"

					absx = os.path.abspath(x)
					sp_absx = absx.split("/")
					if sp_absx[-1][-7:] == ".ebuild":
						absx = "/".join(sp_absx)

					sp_absx_len = len(sp_absx)

					vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
					vdb_len = len(vdb_path)

					sp_vdb = vdb_path.split("/")
					sp_vdb_len = len(sp_vdb)

					# A path argument must be a vdb entry directory.
					if not os.path.exists(absx+"/CONTENTS"):
						print "!!! Not a valid db dir: "+str(absx)

					if sp_absx_len <= sp_vdb_len:
						# The Path is shorter... so it can't be inside the vdb.
						print "\n!!!",x,"cannot be inside "+ \
							vdb_path+"; aborting.\n"

					for idx in range(0,sp_vdb_len):
						if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
							print "\n!!!", x, "is not inside "+\
								vdb_path+"; aborting.\n"

					print "="+"/".join(sp_absx[sp_vdb_len:])
					candidate_catpkgs.append(
						"="+"/".join(sp_absx[sp_vdb_len:]))

		if (not "--quiet" in myopts):
		if settings["ROOT"] != "/":
			writemsg_level(darkgreen(newline+ \
				">>> Using system located in ROOT tree %s\n" % \

		if (("--pretend" in myopts) or ("--ask" in myopts)) and \
			not ("--quiet" in myopts):
			writemsg_level(darkgreen(newline+\
				">>> These are the packages that would be unmerged:\n"))

		# Preservation of order is required for --depclean and --prune so
		# that dependencies are respected. Use all_selected to eliminate
		# duplicate packages since the same package may be selected by
		all_selected = set()
		for x in candidate_catpkgs:
			# cycle through all our candidate deps and determine
			# what will and will not get unmerged
				mymatch = vartree.dbapi.match(x)
			except portage.exception.AmbiguousPackageName, errpkgs:
				print "\n\n!!! The short ebuild name \"" + \
					x + "\" is ambiguous. Please specify"
				print "!!! one of the following fully-qualified " + \
					"ebuild names instead:\n"
				for i in errpkgs[0]:
					print " " + green(i)

			if not mymatch and x[0] not in "<>=~":
				mymatch = localtree.dep_match(x)
				portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
					(x, unmerge_action), noiselevel=-1)
				{"protected": set(), "selected": set(), "omitted": set()})
			mykey = len(pkgmap) - 1
			if unmerge_action=="unmerge":
					if y not in all_selected:
						pkgmap[mykey]["selected"].add(y)
			elif unmerge_action == "prune":
				if len(mymatch) == 1:
				# Find the best installed version: prefer the highest
				# counter within a slot, otherwise portage.best().
				best_version = mymatch[0]
				best_slot = vartree.getslot(best_version)
				best_counter = vartree.dbapi.cpv_counter(best_version)
				for mypkg in mymatch[1:]:
					myslot = vartree.getslot(mypkg)
					mycounter = vartree.dbapi.cpv_counter(mypkg)
					if (myslot == best_slot and mycounter > best_counter) or \
						mypkg == portage.best([mypkg, best_version]):
						if myslot == best_slot:
							if mycounter < best_counter:
								# On slot collision, keep the one with the
								# highest counter since it is the most
								# recently installed.
						best_version = mypkg
						best_counter = mycounter
				pkgmap[mykey]["protected"].add(best_version)
				pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
					if mypkg != best_version and mypkg not in all_selected)
				all_selected.update(pkgmap[mykey]["selected"])
				# unmerge_action == "clean"
				for mypkg in mymatch:
					if unmerge_action == "clean":
						myslot = localtree.getslot(mypkg)
						# since we're pruning, we don't care about slots
						# and put all the pkgs in together
					if myslot not in slotmap:
						slotmap[myslot] = {}
					slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg

				for mypkg in vartree.dbapi.cp_list(
					portage.dep_getkey(mymatch[0])):
					myslot = vartree.getslot(mypkg)
					if myslot not in slotmap:
						slotmap[myslot] = {}
					slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg

				for myslot in slotmap:
					counterkeys = slotmap[myslot].keys()
					# Protect the last counter key in each slot —
					# presumably the highest (most recently installed);
					# the sort itself is not visible in this excerpt.
					pkgmap[mykey]["protected"].add(
						slotmap[myslot][counterkeys[-1]])

					for counter in counterkeys[:]:
						mypkg = slotmap[myslot][counter]
						if mypkg not in mymatch:
							counterkeys.remove(counter)
							pkgmap[mykey]["protected"].add(
								slotmap[myslot][counter])

					#be pretty and get them in order of merge:
					for ckey in counterkeys:
						mypkg = slotmap[myslot][ckey]
						if mypkg not in all_selected:
							pkgmap[mykey]["selected"].add(mypkg)
							all_selected.add(mypkg)
			# ok, now the last-merged package
			# is protected, and the rest are selected
		numselected = len(all_selected)
		if global_unmerge and not numselected:
			portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
			portage.writemsg_stdout(
				"\n>>> No packages selected for removal by " + \
				unmerge_action + "\n")
			vartree.dbapi.flush_cache()
			portage.locks.unlockdir(vdb_lock)

	from portage.sets.base import EditablePackageSet

	# generate a list of package sets that are directly or indirectly listed in "world",
	# as there is no persistent list of "installed" sets
	installed_sets = ["world"]
		pos = len(installed_sets)
		for s in installed_sets[pos - 1:]:
			candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
			installed_sets += candidates
	installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]

	# we don't want to unmerge packages that are still listed in user-editable package sets
	# listed in "world" as they would be remerged on the next update of "world" or the
	# relevant package sets.
	unknown_sets = set()
	for cp in xrange(len(pkgmap)):
		for cpv in pkgmap[cp]["selected"].copy():
				# It could have been uninstalled
				# by a concurrent process.

			# Never let portage unmerge itself on the live root.
			if unmerge_action != "clean" and \
				root_config.root == "/" and \
				portage.match_from_list(
					portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
				msg = ("Not unmerging package %s since there is no valid " + \
					"reason for portage to unmerge itself.") % (pkg.cpv,)
				for line in textwrap.wrap(msg, 75):
				# adjust pkgmap so the display output is correct
				pkgmap[cp]["selected"].remove(cpv)
				all_selected.remove(cpv)
				pkgmap[cp]["protected"].add(cpv)

			for s in installed_sets:
				# skip sets that the user requested to unmerge, and skip world
				# unless we're unmerging a package set (as the package would be
				# removed from "world" later on)
				if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):

				if s in unknown_sets:
					out = portage.output.EOutput()
					out.eerror(("Unknown set '@%s' in " + \
						"%svar/lib/portage/world_sets") % \
						(s, root_config.root))

				# only check instances of EditablePackageSet as other classes are generally used for
				# special purposes and can be ignored here (and are usually generated dynamically, so the
				# user can't do much about them anyway)
				if isinstance(sets[s], EditablePackageSet):
					# This is derived from a snippet of code in the
					# depgraph._iter_atoms_for_pkg() method.
					for atom in sets[s].iterAtomsForPackage(pkg):
						inst_matches = vartree.dbapi.match(atom)
						inst_matches.reverse() # descending order
						for inst_cpv in inst_matches:
								inst_pkg = _pkg(inst_cpv)
								# It could have been uninstalled
								# by a concurrent process.

							if inst_pkg.cp != atom.cp:
								# This is descending order, and we're not
								# interested in any versions <= pkg given.
							if pkg.slot_atom != inst_pkg.slot_atom:
								higher_slot = inst_pkg
						if higher_slot is None:

				#print colorize("WARN", "Package %s is going to be unmerged," % cpv)
				#print colorize("WARN", "but still listed in the following package sets:")
				#print " %s\n" % ", ".join(parents)
				print colorize("WARN", "Not unmerging package %s as it is" % cpv)
				print colorize("WARN", "still referenced by the following package sets:")
				print " %s\n" % ", ".join(parents)
				# adjust pkgmap so the display output is correct
				pkgmap[cp]["selected"].remove(cpv)
				all_selected.remove(cpv)
				pkgmap[cp]["protected"].add(cpv)

	numselected = len(all_selected)
			"\n>>> No packages selected for removal by " + \
			unmerge_action + "\n")

	# Unmerge order only matters in some cases
			selected = d["selected"]
			cp = portage.cpv_getkey(iter(selected).next())
			cp_dict = unordered.get(cp)
				unordered[cp] = cp_dict
			for k, v in d.iteritems():
				cp_dict[k].update(v)
		pkgmap = [unordered[cp] for cp in sorted(unordered)]

	for x in xrange(len(pkgmap)):
		selected = pkgmap[x]["selected"]
		for mytype, mylist in pkgmap[x].iteritems():
			if mytype == "selected":
			mylist.difference_update(all_selected)
		cp = portage.cpv_getkey(iter(selected).next())
		# Everything else installed for this cp is "omitted".
		for y in localtree.dep_match(cp):
			if y not in pkgmap[x]["omitted"] and \
				y not in pkgmap[x]["selected"] and \
				y not in pkgmap[x]["protected"] and \
				y not in all_selected:
				pkgmap[x]["omitted"].add(y)
		if global_unmerge and not pkgmap[x]["selected"]:
			#avoid cluttering the preview printout with stuff that isn't getting unmerged
		if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
			writemsg_level(colorize("BAD","\a\n\n!!! " + \
				"'%s' is part of your system profile.\n" % cp),
				level=logging.WARNING, noiselevel=-1)
			writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
				"be damaging to your system.\n\n"),
				level=logging.WARNING, noiselevel=-1)
			if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
				countdown(int(settings["EMERGE_WARNING_DELAY"]),
					colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
			writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
			writemsg_level(bold(cp) + ": ", noiselevel=-1)
		for mytype in ["selected","protected","omitted"]:
				writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
			if pkgmap[x][mytype]:
				sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
				sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
				for pn, ver, rev in sorted_pkgs:
						myversion = ver + "-" + rev
					if mytype == "selected":
							colorize("UNMERGE_WARN", myversion + " "),
							colorize("GOOD", myversion + " "), noiselevel=-1)
				writemsg_level("none ", noiselevel=-1)
			writemsg_level("\n", noiselevel=-1)
		writemsg_level("\n", noiselevel=-1)

	writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
		" packages are slated for removal.\n")
	writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
		" and " + colorize("GOOD", "'omitted'") + \
		" packages will not be removed.\n\n")

	if "--pretend" in myopts:
		#we're done... return
	if "--ask" in myopts:
		if userquery("Would you like to unmerge these packages?")=="No":
			# enter pretend mode for correct formatting of results
			myopts["--pretend"] = True

	#the real unmerging begins, after a short delay....
	if clean_delay and not autoclean:
		countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")

	for x in xrange(len(pkgmap)):
		for y in pkgmap[x]["selected"]:
			writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
			emergelog(xterm_titles, "=== Unmerging... ("+y+")")
			mysplit = y.split("/")
			retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
				mysettings, unmerge_action not in ["clean","prune"],
				vartree=vartree, ldpath_mtimes=ldpath_mtimes,
				scheduler=scheduler)

			if retval != os.EX_OK:
				emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
					raise UninstallFailure(retval)
				if clean_world and hasattr(sets["world"], "cleanPackage"):
					sets["world"].cleanPackage(vartree.dbapi, y)
				emergelog(xterm_titles, " >>> unmerge success: "+y)
	if clean_world and hasattr(sets["world"], "remove"):
		for s in root_config.setconfig.active:
			sets["world"].remove(SETPREFIX+s)
def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
	# Regenerate the GNU info directory index (via install-info) for any
	# info dir whose mtime changed since the last merge session.
	# NOTE(review): elided excerpt — indentation reconstructed.
	if os.path.exists("/usr/bin/install-info"):
		out = portage.output.EOutput()
			inforoot=normpath(root+z)
			if os.path.isdir(inforoot):
				infomtime = long(os.stat(inforoot).st_mtime)
				if inforoot not in prev_mtimes or \
					prev_mtimes[inforoot] != infomtime:
					regen_infodirs.append(inforoot)

		if not regen_infodirs:
			portage.writemsg_stdout("\n")
			out.einfo("GNU info directory index is up-to-date.")
			portage.writemsg_stdout("\n")
			out.einfo("Regenerating GNU info directory index...")

			dir_extensions = ("", ".gz", ".bz2")
			for inforoot in regen_infodirs:
				# Skip unwritable or vanished info directories.
				if not os.path.isdir(inforoot) or \
					not os.access(inforoot, os.W_OK):

				file_list = os.listdir(inforoot)
				dir_file = os.path.join(inforoot, "dir")
				moved_old_dir = False
					if x.startswith(".") or \
						os.path.isdir(os.path.join(inforoot, x)):
					if x.startswith("dir"):
						# Skip the generated "dir" index files themselves.
						for ext in dir_extensions:
							if x == "dir" + ext or \
								x == "dir" + ext + ".old":
					if processed_count == 0:
						# Move aside any preexisting dir files before the
						# first install-info call regenerates them.
						for ext in dir_extensions:
								os.rename(dir_file + ext, dir_file + ext + ".old")
								moved_old_dir = True
							except EnvironmentError, e:
								if e.errno != errno.ENOENT:
					processed_count += 1
					myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
					existsstr="already exists, for file `"
						if re.search(existsstr,myso):
							# Already exists... Don't increment the count for this.
						elif myso[:44]=="install-info: warning: no info dir entry in ":
							# This info file doesn't contain a DIR-header: install-info produces this
							# (harmless) warning (the --quiet switch doesn't seem to work).
							# Don't increment the count for this.
							errmsg += myso + "\n"

				if moved_old_dir and not os.path.exists(dir_file):
					# We didn't generate a new dir file, so put the old file
					# back where it was originally found.
					for ext in dir_extensions:
							os.rename(dir_file + ext + ".old", dir_file + ext)
						except EnvironmentError, e:
							if e.errno != errno.ENOENT:

				# Clean dir.old cruft so that they don't prevent
				# unmerge of otherwise empty directories.
				for ext in dir_extensions:
						os.unlink(dir_file + ext + ".old")
					except EnvironmentError, e:
						if e.errno != errno.ENOENT:

				#update mtime so we can potentially avoid regenerating.
				prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)

			out.eerror("Processed %d info files; %d errors." % \
			writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
				out.einfo("Processed %d info files." % (icount,))
def display_news_notification(root_config, myopts):
	# Check every configured repository for unread news items and print
	# a pointer to "eselect news" when any are found.
	target_root = root_config.root
	trees = root_config.trees
	settings = trees["vartree"].settings
	portdb = trees["porttree"].dbapi
	vardb = trees["vartree"].dbapi
	NEWS_PATH = os.path.join("metadata", "news")
	UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
	newsReaderDisplay = False
	# Don't update the unread-items state in --pretend mode.
	update = "--pretend" not in myopts

	for repo in portdb.getRepositories():
		unreadItems = checkUpdatedNewsItems(
			portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
			if not newsReaderDisplay:
				newsReaderDisplay = True
			print colorize("WARN", " * IMPORTANT:"),
			print "%s news items need reading for repository '%s'." % (unreadItems, repo)

	if newsReaderDisplay:
		print colorize("WARN", " *"),
		print "Use " + colorize("GOOD", "eselect news") + " to read news items."
def display_preserved_libs(vardbapi):
	# Report preserved libraries still registered on the system, with the
	# consumers of each library and the packages owning those consumers.
	# NOTE(review): elided excerpt — indentation reconstructed.

	# Ensure the registry is consistent with existing files.
	vardbapi.plib_registry.pruneNonExisting()

	if vardbapi.plib_registry.hasEntries():
		print colorize("WARN", "!!!") + " existing preserved libs:"
		plibdata = vardbapi.plib_registry.getPreservedLibs()
		linkmap = vardbapi.linkmap
		linkmap_broken = False
		except portage.exception.CommandNotFound, e:
			writemsg_level("!!! Command Not Found: %s\n" % (e,),
				level=logging.ERROR, noiselevel=-1)
			linkmap_broken = True
			search_for_owners = set()
			for cpv in plibdata:
				internal_plib_keys = set(linkmap._obj_key(f) \
					for f in plibdata[cpv])
				for f in plibdata[cpv]:
					if f in consumer_map:
					for c in linkmap.findConsumers(f):
						# Filter out any consumers that are also preserved libs
						# belonging to the same package as the provider.
						if linkmap._obj_key(c) not in internal_plib_keys:
					consumer_map[f] = consumers
					# One extra consumer lets us detect the "+1" case below.
					search_for_owners.update(consumers[:MAX_DISPLAY+1])

			owners = vardbapi._owners.getFileOwnerMap(search_for_owners)

		for cpv in plibdata:
			print colorize("WARN", ">>>") + " package: %s" % cpv
			# Group alternate paths that resolve to the same object.
			for f in plibdata[cpv]:
				obj_key = linkmap._obj_key(f)
				alt_paths = samefile_map.get(obj_key)
				if alt_paths is None:
					samefile_map[obj_key] = alt_paths

			for alt_paths in samefile_map.itervalues():
				alt_paths = sorted(alt_paths)
					print colorize("WARN", " * ") + " - %s" % (p,)
				consumers = consumer_map.get(f, [])
				for c in consumers[:MAX_DISPLAY]:
					print colorize("WARN", " * ") + " used by %s (%s)" % \
						(c, ", ".join(x.mycpv for x in owners.get(c, [])))
				if len(consumers) == MAX_DISPLAY + 1:
					print colorize("WARN", " * ") + " used by %s (%s)" % \
						(consumers[MAX_DISPLAY], ", ".join(x.mycpv \
						for x in owners.get(consumers[MAX_DISPLAY], [])))
				elif len(consumers) > MAX_DISPLAY:
					print colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY)
		print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
def _flush_elog_mod_echo():
	"""
	Dump the mod_echo output now so that our other
	notifications are shown last.

	@returns: True if messages were shown, False otherwise.
	"""
	messages_shown = False
		from portage.elog import mod_echo
		pass # happens during downgrade to a version without the module
		messages_shown = bool(mod_echo._items)
	return messages_shown
def post_emerge(root_config, myopts, mtimedb, retval):
	"""
	Misc. things to run at the end of a merge session.

	Display preserved libs warnings

	@param trees: A dictionary mapping each ROOT to it's package databases
	@param mtimedb: The mtimeDB to store data needed across merge invocations
	@type mtimedb: MtimeDB class instance
	@param retval: Emerge's return value

	1. Calls sys.exit(retval)
	"""
	target_root = root_config.root
	trees = { target_root : root_config.trees }
	vardbapi = trees[target_root]["vartree"].dbapi
	settings = vardbapi.settings
	info_mtimes = mtimedb["info"]

	# Load the most current variables from ${ROOT}/etc/profile.env
	settings.regenerate()

	config_protect = settings.get("CONFIG_PROTECT","").split()
	infodirs = settings.get("INFOPATH","").split(":") + \
		settings.get("INFODIR","").split(":")

	if retval == os.EX_OK:
		exit_msg = " *** exiting successfully."
		exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
	emergelog("notitles" not in settings.features, exit_msg)

	_flush_elog_mod_echo()

	# An unchanged counter hash means the vdb was not modified.
	counter_hash = settings.get("PORTAGE_COUNTER_HASH")
	if "--pretend" in myopts or (counter_hash is not None and \
		counter_hash == vardbapi._counter_hash()):
		display_news_notification(root_config, myopts)
		# If vdb state has not changed then there's nothing else to do.

	vdb_path = os.path.join(target_root, portage.VDB_PATH)
	portage.util.ensure_dirs(vdb_path)
	if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
		vdb_lock = portage.locks.lockdir(vdb_path)

			if "noinfo" not in settings.features:
				chk_updated_info_files(target_root,
					infodirs, info_mtimes, retval)
				portage.locks.unlockdir(vdb_lock)

	chk_updated_cfg_files(target_root, config_protect)

	display_news_notification(root_config, myopts)
	if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
		display_preserved_libs(vardbapi)
def chk_updated_cfg_files(target_root, config_protect):
	# Scan each CONFIG_PROTECT path for pending ._cfg????_* update files
	# and tell the user how many config files need attention.
	# NOTE(review): elided excerpt — indentation reconstructed.
		#number of directories with some protect files in them
		for x in config_protect:
			x = os.path.join(target_root, x.lstrip(os.path.sep))
			if not os.access(x, os.W_OK):
				# Avoid Permission denied errors generated
			mymode = os.lstat(x).st_mode
			if stat.S_ISLNK(mymode):
				# We want to treat it like a directory if it
				# is a symlink to an existing directory.
					real_mode = os.stat(x).st_mode
					if stat.S_ISDIR(real_mode):
			if stat.S_ISDIR(mymode):
				mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
				mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
					os.path.split(x.rstrip(os.path.sep))
			mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
			a = commands.getstatusoutput(mycommand)
				sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
				# Show the error message alone, sending stdout to /dev/null.
				os.system(mycommand + " 1>/dev/null")
				# find -print0 output: entries are NUL-separated.
				files = a[1].split('\0')
				# split always produces an empty string as the last element
				if files and not files[-1]:
					print "\n"+colorize("WARN", " * IMPORTANT:"),
					if stat.S_ISDIR(mymode):
						print "%d config files in '%s' need updating." % \
						print "config file '%s' needs updating." % x

		print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
			" section of the " + bold("emerge")
		print " "+yellow("*")+" man page to learn how to update config files."
def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
	"""
	Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
	Returns the number of unread (yet relevent) items.

	@param portdb: a portage tree database
	@type portdb: pordbapi
	@param vardb: an installed package database
	@type vardb: vardbapi

	1. The number of unread but relevant news items.
	"""
	# Delegate all the work to NewsManager (imported lazily here).
	from portage.news import NewsManager
	manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
	return manager.getUnreadItems( repo_id, update=update )
def insert_category_into_atom(atom, category):
	"""
	Insert "category/" into an atom directly before the package name,
	i.e. before the first word character (skipping any leading
	operator/version prefix characters such as ">=").

	@param atom: a package atom string, possibly with operator prefix
	@param category: the category name to insert
	@returns: the new atom string, or None when the atom contains no
		word character (no position for a package name exists).
	"""
	alphanum = re.search(r'\w', atom)
	# Guard the match: the visible residue called alphanum.start()
	# unconditionally and never returned a value.
	if alphanum:
		ret = atom[:alphanum.start()] + "%s/" % category + \
			atom[alphanum.start():]
	else:
		ret = None
	return ret
def is_valid_package_atom(x):
	"""
	Return True if x is usable as a package atom, allowing an atom
	without a category part: when x has no "/", a dummy "cat/" category
	is inserted before the package name so that portage.isvalidatom()
	can evaluate the rest of the atom.

	@param x: candidate atom string
	@returns: result of portage.isvalidatom() on the (possibly
		category-augmented) atom.
	"""
	if "/" not in x:
		alphanum = re.search(r'\w', x)
		# Guard the match: the visible residue called alphanum.start()
		# unconditionally and inserted "cat/" even into atoms that
		# already carry a category, invalidating them.
		if alphanum:
			x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
	return portage.isvalidatom(x)
def show_blocker_docs_link():
	# Point the user at the Gentoo Handbook section on blocked packages.
	print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
	print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
	print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
9798 def show_mask_docs():
9799 print "For more information, see the MASKED PACKAGES section in the emerge"
9800 print "man page or refer to the Gentoo Handbook."
9802 def action_sync(settings, trees, mtimedb, myopts, myaction):
9803 xterm_titles = "notitles" not in settings.features
9804 emergelog(xterm_titles, " === sync")
9805 portdb = trees[settings["ROOT"]]["porttree"].dbapi
9806 myportdir = portdb.porttree_root
9807 out = portage.output.EOutput()
9809 sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
9811 if myportdir[-1]=="/":
9812 myportdir=myportdir[:-1]
9814 st = os.stat(myportdir)
9818 print ">>>",myportdir,"not found, creating it."
9819 os.makedirs(myportdir,0755)
9820 st = os.stat(myportdir)
9823 spawn_kwargs["env"] = settings.environ()
9824 if 'usersync' in settings.features and \
9825 portage.data.secpass >= 2 and \
9826 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
9827 st.st_gid != os.getgid() and st.st_mode & 0070):
9829 homedir = pwd.getpwuid(st.st_uid).pw_dir
9833 # Drop privileges when syncing, in order to match
9834 # existing uid/gid settings.
9835 spawn_kwargs["uid"] = st.st_uid
9836 spawn_kwargs["gid"] = st.st_gid
9837 spawn_kwargs["groups"] = [st.st_gid]
9838 spawn_kwargs["env"]["HOME"] = homedir
9840 if not st.st_mode & 0020:
9841 umask = umask | 0020
9842 spawn_kwargs["umask"] = umask
9844 syncuri = settings.get("SYNC", "").strip()
9846 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
9847 noiselevel=-1, level=logging.ERROR)
9850 vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
9851 vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
9855 updatecache_flg = False
9856 if myaction == "metadata":
9857 print "skipping sync"
9858 updatecache_flg = True
9859 elif ".git" in vcs_dirs:
9860 # Update existing git repository, and ignore the syncuri. We are
9861 # going to trust the user and assume that the user is in the branch
9862 # that he/she wants updated. We'll let the user manage branches with
9864 if portage.process.find_binary("git") is None:
9865 msg = ["Command not found: git",
9866 "Type \"emerge dev-util/git\" to enable git support."]
9868 writemsg_level("!!! %s\n" % l,
9869 level=logging.ERROR, noiselevel=-1)
9871 msg = ">>> Starting git pull in %s..." % myportdir
9872 emergelog(xterm_titles, msg )
9873 writemsg_level(msg + "\n")
9874 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
9875 (portage._shell_quote(myportdir),), **spawn_kwargs)
9876 if exitcode != os.EX_OK:
9877 msg = "!!! git pull error in %s." % myportdir
9878 emergelog(xterm_titles, msg)
9879 writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
9881 msg = ">>> Git pull in %s successful" % myportdir
9882 emergelog(xterm_titles, msg)
9883 writemsg_level(msg + "\n")
9884 exitcode = git_sync_timestamps(settings, myportdir)
9885 if exitcode == os.EX_OK:
9886 updatecache_flg = True
9887 elif syncuri[:8]=="rsync://":
9888 for vcs_dir in vcs_dirs:
9889 writemsg_level(("!!! %s appears to be under revision " + \
9890 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
9891 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
9893 if not os.path.exists("/usr/bin/rsync"):
9894 print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
9895 print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
9900 if settings["PORTAGE_RSYNC_OPTS"] == "":
9901 portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
9903 "--recursive", # Recurse directories
9904 "--links", # Consider symlinks
9905 "--safe-links", # Ignore links outside of tree
9906 "--perms", # Preserve permissions
9907 "--times", # Preserive mod times
9908 "--compress", # Compress the data transmitted
9909 "--force", # Force deletion on non-empty dirs
9910 "--whole-file", # Don't do block transfers, only entire files
9911 "--delete", # Delete files that aren't in the master tree
9912 "--stats", # Show final statistics about what was transfered
9913 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
9914 "--exclude=/distfiles", # Exclude distfiles from consideration
9915 "--exclude=/local", # Exclude local from consideration
9916 "--exclude=/packages", # Exclude packages from consideration
9920 # The below validation is not needed when using the above hardcoded
9923 portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
9925 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
9926 for opt in ("--recursive", "--times"):
9927 if opt not in rsync_opts:
9928 portage.writemsg(yellow("WARNING:") + " adding required option " + \
9929 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
9930 rsync_opts.append(opt)
9932 for exclude in ("distfiles", "local", "packages"):
9933 opt = "--exclude=/%s" % exclude
9934 if opt not in rsync_opts:
9935 portage.writemsg(yellow("WARNING:") + \
9936 " adding required option %s not included in " % opt + \
9937 "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
9938 rsync_opts.append(opt)
9940 if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
9941 def rsync_opt_startswith(opt_prefix):
9942 for x in rsync_opts:
9943 if x.startswith(opt_prefix):
9947 if not rsync_opt_startswith("--timeout="):
9948 rsync_opts.append("--timeout=%d" % mytimeout)
9950 for opt in ("--compress", "--whole-file"):
9951 if opt not in rsync_opts:
9952 portage.writemsg(yellow("WARNING:") + " adding required option " + \
9953 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
9954 rsync_opts.append(opt)
9956 if "--quiet" in myopts:
9957 rsync_opts.append("--quiet") # Shut up a lot
9959 rsync_opts.append("--verbose") # Print filelist
9961 if "--verbose" in myopts:
9962 rsync_opts.append("--progress") # Progress meter for each file
9964 if "--debug" in myopts:
9965 rsync_opts.append("--checksum") # Force checksum on all files
9967 # Real local timestamp file.
9968 servertimestampfile = os.path.join(
9969 myportdir, "metadata", "timestamp.chk")
9971 content = portage.util.grabfile(servertimestampfile)
9975 mytimestamp = time.mktime(time.strptime(content[0],
9976 "%a, %d %b %Y %H:%M:%S +0000"))
9977 except (OverflowError, ValueError):
9982 rsync_initial_timeout = \
9983 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
9985 rsync_initial_timeout = 15
9988 maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
9989 except SystemExit, e:
9990 raise # Needed else can't exit
9992 maxretries=3 #default number of retries
9995 user_name, hostname, port = re.split(
9996 "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
9999 if user_name is None:
10001 updatecache_flg=True
10002 all_rsync_opts = set(rsync_opts)
10003 extra_rsync_opts = shlex.split(
10004 settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
10005 all_rsync_opts.update(extra_rsync_opts)
10006 family = socket.AF_INET
10007 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
10008 family = socket.AF_INET
10009 elif socket.has_ipv6 and \
10010 ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
10011 family = socket.AF_INET6
10013 SERVER_OUT_OF_DATE = -1
10014 EXCEEDED_MAX_RETRIES = -2
10020 for addrinfo in socket.getaddrinfo(
10021 hostname, None, family, socket.SOCK_STREAM):
10022 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
10023 # IPv6 addresses need to be enclosed in square brackets
10024 ips.append("[%s]" % addrinfo[4][0])
10026 ips.append(addrinfo[4][0])
10027 from random import shuffle
10029 except SystemExit, e:
10030 raise # Needed else can't exit
10031 except Exception, e:
10032 print "Notice:",str(e)
10037 dosyncuri = syncuri.replace(
10038 "//" + user_name + hostname + port + "/",
10039 "//" + user_name + ips[0] + port + "/", 1)
10040 except SystemExit, e:
10041 raise # Needed else can't exit
10042 except Exception, e:
10043 print "Notice:",str(e)
10047 if "--ask" in myopts:
10048 if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
10053 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
10054 if "--quiet" not in myopts:
10055 print ">>> Starting rsync with "+dosyncuri+"..."
10057 emergelog(xterm_titles,
10058 ">>> Starting retry %d of %d with %s" % \
10059 (retries,maxretries,dosyncuri))
10060 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
10062 if mytimestamp != 0 and "--quiet" not in myopts:
10063 print ">>> Checking server timestamp ..."
10065 rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
10067 if "--debug" in myopts:
10070 exitcode = os.EX_OK
10071 servertimestamp = 0
10072 # Even if there's no timestamp available locally, fetch the
10073 # timestamp anyway as an initial probe to verify that the server is
10074 # responsive. This protects us from hanging indefinitely on a
10075 # connection attempt to an unresponsive server which rsync's
10076 # --timeout option does not prevent.
10078 # Temporary file for remote server timestamp comparison.
10079 from tempfile import mkstemp
10080 fd, tmpservertimestampfile = mkstemp()
10082 mycommand = rsynccommand[:]
10083 mycommand.append(dosyncuri.rstrip("/") + \
10084 "/metadata/timestamp.chk")
10085 mycommand.append(tmpservertimestampfile)
10089 def timeout_handler(signum, frame):
10090 raise portage.exception.PortageException("timed out")
10091 signal.signal(signal.SIGALRM, timeout_handler)
10092 # Timeout here in case the server is unresponsive. The
10093 # --timeout rsync option doesn't apply to the initial
10094 # connection attempt.
10095 if rsync_initial_timeout:
10096 signal.alarm(rsync_initial_timeout)
10098 mypids.extend(portage.process.spawn(
10099 mycommand, env=settings.environ(), returnpid=True))
10100 exitcode = os.waitpid(mypids[0], 0)[1]
10101 content = portage.grabfile(tmpservertimestampfile)
10103 if rsync_initial_timeout:
10106 os.unlink(tmpservertimestampfile)
10109 except portage.exception.PortageException, e:
10113 if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
10114 os.kill(mypids[0], signal.SIGTERM)
10115 os.waitpid(mypids[0], 0)
10116 # This is the same code rsync uses for timeout.
10119 if exitcode != os.EX_OK:
10120 if exitcode & 0xff:
10121 exitcode = (exitcode & 0xff) << 8
10123 exitcode = exitcode >> 8
10125 portage.process.spawned_pids.remove(mypids[0])
10128 servertimestamp = time.mktime(time.strptime(
10129 content[0], "%a, %d %b %Y %H:%M:%S +0000"))
10130 except (OverflowError, ValueError):
10132 del mycommand, mypids, content
10133 if exitcode == os.EX_OK:
10134 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
10135 emergelog(xterm_titles,
10136 ">>> Cancelling sync -- Already current.")
10139 print ">>> Timestamps on the server and in the local repository are the same."
10140 print ">>> Cancelling all further sync action. You are already up to date."
10142 print ">>> In order to force sync, remove '%s'." % servertimestampfile
10146 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
10147 emergelog(xterm_titles,
10148 ">>> Server out of date: %s" % dosyncuri)
10151 print ">>> SERVER OUT OF DATE: %s" % dosyncuri
10153 print ">>> In order to force sync, remove '%s'." % servertimestampfile
10156 exitcode = SERVER_OUT_OF_DATE
10157 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
10159 mycommand = rsynccommand + [dosyncuri+"/", myportdir]
10160 exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
10161 if exitcode in [0,1,3,4,11,14,20,21]:
10163 elif exitcode in [1,3,4,11,14,20,21]:
10166 # Code 2 indicates protocol incompatibility, which is expected
10167 # for servers with protocol < 29 that don't support
10168 # --prune-empty-directories. Retry for a server that supports
10169 # at least rsync protocol version 29 (>=rsync-2.6.4).
10174 if retries<=maxretries:
10175 print ">>> Retrying..."
10180 updatecache_flg=False
10181 exitcode = EXCEEDED_MAX_RETRIES
10185 emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
10186 elif exitcode == SERVER_OUT_OF_DATE:
10188 elif exitcode == EXCEEDED_MAX_RETRIES:
10190 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
10195 msg.append("Rsync has reported that there is a syntax error. Please ensure")
10196 msg.append("that your SYNC statement is proper.")
10197 msg.append("SYNC=" + settings["SYNC"])
10199 msg.append("Rsync has reported that there is a File IO error. Normally")
10200 msg.append("this means your disk is full, but can be caused by corruption")
10201 msg.append("on the filesystem that contains PORTDIR. Please investigate")
10202 msg.append("and try again after the problem has been fixed.")
10203 msg.append("PORTDIR=" + settings["PORTDIR"])
10205 msg.append("Rsync was killed before it finished.")
10207 msg.append("Rsync has not successfully finished. It is recommended that you keep")
10208 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
10209 msg.append("to use rsync due to firewall or other restrictions. This should be a")
10210 msg.append("temporary problem unless complications exist with your network")
10211 msg.append("(and possibly your system's filesystem) configuration.")
10215 elif syncuri[:6]=="cvs://":
10216 if not os.path.exists("/usr/bin/cvs"):
10217 print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
10218 print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
10220 cvsroot=syncuri[6:]
10221 cvsdir=os.path.dirname(myportdir)
10222 if not os.path.exists(myportdir+"/CVS"):
10224 print ">>> Starting initial cvs checkout with "+syncuri+"..."
10225 if os.path.exists(cvsdir+"/gentoo-x86"):
10226 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
10229 os.rmdir(myportdir)
10231 if e.errno != errno.ENOENT:
10233 "!!! existing '%s' directory; exiting.\n" % myportdir)
10236 if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
10237 print "!!! cvs checkout error; exiting."
10239 os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
10242 print ">>> Starting cvs update with "+syncuri+"..."
10243 retval = portage.process.spawn_bash(
10244 "cd %s; cvs -z0 -q update -dP" % \
10245 (portage._shell_quote(myportdir),), **spawn_kwargs)
10246 if retval != os.EX_OK:
10248 dosyncuri = syncuri
10250 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
10251 noiselevel=-1, level=logging.ERROR)
10254 if updatecache_flg and \
10255 myaction != "metadata" and \
10256 "metadata-transfer" not in settings.features:
10257 updatecache_flg = False
10259 # Reload the whole config from scratch.
10260 settings, trees, mtimedb = load_emerge_config(trees=trees)
10261 root_config = trees[settings["ROOT"]]["root_config"]
10262 portdb = trees[settings["ROOT"]]["porttree"].dbapi
10264 if updatecache_flg and \
10265 os.path.exists(os.path.join(myportdir, 'metadata', 'cache')):
10267 # Only update cache for myportdir since that's
10268 # the only one that's been synced here.
10269 action_metadata(settings, portdb, myopts, porttrees=[myportdir])
10271 if portage._global_updates(trees, mtimedb["updates"]):
10273 # Reload the whole config from scratch.
10274 settings, trees, mtimedb = load_emerge_config(trees=trees)
10275 portdb = trees[settings["ROOT"]]["porttree"].dbapi
10276 root_config = trees[settings["ROOT"]]["root_config"]
10278 mybestpv = portdb.xmatch("bestmatch-visible",
10279 portage.const.PORTAGE_PACKAGE_ATOM)
10280 mypvs = portage.best(
10281 trees[settings["ROOT"]]["vartree"].dbapi.match(
10282 portage.const.PORTAGE_PACKAGE_ATOM))
10284 chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
10286 if myaction != "metadata":
10287 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
10288 retval = portage.process.spawn(
10289 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
10290 dosyncuri], env=settings.environ())
10291 if retval != os.EX_OK:
10292 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
10294 if(mybestpv != mypvs) and not "--quiet" in myopts:
10296 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
10297 print red(" * ")+"that you update portage now, before any other packages are updated."
10299 print red(" * ")+"To update portage, run 'emerge portage' now."
10302 display_news_notification(root_config, myopts)
# Restore meaningful mtimes after a git-based tree sync: git does not preserve
# file timestamps, so this copies the mtimes recorded in metadata/cache back
# onto the corresponding ebuilds and eclasses, skipping any file that git
# reports as modified relative to HEAD (its cached mtime can't be trusted).
# NOTE(review): the embedded line numbers in this extract are non-contiguous —
# several original lines (e.g. `try:` headers, `return` statements, `continue`s)
# are missing here; do not treat this text as runnable as-is.
10305 def git_sync_timestamps(settings, portdir):
10307 	Since git doesn't preserve timestamps, synchronize timestamps between
10308 	entries and ebuilds/eclasses. Assume the cache has the correct timestamp
10309 	for a given file as long as the file in the working tree is not modified
10310 	(relative to HEAD).
# Nothing to do without a metadata cache directory.
10312 	cache_dir = os.path.join(portdir, "metadata", "cache")
10313 	if not os.path.isdir(cache_dir):
10315 	writemsg_level(">>> Synchronizing timestamps...\n")
10317 	from portage.cache.cache_errors import CacheError
# Open the pregenerated metadata cache read-only; abort with an error message
# if the cache module cannot be instantiated.
10319 	cache_db = settings.load_best_module("portdbapi.metadbmodule")(
10320 		portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
10321 	except CacheError, e:
10322 	writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
10323 	level=logging.ERROR, noiselevel=-1)
# Collect the set of eclass names (filenames minus the ".eclass" suffix).
10326 	ec_dir = os.path.join(portdir, "eclass")
10328 	ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
10329 	if f.endswith(".eclass"))
10331 	writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
10332 	level=logging.ERROR, noiselevel=-1)
# Ask git which tracked files are modified (--diff-filter=M) relative to HEAD;
# cached mtimes for those files must not be applied.
10335 	args = [portage.const.BASH_BINARY, "-c",
10336 	"cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
10337 	portage._shell_quote(portdir)]
10339 	proc = subprocess.Popen(args, stdout=subprocess.PIPE)
10340 	modified_files = set(l.rstrip("\n") for l in proc.stdout)
# NOTE(review): `rval` is checked here but its assignment (presumably
# proc.wait()) is not visible in this extract — confirm against the original.
10342 	if rval != os.EX_OK:
10345 	modified_eclasses = set(ec for ec in ec_names \
10346 	if os.path.join("eclass", ec + ".eclass") in modified_files)
# Tracks eclasses whose mtime has already been restored, so later cache
# entries can be checked for consistency against the value actually applied.
10348 	updated_ec_mtimes = {}
# Walk every cache entry; entries that are invalid, missing data, or backed by
# locally-modified files are reported and (presumably) skipped.
10350 	for cpv in cache_db:
10351 	cpv_split = portage.catpkgsplit(cpv)
10352 	if cpv_split is None:
10353 	writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
10354 	level=logging.ERROR, noiselevel=-1)
10357 	cat, pn, ver, rev = cpv_split
10358 	cat, pf = portage.catsplit(cpv)
10359 	relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
10360 	if relative_eb_path in modified_files:
# Fetch the entry's recorded ebuild mtime (_mtime_) and its eclass
# name -> (path, mtime) map (_eclasses_), reporting access failures.
10364 	cache_entry = cache_db[cpv]
10365 	eb_mtime = cache_entry.get("_mtime_")
10366 	ec_mtimes = cache_entry.get("_eclasses_")
10368 	writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
10369 	level=logging.ERROR, noiselevel=-1)
10371 	except CacheError, e:
10372 	writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
10373 	(cpv, e), level=logging.ERROR, noiselevel=-1)
10376 	if eb_mtime is None:
10377 	writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
10378 	level=logging.ERROR, noiselevel=-1)
10382 	eb_mtime = long(eb_mtime)
10384 	writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
10385 	(cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
10388 	if ec_mtimes is None:
10389 	writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
10390 	level=logging.ERROR, noiselevel=-1)
# Skip entries that inherit a locally-modified or non-existent eclass.
10393 	if modified_eclasses.intersection(ec_mtimes):
10396 	missing_eclasses = set(ec_mtimes).difference(ec_names)
10397 	if missing_eclasses:
10398 	writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
10399 	(cpv, sorted(missing_eclasses)), level=logging.ERROR,
10403 	eb_path = os.path.join(portdir, relative_eb_path)
# NOTE(review): os.stat() returns a stat result object, yet it is later
# compared with the integer eb_mtime — the original likely took .st_mtime on
# an elided line; confirm against upstream before relying on this text.
10405 	current_eb_mtime = os.stat(eb_path)
10407 	writemsg_level("!!! Missing ebuild: %s\n" % \
10408 	(cpv,), level=logging.ERROR, noiselevel=-1)
# Reject an entry whose eclass mtime disagrees with the value already applied
# for that eclass by an earlier entry.
10411 	inconsistent = False
10412 	for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
10413 	updated_mtime = updated_ec_mtimes.get(ec)
10414 	if updated_mtime is not None and updated_mtime != ec_mtime:
10415 	writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
10416 	(cpv, ec), level=logging.ERROR, noiselevel=-1)
10417 	inconsistent = True
# All checks passed: stamp the cached mtimes onto the ebuild and any
# not-yet-updated eclasses, recording each applied eclass mtime.
10423 	if current_eb_mtime != eb_mtime:
10424 	os.utime(eb_path, (eb_mtime, eb_mtime))
10426 	for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
10427 	if ec in updated_ec_mtimes:
10429 	ec_path = os.path.join(ec_dir, ec + ".eclass")
10430 	current_mtime = long(os.stat(ec_path).st_mtime)
10431 	if current_mtime != ec_mtime:
10432 	os.utime(ec_path, (ec_mtime, ec_mtime))
10433 	updated_ec_mtimes[ec] = ec_mtime
# Transfer/refresh the local metadata cache (settings.depcachedir) from each
# repository's pregenerated metadata/cache, validating eclass data and EAPI
# support along the way, then prune dead entries. Driven by `emerge --metadata`
# and by the post-sync path that calls action_metadata(..., porttrees=[myportdir]).
# NOTE(review): the embedded line numbers are non-contiguous — `try:` headers,
# `else:` branches, `continue`s and some assignments (e.g. `curval`, `display`,
# the `src` copy for unsupported EAPIs) are elided from this extract.
10437 def action_metadata(settings, portdb, myopts, porttrees=None):
10438 	if porttrees is None:
10439 	porttrees = portdb.porttrees
10440 	portage.writemsg_stdout("\n>>> Updating Portage cache\n")
# Temporarily relax umask so cache files are group-readable; restored at the end.
10441 	old_umask = os.umask(0002)
# Refuse to operate when PORTAGE_DEPCACHEDIR points at a primary system
# directory — pruning dead nodes there would be destructive.
10442 	cachedir = os.path.normpath(settings.depcachedir)
10443 	if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
10444 	"/lib", "/opt", "/proc", "/root", "/sbin",
10445 	"/sys", "/tmp", "/usr", "/var"]:
10446 	print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
10447 	"ROOT DIRECTORY ON YOUR SYSTEM."
10448 	print >> sys.stderr, \
10449 	"!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
10451 	if not os.path.exists(cachedir):
10452 	os.makedirs(cachedir)
# Only transfer keys that are actually used (drop UNUSED_0* placeholders).
10454 	auxdbkeys = [x for x in portage.auxdbkeys if not x.startswith("UNUSED_0")]
10455 	auxdbkeys = tuple(auxdbkeys)
# Small per-repository record: source (pregenerated) cache, destination
# (local) cache, eclass db, and the set of cpvs seen (for dead-node pruning).
10457 	class TreeData(object):
10458 	__slots__ = ('dest_db', 'eclass_db', 'path', 'src_db', 'valid_nodes')
10459 	def __init__(self, dest_db, eclass_db, path, src_db):
10460 	self.dest_db = dest_db
10461 	self.eclass_db = eclass_db
10463 	self.src_db = src_db
10464 	self.valid_nodes = set()
# Build a TreeData per repository that actually has a pregenerated cache.
10466 	porttrees_data = []
10467 	for path in porttrees:
10468 	src_db = portdb._pregen_auxdb.get(path)
10469 	if src_db is None and \
10470 	os.path.isdir(os.path.join(path, 'metadata', 'cache')):
10471 	src_db = portdb.metadbmodule(
10472 	path, 'metadata/cache', auxdbkeys, readonly=True)
# Attach the repo's eclass db to the source cache when the module supports it.
10474 	src_db.ec = portdb._repo_info[path].eclass_db
10475 	except AttributeError:
10478 	if src_db is not None:
10479 	porttrees_data.append(TreeData(portdb.auxdb[path],
10480 	portdb._repo_info[path].eclass_db, path, src_db))
10482 	porttrees = [tree_data.path for tree_data in porttrees_data]
# Progress bar only when stdout is a terminal and --quiet is not in effect.
10484 	isatty = sys.stdout.isatty()
10485 	quiet = not isatty or '--quiet' in myopts
10488 	progressBar = portage.output.TermProgressBar()
10489 	progressHandler = ProgressHandler()
10490 	onProgress = progressHandler.onProgress
10492 	progressBar.set(progressHandler.curval, progressHandler.maxval)
10493 	progressHandler.display = display
# Keep the bar sized correctly when the terminal is resized.
10494 	def sigwinch_handler(signum, frame):
10495 	lines, progressBar.term_columns = \
10496 	portage.output.get_term_size()
10497 	signal.signal(signal.SIGWINCH, sigwinch_handler)
10499 	# Temporarily override portdb.porttrees so portdb.cp_all()
10500 	# will only return the relevant subset.
10501 	portdb_porttrees = portdb.porttrees
10502 	portdb.porttrees = porttrees
10504 	cp_all = portdb.cp_all()
10506 	portdb.porttrees = portdb_porttrees
10509 	maxval = len(cp_all)
10510 	if onProgress is not None:
10511 	onProgress(maxval, curval)
10513 	from portage.cache.util import quiet_mirroring
10514 	from portage import eapi_is_supported, \
10515 	_validate_cache_for_unsupported_eapis
10517 	# TODO: Display error messages, but do not interfere with the progress bar.
10519 	# 1) erase the progress bar
10520 	# 2) show the error message
10521 	# 3) redraw the progress bar on a new line
# `noise` collects/suppresses per-entry diagnostics (missing, stale, corrupt).
10522 	noise = quiet_mirroring()
# Main transfer loop: for each category/package, copy each version's metadata
# from the pregenerated cache into the local cache unless the existing local
# entry is provably identical.
10525 	for tree_data in porttrees_data:
10526 	for cpv in portdb.cp_list(cp, mytree=tree_data.path):
10527 	tree_data.valid_nodes.add(cpv)
10529 	src = tree_data.src_db[cpv]
10530 	except KeyError, e:
10531 	noise.missing_entry(cpv)
10534 	except CacheError, ce:
10535 	noise.exception(cpv, ce)
# Unsupported EAPIs can only be carried over when validation of such
# entries is enabled; the leading '-' marks a previously-flagged entry.
10539 	eapi = src.get('EAPI')
10542 	eapi = eapi.lstrip('-')
10543 	eapi_supported = eapi_is_supported(eapi)
10544 	if not eapi_supported:
10545 	if not _validate_cache_for_unsupported_eapis:
10546 	noise.misc(cpv, "unable to validate " + \
10547 	"cache for EAPI='%s'" % eapi)
10552 	dest = tree_data.dest_db[cpv]
10553 	except (KeyError, CacheError):
10556 	for d in (src, dest):
10557 	if d is not None and d.get('EAPI') in ('', '0'):
# Skip the write only when mtime, eclass data AND every other key of the
# existing destination entry match the source exactly.
10560 	if dest is not None:
10561 	if not (dest['_mtime_'] == src['_mtime_'] and \
10562 	tree_data.eclass_db.is_eclass_data_valid(
10563 	dest['_eclasses_']) and \
10564 	set(dest['_eclasses_']) == set(src['_eclasses_'])):
10567 	# We don't want to skip the write unless we're really
10568 	# sure that the existing cache is identical, so don't
10569 	# trust _mtime_ and _eclasses_ alone.
10570 	for k in set(chain(src, dest)).difference(
10571 	('_mtime_', '_eclasses_')):
10572 	if dest.get(k, '') != src.get(k, ''):
10576 	if dest is not None:
10577 	# The existing data is valid and identical,
10578 	# so there's no need to overwrite it.
# Resolve the entry's eclass data, falling back to INHERITED when the
# source cache has no _eclasses_ field.
10582 	inherited = src.get('INHERITED', '')
10583 	eclasses = src.get('_eclasses_')
10584 	except CacheError, ce:
10585 	noise.exception(cpv, ce)
10589 	if eclasses is not None:
10590 	if not tree_data.eclass_db.is_eclass_data_valid(
10591 	src['_eclasses_']):
10592 	noise.eclass_stale(cpv)
10594 	inherited = eclasses
10596 	inherited = inherited.split()
10598 	if tree_data.src_db.complete_eclass_entries and \
10600 	noise.corruption(cpv, "missing _eclasses_ field")
10604 	# Even if _eclasses_ already exists, replace it with data from
10605 	# eclass_cache, in order to insert local eclass paths.
10607 	eclasses = tree_data.eclass_db.get_eclass_data(inherited)
10609 	# INHERITED contains a non-existent eclass.
10610 	noise.eclass_stale(cpv)
10613 	if eclasses is None:
10614 	noise.eclass_stale(cpv)
10616 	src['_eclasses_'] = eclasses
10618 	src['_eclasses_'] = {}
# For unsupported EAPIs, store a minimal entry with the EAPI re-marked by a
# leading '-' so it is recognized as unvalidatable on later runs.
10620 	if not eapi_supported:
10622 	'EAPI' : '-' + eapi,
10623 	'_mtime_' : src['_mtime_'],
10624 	'_eclasses_' : src['_eclasses_'],
10628 	tree_data.dest_db[cpv] = src
10629 	except CacheError, ce:
10630 	noise.exception(cpv, ce)
10634 	if onProgress is not None:
10635 	onProgress(maxval, curval)
10637 	if onProgress is not None:
10638 	onProgress(maxval, curval)
# Prune destination-cache entries whose cpv was not seen in this pass.
10640 	for tree_data in porttrees_data:
10642 	dead_nodes = set(tree_data.dest_db.iterkeys())
10643 	except CacheError, e:
10644 	writemsg_level("Error listing cache entries for " + \
10645 	"'%s': %s, continuing...\n" % (tree_data.path, e),
10646 	level=logging.ERROR, noiselevel=-1)
10649 	dead_nodes.difference_update(tree_data.valid_nodes)
10650 	for cpv in dead_nodes:
10652 	del tree_data.dest_db[cpv]
10653 	except (KeyError, CacheError):
10657 	# make sure the final progress is displayed
10658 	progressHandler.display()
# Restore the SIGWINCH handler and the original umask before returning.
10660 	signal.signal(signal.SIGWINCH, signal.SIG_DFL)
10663 	os.umask(old_umask)
# Regenerate all metadata cache entries from the ebuilds (`emerge --regen`),
# delegating the parallel work to MetadataRegen (max_jobs / max_load bound it).
# NOTE(review): lines are elided from this extract (e.g. the `try:` matching
# the visible `except SystemExit`, and the call that runs `regen`).
10665 def action_regen(settings, portdb, max_jobs, max_load):
10666 	xterm_titles = "notitles" not in settings.features
10667 	emergelog(xterm_titles, " === regen")
10668 	#regenerate cache entries
10669 	portage.writemsg_stdout("Regenerating cache entries...\n")
# Close stdin so spawned ebuild processes cannot block waiting on input.
10671 	os.close(sys.stdin.fileno())
10672 	except SystemExit, e:
10673 	raise # Needed else can't exit
10678 	regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
10681 	portage.writemsg_stdout("done!\n")
# Propagate the regeneration result as this action's exit code.
10682 	return regen.returncode
# Run the pkg_config() phase of a single installed package (`emerge --config`):
# validate the atom, resolve it against the vartree (prompting with --ask when
# several packages match), then run the "config" and "clean" ebuild phases.
# NOTE(review): this extract elides several lines (returns/sys.exit calls,
# loop headers for the selection menu, the `try:` for AmbiguousPackageName).
10684 def action_config(settings, trees, myopts, myfiles):
# Exactly one package atom is accepted.
10685 	if len(myfiles) != 1:
10686 	print red("!!! config can only take a single package atom at this time\n")
10688 	if not is_valid_package_atom(myfiles[0]):
10689 	portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
10691 	portage.writemsg("!!! Please check ebuild(5) for full details.\n")
10692 	portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
# Match the atom against installed packages only (vartree).
10696 	pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
10697 	except portage.exception.AmbiguousPackageName, e:
10698 	# Multiple matches thrown from cpv_expand
10701 	print "No packages found.\n"
# Multiple matches: with --ask, present a numbered selection menu ("X" to
# abort); otherwise list them and require a more specific atom.
10703 	elif len(pkgs) > 1:
10704 	if "--ask" in myopts:
10706 	print "Please select a package to configure:"
10710 	options.append(str(idx))
10711 	print options[-1]+") "+pkg
10713 	options.append("X")
10714 	idx = userquery("Selection?", options)
10717 	pkg = pkgs[int(idx)-1]
10719 	print "The following packages available:"
10722 	print "\nPlease use a specific atom or the --ask option."
10728 	if "--ask" in myopts:
10729 	if userquery("Ready to configure "+pkg+"?") == "No":
10732 	print "Configuring pkg..."
10734 	ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
10735 	mysettings = portage.config(clone=settings)
10736 	vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
10737 	debug = mysettings.get("PORTAGE_DEBUG") == "1"
# NOTE(review): BUG — the expression below compares the PORTAGE_DEBUG string
# to the integer 1, so it is always False; the correctly computed `debug`
# local from the line above is not used for the "config" phase (it IS used
# for "clean" below). Likely should be debug=debug.
10738 	retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
10740 	debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
10741 	mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
# Only clean up the temporary build dir when the config phase succeeded.
10742 	if retval == os.EX_OK:
10743 	portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
10744 	mysettings, debug=debug, mydbapi=vardb, tree="vartree")
# Implement `emerge --info`: print the system settings report (versions,
# uname, tree timestamp, toolchain packages, selected make.conf variables,
# USE/USE_EXPAND breakdown) and, for any package arguments, a per-package
# section with build-time variables and an optional pkg_info() phase run.
# NOTE(review): this extract elides many lines (try/except headers, loop
# headers such as `for x in myvars:` / `for cpv in mypkgs:`, `else:` branches,
# and initializers like header_width, pkgs, unset_vars, use_enabled/use_disabled).
10747 def action_info(settings, trees, myopts, myfiles):
10748 	print getportageversion(settings["PORTDIR"], settings["ROOT"],
10749 	settings.profile_path, settings["CHOST"],
10750 	trees[settings["ROOT"]]["vartree"].dbapi)
# Banner: "System Settings" centered between two '=' rules.
10752 	header_title = "System Settings"
10754 	print header_width * "="
10755 	print header_title.rjust(int(header_width/2 + len(header_title)/2))
10756 	print header_width * "="
10757 	print "System uname: "+platform.platform(aliased=1)
# Last sync time is read from the tree's timestamp.chk file.
10759 	lastSync = portage.grabfile(os.path.join(
10760 	settings["PORTDIR"], "metadata", "timestamp.chk"))
10761 	print "Timestamp of tree:",
# Report distcc/ccache versions, flagging whether each is in FEATURES.
10767 	output=commands.getstatusoutput("distcc --version")
10769 	print str(output[1].split("\n",1)[0]),
10770 	if "distcc" in settings.features:
10775 	output=commands.getstatusoutput("ccache -V")
10777 	print str(output[1].split("\n",1)[0]),
10778 	if "ccache" in settings.features:
# Installed versions of key toolchain packages plus any listed in the
# profile's info_pkgs file.
10783 	myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
10784 	"sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
10785 	myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
10786 	myvars = portage.util.unique_array(myvars)
10790 	if portage.isvalidatom(x):
10791 	pkg_matches = trees["/"]["vartree"].dbapi.match(x)
10792 	pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
10793 	pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
10795 	for pn, ver, rev in pkg_matches:
10797 	pkgs.append(ver + "-" + rev)
10801 	pkgs = ", ".join(pkgs)
10802 	print "%-20s %s" % (x+":", pkgs)
10804 	print "%-20s %s" % (x+":", "[NOT VALID]")
10806 	libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
# Variable dump: everything with --verbose, otherwise a curated list plus the
# profile's info_vars additions.
10808 	if "--verbose" in myopts:
10809 	myvars=settings.keys()
10811 	myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
10812 	'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
10813 	'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
10814 	'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
10816 	myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
10818 	myvars = portage.util.unique_array(myvars)
10819 	use_expand = settings.get('USE_EXPAND', '').split()
10821 	use_expand_hidden = set(
10822 	settings.get('USE_EXPAND_HIDDEN', '').upper().split())
10823 	alphabetical_use = '--alphabetical' in myopts
10824 	root_config = trees[settings["ROOT"]]['root_config']
10830 	print '%s="%s"' % (x, settings[x])
# USE is printed with USE_EXPAND-prefixed flags factored out into their own
# VARNAME="..." groups.
10832 	use = set(settings["USE"].split())
10833 	for varname in use_expand:
10834 	flag_prefix = varname.lower() + "_"
10835 	for f in list(use):
10836 	if f.startswith(flag_prefix):
10840 	print 'USE="%s"' % " ".join(use),
10841 	for varname in use_expand:
10842 	myval = settings.get(varname)
10844 	print '%s="%s"' % (varname, myval),
10847 	unset_vars.append(x)
10849 	print "Unset: "+", ".join(unset_vars)
# With --debug, dump the cvs_id_string of every portage submodule.
10852 	if "--debug" in myopts:
10853 	for x in dir(portage):
10854 	module = getattr(portage, x)
10855 	if "cvs_id_string" in dir(module):
10856 	print "%s: %s" % (str(x), str(module.cvs_id_string))
10858 	# See if we can find any packages installed matching the strings
10859 	# passed on the command line
10861 	vardb = trees[settings["ROOT"]]["vartree"].dbapi
10862 	portdb = trees[settings["ROOT"]]["porttree"].dbapi
10864 	mypkgs.extend(vardb.match(x))
10866 	# If some packages were found...
10868 	# Get our global settings (we only print stuff if it varies from
10869 	# the current config)
10870 	mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
10871 	auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
10872 	auxkeys.append('DEFINED_PHASES')
10874 	pkgsettings = portage.config(clone=settings)
10876 	# Loop through each package
10877 	# Only print settings if they differ from global settings
10878 	header_title = "Package Settings"
10879 	print header_width * "="
10880 	print header_title.rjust(int(header_width/2 + len(header_title)/2))
10881 	print header_width * "="
10882 	from portage.output import EOutput
10885 	# Get all package specific variables
# Build a Package object for the installed cpv from its vardb metadata.
10886 	metadata = dict(izip(auxkeys, vardb.aux_get(cpv, auxkeys)))
10887 	pkg = Package(built=True, cpv=cpv,
10888 	installed=True, metadata=izip(Package.metadata_keys,
10889 	(metadata.get(x, '') for x in Package.metadata_keys)),
10890 	root_config=root_config, type_name='installed')
10892 	print "\n%s was built with the following:" % \
10893 	colorize("INFORM", str(pkg.cpv))
# Partition the package's flags into enabled/disabled per USE_EXPAND
# variable, with ARCH removed and use.force/use.mask flags marked as forced.
10895 	pkgsettings.setcpv(pkg)
10896 	forced_flags = set(chain(pkgsettings.useforce,
10897 	pkgsettings.usemask))
10898 	use = set(pkg.use.enabled)
10899 	use.discard(pkgsettings.get('ARCH'))
10900 	use_expand_flags = set()
10903 	for varname in use_expand:
10904 	flag_prefix = varname.lower() + "_"
10906 	if f.startswith(flag_prefix):
10907 	use_expand_flags.add(f)
10908 	use_enabled.setdefault(
10909 	varname.upper(), []).append(f[len(flag_prefix):])
10911 	for f in pkg.iuse.all:
10912 	if f.startswith(flag_prefix):
10913 	use_expand_flags.add(f)
10915 	use_disabled.setdefault(
10916 	varname.upper(), []).append(f[len(flag_prefix):])
# Print USE first, then the remaining variables in sorted order.
10918 	var_order = set(use_enabled)
10919 	var_order.update(use_disabled)
10920 	var_order = sorted(var_order)
10921 	var_order.insert(0, 'USE')
10922 	use.difference_update(use_expand_flags)
10923 	use_enabled['USE'] = list(use)
10924 	use_disabled['USE'] = []
10926 	for f in pkg.iuse.all:
10927 	if f not in use and \
10928 	f not in use_expand_flags:
10929 	use_disabled['USE'].append(f)
10931 	for varname in var_order:
10932 	if varname in use_expand_hidden:
10935 	for f in use_enabled.get(varname, []):
10936 	flags.append(UseFlagDisplay(f, True, f in forced_flags))
10937 	for f in use_disabled.get(varname, []):
10938 	flags.append(UseFlagDisplay(f, False, f in forced_flags))
# --alphabetical merges enabled/disabled into one sorted list; otherwise
# they are sorted separately.
10939 	if alphabetical_use:
10940 	flags.sort(key=UseFlagDisplay.sort_combined)
10942 	flags.sort(key=UseFlagDisplay.sort_separated)
10943 	print '%s="%s"' % (varname, ' '.join(str(f) for f in flags)),
# Show build-time variable values that differ from the current config.
10946 	for myvar in mydesiredvars:
10947 	if metadata[myvar].split() != settings.get(myvar, '').split():
10948 	print "%s=\"%s\"" % (myvar, metadata[myvar])
# Run pkg_info() only when the ebuild actually defines an info phase.
10951 	if metadata['DEFINED_PHASES']:
10952 	if 'info' not in metadata['DEFINED_PHASES'].split():
10955 	print ">>> Attempting to run pkg_info() for '%s'" % pkg.cpv
10956 	ebuildpath = vardb.findname(pkg.cpv)
10957 	if not ebuildpath or not os.path.exists(ebuildpath):
10958 	out.ewarn("No ebuild found for '%s'" % pkg.cpv)
# NOTE(review): BUG — debug=(settings.get("PORTAGE_DEBUG", "") == 1) compares
# a string to the integer 1 and is therefore always False; same defect as in
# action_config. Likely intended: == "1".
10960 	portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
10961 	pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
10962 	mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
# Implement `emerge --search`: run each search term through a `search`
# instance configured from the command-line options and print the results.
# NOTE(review): lines are elided from this extract (the empty-terms guard's
# condition, the `try:` matching the visible `except re.error`).
10965 def action_search(root_config, myopts, myfiles, spinner):
10967 	print "emerge: no search terms provided."
# Option flags map directly onto the search() constructor: description
# search, verbosity, and binary-package awareness.
10969 	searchinstance = search(root_config,
10970 	spinner, "--searchdesc" in myopts,
10971 	"--quiet" not in myopts, "--usepkg" in myopts,
10972 	"--usepkgonly" in myopts)
10973 	for mysearch in myfiles:
# Terms are treated as regular expressions; report bad patterns per-term.
10975 	searchinstance.execute(mysearch)
10976 	except re.error, comment:
10977 	print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
10979 	searchinstance.output()
# Shared front-end for the unmerge-family actions (clean/unmerge/prune/
# deselect/depclean): turn the command-line arguments — package atoms or
# absolute file paths — into validated atoms, then dispatch to unmerge(),
# action_deselect() or action_depclean().
# NOTE(review): lines are elided from this extract (initializers for
# valid_atoms/lookup_owners/msg, `try:` headers, returns, loop headers such
# as the one iterating `files`, and the owners-to-atoms loop header).
10981 def action_uninstall(settings, trees, ldpath_mtimes,
10982 	opts, action, files, spinner):
10984 	# For backward compat, some actions do not require leading '='.
10985 	ignore_missing_eq = action in ('clean', 'unmerge')
10986 	root = settings['ROOT']
10987 	vardb = trees[root]['vartree'].dbapi
10991 	# Ensure atoms are valid before calling unmerge().
10992 	# For backward compat, leading '=' is not required.
10994 	if is_valid_package_atom(x) or \
10995 	(ignore_missing_eq and is_valid_package_atom('=' + x)):
# Expand short names (e.g. "foo") to full category/package atoms against
# the installed-package db; ambiguity is reported with all candidates.
10998 	valid_atoms.append(
10999 	portage.dep_expand(x, mydb=vardb, settings=settings))
11000 	except portage.exception.AmbiguousPackageName, e:
11001 	msg = "The short ebuild name \"" + x + \
11002 	"\" is ambiguous. Please specify " + \
11003 	"one of the following " + \
11004 	"fully-qualified ebuild names instead:"
11005 	for line in textwrap.wrap(msg, 70):
11006 	writemsg_level("!!! %s\n" % (line,),
11007 	level=logging.ERROR, noiselevel=-1)
11009 	writemsg_level(" %s\n" % colorize("INFORM", i),
11010 	level=logging.ERROR, noiselevel=-1)
11011 	writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
# Absolute paths are resolved to the package(s) owning them; they must live
# under $ROOT.
11014 	elif x.startswith(os.sep):
11015 	if not x.startswith(root):
11016 	writemsg_level(("!!! '%s' does not start with" + \
11017 	" $ROOT.\n") % x, level=logging.ERROR, noiselevel=-1)
11019 	# Queue these up since it's most efficient to handle
11020 	# multiple files in a single iter_owners() call.
11021 	lookup_owners.append(x)
# Neither a valid atom nor a path: report and (presumably) bail out.
11025 	msg.append("'%s' is not a valid package atom." % (x,))
11026 	msg.append("Please check ebuild(5) for full details.")
11027 	writemsg_level("".join("!!! %s\n" % line for line in msg),
11028 	level=logging.ERROR, noiselevel=-1)
# Owner lookup: a single non-directory path may match at most one package;
# multiple paths or a directory may match several.
11032 	relative_paths = []
11033 	search_for_multiple = False
11034 	if len(lookup_owners) > 1:
11035 	search_for_multiple = True
11037 	for x in lookup_owners:
11038 	if not search_for_multiple and os.path.isdir(x):
11039 	search_for_multiple = True
# iter_owners() works on $ROOT-relative paths.
11040 	relative_paths.append(x[len(root):])
11043 	for pkg, relative_path in \
11044 	vardb._owners.iter_owners(relative_paths):
11045 	owners.add(pkg.mycpv)
11046 	if not search_for_multiple:
# Convert each owning cpv into a slot-qualified atom (cat/pkg:slot), falling
# back to a plain cat/pkg atom when SLOT is missing.
11051 	slot = vardb.aux_get(cpv, ['SLOT'])[0]
11053 	# portage now masks packages with missing slot, but it's
11054 	# possible that one was installed by an older version
11055 	atom = portage.cpv_getkey(cpv)
11057 	atom = '%s:%s' % (portage.cpv_getkey(cpv), slot)
11058 	valid_atoms.append(portage.dep.Atom(atom))
11060 	writemsg_level(("!!! '%s' is not claimed " + \
11061 	"by any package.\n") % lookup_owners[0],
11062 	level=logging.WARNING, noiselevel=-1)
11064 	if files and not valid_atoms:
# Dispatch: clean/unmerge (and prune with --nodeps) go straight to unmerge(),
# preserving argument order only for 'unmerge'; deselect and the remaining
# actions go to their dedicated handlers.
11067 	if action in ('clean', 'unmerge') or \
11068 	(action == 'prune' and "--nodeps" in opts):
11069 	# When given a list of atoms, unmerge them in the order given.
11070 	ordered = action == 'unmerge'
11071 	unmerge(trees[settings["ROOT"]]['root_config'], opts, action,
11072 	valid_atoms, ldpath_mtimes, ordered=ordered)
11074 	elif action == 'deselect':
11075 	rval = action_deselect(settings, trees, opts, valid_atoms)
11077 	rval = action_depclean(settings, trees, ldpath_mtimes,
11078 	opts, action, valid_atoms, spinner)
# Remove the given atoms from the 'world' favorites set. Installed
# matches are expanded to slot atoms first so slotted entries are
# matched precisely; honors --pretend and --ask, and locks the world set
# while modifying it (lock/replace calls visible below are partially
# elided in this excerpt).
# NOTE(review): original source lines are missing between the numbered
# lines below; comments describe only what is visible here.
11082 def action_deselect(settings, trees, opts, atoms):
11083 root_config = trees[settings['ROOT']]['root_config']
11084 world_set = root_config.sets['world']
# The world set must support update/replace; bail out otherwise.
11085 if not hasattr(world_set, 'update'):
11086 writemsg_level("World set does not appear to be mutable.\n",
11087 level=logging.ERROR, noiselevel=-1)
11090 vardb = root_config.trees['vartree'].dbapi
11091 expanded_atoms = set(atoms)
11092 from portage.dep import Atom
# Expand each argument atom to slot atoms for every installed match.
11094 for cpv in vardb.match(atom):
11095 slot, = vardb.aux_get(cpv, ['SLOT'])
11098 expanded_atoms.add(Atom('%s:%s' % (portage.cpv_getkey(cpv), slot)))
11100 pretend = '--pretend' in opts
11102 if not pretend and hasattr(world_set, 'lock'):
11106 discard_atoms = set()
# Collect world entries intersecting any argument atom; a slotless world
# entry is kept when the argument atom is slot-specific.
11108 for atom in world_set:
11109 if not isinstance(atom, Atom):
11112 for arg_atom in expanded_atoms:
11113 if arg_atom.intersects(atom) and \
11114 not (arg_atom.slot and not atom.slot):
11115 discard_atoms.add(atom)
11118 for atom in sorted(discard_atoms):
11119 print ">>> Removing %s from \"world\" favorites file..." % \
11120 colorize("INFORM", str(atom))
11122 if '--ask' in opts:
11123 prompt = "Would you like to remove these " + \
11124 "packages from your world favorites?"
11125 if userquery(prompt) == 'No':
# Rewrite the world set without the discarded atoms.
11128 remaining = set(world_set)
11129 remaining.difference_update(discard_atoms)
11131 world_set.replace(remaining)
11133 print ">>> No matching atoms found in \"world\" favorites file..."
# Implements 'emerge --depclean' and 'emerge --prune': build a complete
# dependency graph rooted at the system/world sets (or at all installed
# packages for prune), then select for removal every installed package
# not reachable in that graph, protecting sole providers of shared
# libraries and finally unmerging in reverse-dependency order.
# NOTE(review): this excerpt is elided -- original source lines are
# missing between the numbered lines below; comments describe only what
# is visible here.
11139 def action_depclean(settings, trees, ldpath_mtimes,
11140 myopts, action, myfiles, spinner):
11141 # Kill packages that aren't explicitly merged or are required as a
11142 # dependency of another package. World file is explicit.
11144 # Global depclean or prune operations are not very safe when there are
11145 # missing dependencies since it's unknown how badly incomplete
11146 # the dependency graph is, and we might accidentally remove packages
11147 # that should have been pulled into the graph. On the other hand, it's
11148 # relatively safe to ignore missing deps when only asked to remove
11149 # specific packages.
11150 allow_missing_deps = len(myfiles) > 0
# Safety-advice message shown before a full (argument-less) depclean.
11153 msg.append("Always study the list of packages to be cleaned for any obvious\n")
11154 msg.append("mistakes. Packages that are part of the world set will always\n")
11155 msg.append("be kept. They can be manually added to this set with\n")
11156 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
11157 msg.append("package.provided (see portage(5)) will be removed by\n")
11158 msg.append("depclean, even if they are part of the world set.\n")
11160 msg.append("As a safety measure, depclean will not remove any packages\n")
11161 msg.append("unless *all* required dependencies have been resolved. As a\n")
11162 msg.append("consequence, it is often necessary to run %s\n" % \
11163 good("`emerge --update"))
11164 msg.append(good("--newuse --deep @system @world`") + \
11165 " prior to depclean.\n")
11167 if action == "depclean" and "--quiet" not in myopts and not myfiles:
11168 portage.writemsg_stdout("\n")
11170 portage.writemsg_stdout(colorize("WARN", " * ") + x)
11172 xterm_titles = "notitles" not in settings.features
11173 myroot = settings["ROOT"]
11174 root_config = trees[myroot]["root_config"]
11175 getSetAtoms = root_config.setconfig.getSetAtoms
11176 vardb = trees[myroot]["vartree"].dbapi
11177 deselect = myopts.get('--deselect') != 'n'
# Snapshot the system and world sets; these are the roots of the
# "required packages" graph.
11179 required_set_names = ("system", "world")
11183 for s in required_set_names:
11184 required_sets[s] = InternalPackageSet(
11185 initial_atoms=getSetAtoms(s))
11188 # When removing packages, use a temporary version of world
11189 # which excludes packages that are intended to be eligible for
11191 world_temp_set = required_sets["world"]
11192 system_set = required_sets["system"]
# Warn loudly (with a countdown) before proceeding with empty
# system/world sets, since that would likely break the installation.
11194 if not system_set or not world_temp_set:
11197 writemsg_level("!!! You have no system list.\n",
11198 level=logging.ERROR, noiselevel=-1)
11200 if not world_temp_set:
11201 writemsg_level("!!! You have no world file.\n",
11202 level=logging.WARNING, noiselevel=-1)
11204 writemsg_level("!!! Proceeding is likely to " + \
11205 "break your installation.\n",
11206 level=logging.WARNING, noiselevel=-1)
11207 if "--pretend" not in myopts:
11208 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
11210 if action == "depclean":
11211 emergelog(xterm_titles, " >>> depclean")
# args_set holds the user-supplied atoms (if any); removal is then
# restricted to packages matching those atoms.
11214 args_set = InternalPackageSet()
11216 args_set.update(myfiles)
11217 matched_packages = False
11220 matched_packages = True
11222 if not matched_packages:
11223 writemsg_level(">>> No packages selected for removal by %s\n" % \
# Build the dependency graph with a resolver in "remove" mode.
11227 writemsg_level("\nCalculating dependencies ")
11228 resolver_params = create_depgraph_params(myopts, "remove")
11229 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
11230 vardb = resolver.trees[myroot]["vartree"].dbapi
11232 if action == "depclean":
# depclean with arguments: protect every installed package NOT matched
# by an argument atom by adding it to the temporary world set.
11237 world_temp_set.clear()
11239 # Pull in everything that's installed but not matched
11240 # by an argument atom since we don't want to clean any
11241 # package if something depends on it.
11246 if args_set.findAtomForPackage(pkg) is None:
11247 world_temp_set.add("=" + pkg.cpv)
11249 except portage.exception.InvalidDependString, e:
11250 show_invalid_depstring_notice(pkg,
11251 pkg.metadata["PROVIDE"], str(e))
# On an invalid PROVIDE string, keep the package (fail safe).
11253 world_temp_set.add("=" + pkg.cpv)
11256 elif action == "prune":
11259 world_temp_set.clear()
11261 # Pull in everything that's installed since we don't
11262 # to prune a package if something depends on it.
11263 world_temp_set.update(vardb.cp_all())
# Prune without arguments: target every cp that has more than one
# installed version (i.e. everything slotted).
11267 # Try to prune everything that's slotted.
11268 for cp in vardb.cp_all():
11269 if len(vardb.cp_list(cp)) > 1:
11272 # Remove atoms from world that match installed packages
11273 # that are also matched by argument atoms, but do not remove
11274 # them if they match the highest installed version.
11277 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
11278 if not pkgs_for_cp or pkg not in pkgs_for_cp:
11279 raise AssertionError("package expected in matches: " + \
11280 "cp = %s, cpv = %s matches = %s" % \
11281 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
11283 highest_version = pkgs_for_cp[-1]
11284 if pkg == highest_version:
11285 # pkg is the highest version
11286 world_temp_set.add("=" + pkg.cpv)
11289 if len(pkgs_for_cp) <= 1:
11290 raise AssertionError("more packages expected: " + \
11291 "cp = %s, cpv = %s matches = %s" % \
11292 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
11295 if args_set.findAtomForPackage(pkg) is None:
11296 world_temp_set.add("=" + pkg.cpv)
11298 except portage.exception.InvalidDependString, e:
11299 show_invalid_depstring_notice(pkg,
11300 pkg.metadata["PROVIDE"], str(e))
# Keep the package when its PROVIDE can't be parsed (fail safe).
11302 world_temp_set.add("=" + pkg.cpv)
# Seed the resolver with the (possibly adjusted) required sets and let
# it complete the graph of everything those sets depend on.
11306 for s, package_set in required_sets.iteritems():
11307 set_atom = SETPREFIX + s
11308 set_arg = SetArg(arg=set_atom, set=package_set,
11309 root_config=resolver.roots[myroot])
11310 set_args[s] = set_arg
11311 for atom in set_arg.set:
11312 resolver._dep_stack.append(
11313 Dependency(atom=atom, root=myroot, parent=set_arg))
11314 resolver.digraph.add(set_arg, None)
11316 success = resolver._complete_graph()
11317 writemsg_level("\b\b... done!\n")
11319 resolver.display_problems()
# Nested helper: report required packages whose deps could not be
# resolved; returns truthy when the operation should abort (unless
# allow_missing_deps permits proceeding).
11324 def unresolved_deps():
11326 unresolvable = set()
11327 for dep in resolver._initially_unsatisfied_deps:
11328 if isinstance(dep.parent, Package) and \
11329 (dep.priority > UnmergeDepPriority.SOFT):
11330 unresolvable.add((dep.atom, dep.parent.cpv))
11332 if not unresolvable:
11335 if unresolvable and not allow_missing_deps:
11336 prefix = bad(" * ")
11338 msg.append("Dependencies could not be completely resolved due to")
11339 msg.append("the following required packages not being installed:")
11341 for atom, parent in unresolvable:
11342 msg.append(" %s pulled in by:" % (atom,))
11343 msg.append(" %s" % (parent,))
11345 msg.append("Have you forgotten to run " + \
11346 good("`emerge --update --newuse --deep @system @world`") + " prior")
11347 msg.append(("to %s? It may be necessary to manually " + \
11348 "uninstall packages that no longer") % action)
11349 msg.append("exist in the portage tree since " + \
11350 "it may not be possible to satisfy their")
11351 msg.append("dependencies. Also, be aware of " + \
11352 "the --with-bdeps option that is documented")
11353 msg.append("in " + good("`man emerge`") + ".")
11354 if action == "prune":
11356 msg.append("If you would like to ignore " + \
11357 "dependencies then use %s." % good("--nodeps"))
11358 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
11359 level=logging.ERROR, noiselevel=-1)
11363 if unresolved_deps():
# Count the required (kept) packages for the summary printed at the end.
11366 graph = resolver.digraph.copy()
11367 required_pkgs_total = 0
11369 if isinstance(node, Package):
11370 required_pkgs_total += 1
# Nested helper: with --verbose, show which parents keep a package in
# the graph (and therefore out of the clean list).
11372 def show_parents(child_node):
11373 parent_nodes = graph.parent_nodes(child_node)
11374 if not parent_nodes:
11375 # With --prune, the highest version can be pulled in without any
11376 # real parent since all installed packages are pulled in. In that
11377 # case there's nothing to show here.
11380 for node in parent_nodes:
11381 parent_strs.append(str(getattr(node, "cpv", node)))
11384 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
11385 for parent_str in parent_strs:
11386 msg.append(" %s\n" % (parent_str,))
11388 portage.writemsg_stdout("".join(msg), noiselevel=-1)
# Nested helper: comparison function for sorting Packages by cpv
# (used via cmp_sort_key below).
11390 def cmp_pkg_cpv(pkg1, pkg2):
11391 """Sort Package instances by cpv."""
11392 if pkg1.cpv > pkg2.cpv:
11394 elif pkg1.cpv == pkg2.cpv:
# Nested helper: select installed packages absent from the required
# graph (optionally restricted to argument atoms) as removal candidates.
11399 def create_cleanlist():
11400 pkgs_to_remove = []
11402 if action == "depclean":
11405 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
11408 arg_atom = args_set.findAtomForPackage(pkg)
11409 except portage.exception.InvalidDependString:
11410 # this error has already been displayed by now
11414 if pkg not in graph:
11415 pkgs_to_remove.append(pkg)
11416 elif "--verbose" in myopts:
11420 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
11421 if pkg not in graph:
11422 pkgs_to_remove.append(pkg)
11423 elif "--verbose" in myopts:
11426 elif action == "prune":
11427 # Prune really uses all installed instead of world. It's not
11428 # a real reverse dependency so don't display it as such.
11429 graph.remove(set_args["world"])
11431 for atom in args_set:
11432 for pkg in vardb.match_pkgs(atom):
11433 if pkg not in graph:
11434 pkgs_to_remove.append(pkg)
11435 elif "--verbose" in myopts:
11438 if not pkgs_to_remove:
11440 ">>> No packages selected for removal by %s\n" % action)
11441 if "--verbose" not in myopts:
11443 ">>> To see reverse dependencies, use %s\n" % \
11445 if action == "prune":
11447 ">>> To ignore dependencies, use %s\n" % \
11450 return pkgs_to_remove
11452 cleanlist = create_cleanlist()
11455 clean_set = set(cleanlist)
# Library-consumer protection: a candidate that is the sole provider of
# a library still linked to by a kept package must not be removed.
11457 # Check if any of these package are the sole providers of libraries
11458 # with consumers that have not been selected for removal. If so, these
11459 # packages and any dependencies need to be added to the graph.
11460 real_vardb = trees[myroot]["vartree"].dbapi
11461 linkmap = real_vardb.linkmap
11462 liblist = linkmap.listLibraryObjects()
11463 consumer_cache = {}
11464 provider_cache = {}
11468 writemsg_level(">>> Checking for lib consumers...\n")
11470 for pkg in cleanlist:
11471 pkg_dblink = real_vardb._dblink(pkg.cpv)
11472 provided_libs = set()
# Collect the shared libraries owned by this removal candidate.
11474 for lib in liblist:
11475 if pkg_dblink.isowner(lib, myroot):
11476 provided_libs.add(lib)
11478 if not provided_libs:
# Find consumers of each provided library (cached per lib).
11482 for lib in provided_libs:
11483 lib_consumers = consumer_cache.get(lib)
11484 if lib_consumers is None:
11485 lib_consumers = linkmap.findConsumers(lib)
11486 consumer_cache[lib] = lib_consumers
11488 consumers[lib] = lib_consumers
# Drop consumers owned by the candidate itself -- self-links don't
# block removal.
11493 for lib, lib_consumers in consumers.items():
11494 for consumer_file in list(lib_consumers):
11495 if pkg_dblink.isowner(consumer_file, myroot):
11496 lib_consumers.remove(consumer_file)
11497 if not lib_consumers:
# Pair each remaining consumer with the providers of the library's
# soname, skipping sonames the linkmap can't resolve.
11503 for lib, lib_consumers in consumers.iteritems():
11505 soname = soname_cache.get(lib)
11507 soname = linkmap.getSoname(lib)
11508 soname_cache[lib] = soname
11510 consumer_providers = []
11511 for lib_consumer in lib_consumers:
11512 providers = provider_cache.get(lib)
11513 if providers is None:
11514 providers = linkmap.findProviders(lib_consumer)
11515 provider_cache[lib_consumer] = providers
11516 if soname not in providers:
11517 # Why does this happen?
11519 consumer_providers.append(
11520 (lib_consumer, providers[soname]))
11522 consumers[lib] = consumer_providers
11524 consumer_map[pkg] = consumers
# Resolve all consumer/provider file paths to owning packages in one
# batched lookup.
11528 search_files = set()
11529 for consumers in consumer_map.itervalues():
11530 for lib, consumer_providers in consumers.iteritems():
11531 for lib_consumer, providers in consumer_providers:
11532 search_files.add(lib_consumer)
11533 search_files.update(providers)
11535 writemsg_level(">>> Assigning files to packages...\n")
11536 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
11538 for pkg, consumers in consumer_map.items():
11539 for lib, consumer_providers in consumers.items():
11540 lib_consumers = set()
11542 for lib_consumer, providers in consumer_providers:
11543 owner_set = file_owners.get(lib_consumer)
11544 provider_dblinks = set()
11545 provider_pkgs = set()
# When multiple providers exist and at least one is NOT being
# removed, this consumer is not at risk.
11547 if len(providers) > 1:
11548 for provider in providers:
11549 provider_set = file_owners.get(provider)
11550 if provider_set is not None:
11551 provider_dblinks.update(provider_set)
11553 if len(provider_dblinks) > 1:
11554 for provider_dblink in provider_dblinks:
11555 pkg_key = ("installed", myroot,
11556 provider_dblink.mycpv, "nomerge")
11557 if pkg_key not in clean_set:
11558 provider_pkgs.add(vardb.get(pkg_key))
11563 if owner_set is not None:
11564 lib_consumers.update(owner_set)
# Consumers that are themselves scheduled for removal don't count.
11566 for consumer_dblink in list(lib_consumers):
11567 if ("installed", myroot, consumer_dblink.mycpv,
11568 "nomerge") in clean_set:
11569 lib_consumers.remove(consumer_dblink)
11573 consumers[lib] = lib_consumers
11577 del consumer_map[pkg]
# Report the packages that will be kept to protect link-level deps.
11580 # TODO: Implement a package set for rebuilding consumer packages.
11582 msg = "In order to avoid breakage of link level " + \
11583 "dependencies, one or more packages will not be removed. " + \
11584 "This can be solved by rebuilding " + \
11585 "the packages that pulled them in."
11587 prefix = bad(" * ")
11588 from textwrap import wrap
11589 writemsg_level("".join(prefix + "%s\n" % line for \
11590 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
11593 for pkg, consumers in consumer_map.iteritems():
11594 unique_consumers = set(chain(*consumers.values()))
11595 unique_consumers = sorted(consumer.mycpv \
11596 for consumer in unique_consumers)
11598 msg.append(" %s pulled in by:" % (pkg.cpv,))
11599 for consumer in unique_consumers:
11600 msg.append(" %s" % (consumer,))
11602 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
11603 level=logging.WARNING, noiselevel=-1)
11605 # Add lib providers to the graph as children of lib consumers,
11606 # and also add any dependencies pulled in by the provider.
11607 writemsg_level(">>> Adding lib providers to graph...\n")
11609 for pkg, consumers in consumer_map.iteritems():
11610 for consumer_dblink in set(chain(*consumers.values())):
11611 consumer_pkg = vardb.get(("installed", myroot,
11612 consumer_dblink.mycpv, "nomerge"))
11613 if not resolver._add_pkg(pkg,
11614 Dependency(parent=consumer_pkg,
11615 priority=UnmergeDepPriority(runtime=True),
11617 resolver.display_problems()
# Re-complete the graph now that protected providers were added, then
# recompute the clean list against the enlarged graph.
11620 writemsg_level("\nCalculating dependencies ")
11621 success = resolver._complete_graph()
11622 writemsg_level("\b\b... done!\n")
11623 resolver.display_problems()
11626 if unresolved_deps():
11629 graph = resolver.digraph.copy()
11630 required_pkgs_total = 0
11632 if isinstance(node, Package):
11633 required_pkgs_total += 1
11634 cleanlist = create_cleanlist()
11637 clean_set = set(cleanlist)
# Order the unmerge so each package is removed before its dependencies,
# using a graph built from the inter-candidate DEPEND/RDEPEND/PDEPEND
# relationships.
11639 # Use a topological sort to create an unmerge order such that
11640 # each package is unmerged before it's dependencies. This is
11641 # necessary to avoid breaking things that may need to run
11642 # during pkg_prerm or pkg_postrm phases.
11644 # Create a new graph to account for dependencies between the
11645 # packages being unmerged.
11649 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
11650 runtime = UnmergeDepPriority(runtime=True)
11651 runtime_post = UnmergeDepPriority(runtime_post=True)
11652 buildtime = UnmergeDepPriority(buildtime=True)
11654 "RDEPEND": runtime,
11655 "PDEPEND": runtime_post,
11656 "DEPEND": buildtime,
11659 for node in clean_set:
11660 graph.add(node, None)
11662 node_use = node.metadata["USE"].split()
11663 for dep_type in dep_keys:
11664 depstr = node.metadata[dep_type]
# Temporarily relax strict dep checking while evaluating installed
# packages' dep strings; restored immediately after.
11668 portage.dep._dep_check_strict = False
11669 success, atoms = portage.dep_check(depstr, None, settings,
11670 myuse=node_use, trees=resolver._graph_trees,
11673 portage.dep._dep_check_strict = True
11675 # Ignore invalid deps of packages that will
11676 # be uninstalled anyway.
11679 priority = priority_map[dep_type]
11681 if not isinstance(atom, portage.dep.Atom):
11682 # Ignore invalid atoms returned from dep_check().
11686 matches = vardb.match_pkgs(atom)
11689 for child_node in matches:
11690 if child_node in clean_set:
11691 graph.add(child_node, node, priority=priority)
11694 if len(graph.order) == len(graph.root_nodes()):
11695 # If there are no dependencies between packages
11696 # let unmerge() group them by cat/pn.
11698 cleanlist = [pkg.cpv for pkg in graph.order]
11700 # Order nodes from lowest to highest overall reference count for
11701 # optimal root node selection.
11702 node_refcounts = {}
11703 for node in graph.order:
11704 node_refcounts[node] = len(graph.parent_nodes(node))
11705 def cmp_reference_count(node1, node2):
11706 return node_refcounts[node1] - node_refcounts[node2]
11707 graph.order.sort(key=cmp_sort_key(cmp_reference_count))
# Repeatedly pop root nodes, relaxing dep priorities as needed to
# break circular dependencies.
11709 ignore_priority_range = [None]
11710 ignore_priority_range.extend(
11711 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
11712 while not graph.empty():
11713 for ignore_priority in ignore_priority_range:
11714 nodes = graph.root_nodes(ignore_priority=ignore_priority)
11718 raise AssertionError("no root nodes")
11719 if ignore_priority is not None:
11720 # Some deps have been dropped due to circular dependencies,
11721 # so only pop one node in order do minimize the number that
11726 cleanlist.append(node.cpv)
11728 unmerge(root_config, myopts, "unmerge", cleanlist,
11729 ldpath_mtimes, ordered=ordered)
11731 if action == "prune":
# Final summary of package counts.
11734 if not cleanlist and "--quiet" in myopts:
11737 print "Packages installed: "+str(len(vardb.cpv_all()))
11738 print "Packages in world: " + \
11739 str(len(root_config.sets["world"].getAtoms()))
11740 print "Packages in system: " + \
11741 str(len(root_config.sets["system"].getAtoms()))
11742 print "Required packages: "+str(required_pkgs_total)
11743 if "--pretend" in myopts:
11744 print "Number to remove: "+str(len(cleanlist))
11746 print "Number removed: "+str(len(cleanlist))
# NOTE(review): this excerpt is elided -- original source lines are
# missing between the numbered lines below; comments describe only what
# is visible here.
11748 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
11750 Construct a depgraph for the given resume list. This will raise
11751 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
11753 @returns: (success, depgraph, dropped_tasks)
11756 skip_unsatisfied = True
11757 mergelist = mtimedb["resume"]["mergelist"]
11758 dropped_tasks = set()
# Retry loop (loop header elided here): rebuild the depgraph, pruning
# unsatisfiable entries from the resume mergelist each iteration.
11760 mydepgraph = depgraph(settings, trees,
11761 myopts, myparams, spinner)
11763 success = mydepgraph.loadResumeCommand(mtimedb["resume"],
11764 skip_masked=skip_masked)
11765 except depgraph.UnsatisfiedResumeDep, e:
11766 if not skip_unsatisfied:
# An unsatisfied dep: collect the directly-affected parents, then walk
# up the graph since dropping a package can cascade to its parents.
11769 graph = mydepgraph.digraph
11770 unsatisfied_parents = dict((dep.parent, dep.parent) \
11771 for dep in e.value)
11772 traversed_nodes = set()
11773 unsatisfied_stack = list(unsatisfied_parents)
11774 while unsatisfied_stack:
11775 pkg = unsatisfied_stack.pop()
11776 if pkg in traversed_nodes:
11778 traversed_nodes.add(pkg)
11780 # If this package was pulled in by a parent
11781 # package scheduled for merge, removing this
11782 # package may cause the the parent package's
11783 # dependency to become unsatisfied.
11784 for parent_node in graph.parent_nodes(pkg):
11785 if not isinstance(parent_node, Package) \
11786 or parent_node.operation not in ("merge", "nomerge"):
11789 graph.child_nodes(parent_node,
11790 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
11791 if pkg in unsatisfied:
11792 unsatisfied_parents[parent_node] = parent_node
11793 unsatisfied_stack.append(parent_node)
# Rebuild the mergelist without the unsatisfied parents.
11795 pruned_mergelist = []
11796 for x in mergelist:
11797 if isinstance(x, list) and \
11798 tuple(x) not in unsatisfied_parents:
11799 pruned_mergelist.append(x)
11801 # If the mergelist doesn't shrink then this loop is infinite.
11802 if len(pruned_mergelist) == len(mergelist):
11803 # This happens if a package can't be dropped because
11804 # it's already installed, but it has unsatisfied PDEPEND.
11806 mergelist[:] = pruned_mergelist
11808 # Exclude installed packages that have been removed from the graph due
11809 # to failure to build/install runtime dependencies after the dependent
11810 # package has already been installed.
11811 dropped_tasks.update(pkg for pkg in \
11812 unsatisfied_parents if pkg.operation != "nomerge")
# break_refs releases references held by the discarded graph nodes.
11813 mydepgraph.break_refs(unsatisfied_parents)
11815 del e, graph, traversed_nodes, \
11816 unsatisfied_parents, unsatisfied_stack
11820 return (success, mydepgraph, dropped_tasks)
11822 def action_build(settings, trees, mtimedb,
11823 myopts, myaction, myfiles, spinner):
11825 # validate the state of the resume data
11826 # so that we can make assumptions later.
11827 for k in ("resume", "resume_backup"):
11828 if k not in mtimedb:
11830 resume_data = mtimedb[k]
11831 if not isinstance(resume_data, dict):
11834 mergelist = resume_data.get("mergelist")
11835 if not isinstance(mergelist, list):
11838 for x in mergelist:
11839 if not (isinstance(x, list) and len(x) == 4):
11841 pkg_type, pkg_root, pkg_key, pkg_action = x
11842 if pkg_root not in trees:
11843 # Current $ROOT setting differs,
11844 # so the list must be stale.
11850 resume_opts = resume_data.get("myopts")
11851 if not isinstance(resume_opts, (dict, list)):
11854 favorites = resume_data.get("favorites")
11855 if not isinstance(favorites, list):
11860 if "--resume" in myopts and \
11861 ("resume" in mtimedb or
11862 "resume_backup" in mtimedb):
11864 if "resume" not in mtimedb:
11865 mtimedb["resume"] = mtimedb["resume_backup"]
11866 del mtimedb["resume_backup"]
11868 # "myopts" is a list for backward compatibility.
11869 resume_opts = mtimedb["resume"].get("myopts", [])
11870 if isinstance(resume_opts, list):
11871 resume_opts = dict((k,True) for k in resume_opts)
11872 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
11873 resume_opts.pop(opt, None)
11875 # Current options always override resume_opts.
11876 resume_opts.update(myopts)
11878 myopts.update(resume_opts)
11880 if "--debug" in myopts:
11881 writemsg_level("myopts %s\n" % (myopts,))
11883 # Adjust config according to options of the command being resumed.
11884 for myroot in trees:
11885 mysettings = trees[myroot]["vartree"].settings
11886 mysettings.unlock()
11887 adjust_config(myopts, mysettings)
11889 del myroot, mysettings
11891 ldpath_mtimes = mtimedb["ldpath"]
11894 buildpkgonly = "--buildpkgonly" in myopts
11895 pretend = "--pretend" in myopts
11896 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
11897 ask = "--ask" in myopts
11898 nodeps = "--nodeps" in myopts
11899 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
11900 tree = "--tree" in myopts
11901 if nodeps and tree:
11903 del myopts["--tree"]
11904 portage.writemsg(colorize("WARN", " * ") + \
11905 "--tree is broken with --nodeps. Disabling...\n")
11906 debug = "--debug" in myopts
11907 verbose = "--verbose" in myopts
11908 quiet = "--quiet" in myopts
11909 if pretend or fetchonly:
11910 # make the mtimedb readonly
11911 mtimedb.filename = None
11912 if '--digest' in myopts or 'digest' in settings.features:
11913 if '--digest' in myopts:
11914 msg = "The --digest option"
11916 msg = "The FEATURES=digest setting"
11918 msg += " can prevent corruption from being" + \
11919 " noticed. The `repoman manifest` command is the preferred" + \
11920 " way to generate manifests and it is capable of doing an" + \
11921 " entire repository or category at once."
11922 prefix = bad(" * ")
11923 writemsg(prefix + "\n")
11924 from textwrap import wrap
11925 for line in wrap(msg, 72):
11926 writemsg("%s%s\n" % (prefix, line))
11927 writemsg(prefix + "\n")
11929 if "--quiet" not in myopts and \
11930 ("--pretend" in myopts or "--ask" in myopts or \
11931 "--tree" in myopts or "--verbose" in myopts):
11933 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
11935 elif "--buildpkgonly" in myopts:
11939 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
11941 print darkgreen("These are the packages that would be %s, in reverse order:") % action
11945 print darkgreen("These are the packages that would be %s, in order:") % action
11948 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
11949 if not show_spinner:
11950 spinner.update = spinner.update_quiet
11953 favorites = mtimedb["resume"].get("favorites")
11954 if not isinstance(favorites, list):
11958 print "Calculating dependencies ",
11959 myparams = create_depgraph_params(myopts, myaction)
11961 resume_data = mtimedb["resume"]
11962 mergelist = resume_data["mergelist"]
11963 if mergelist and "--skipfirst" in myopts:
11964 for i, task in enumerate(mergelist):
11965 if isinstance(task, list) and \
11966 task and task[-1] == "merge":
11973 success, mydepgraph, dropped_tasks = resume_depgraph(
11974 settings, trees, mtimedb, myopts, myparams, spinner)
11975 except (portage.exception.PackageNotFound,
11976 depgraph.UnsatisfiedResumeDep), e:
11977 if isinstance(e, depgraph.UnsatisfiedResumeDep):
11978 mydepgraph = e.depgraph
11981 from textwrap import wrap
11982 from portage.output import EOutput
11985 resume_data = mtimedb["resume"]
11986 mergelist = resume_data.get("mergelist")
11987 if not isinstance(mergelist, list):
11989 if mergelist and debug or (verbose and not quiet):
11990 out.eerror("Invalid resume list:")
11993 for task in mergelist:
11994 if isinstance(task, list):
11995 out.eerror(indent + str(tuple(task)))
11998 if isinstance(e, depgraph.UnsatisfiedResumeDep):
11999 out.eerror("One or more packages are either masked or " + \
12000 "have missing dependencies:")
12003 for dep in e.value:
12004 if dep.atom is None:
12005 out.eerror(indent + "Masked package:")
12006 out.eerror(2 * indent + str(dep.parent))
12009 out.eerror(indent + str(dep.atom) + " pulled in by:")
12010 out.eerror(2 * indent + str(dep.parent))
12012 msg = "The resume list contains packages " + \
12013 "that are either masked or have " + \
12014 "unsatisfied dependencies. " + \
12015 "Please restart/continue " + \
12016 "the operation manually, or use --skipfirst " + \
12017 "to skip the first package in the list and " + \
12018 "any other packages that may be " + \
12019 "masked or have missing dependencies."
12020 for line in wrap(msg, 72):
12022 elif isinstance(e, portage.exception.PackageNotFound):
12023 out.eerror("An expected package is " + \
12024 "not available: %s" % str(e))
12026 msg = "The resume list contains one or more " + \
12027 "packages that are no longer " + \
12028 "available. Please restart/continue " + \
12029 "the operation manually."
12030 for line in wrap(msg, 72):
12034 print "\b\b... done!"
12038 portage.writemsg("!!! One or more packages have been " + \
12039 "dropped due to\n" + \
12040 "!!! masking or unsatisfied dependencies:\n\n",
12042 for task in dropped_tasks:
12043 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
12044 portage.writemsg("\n", noiselevel=-1)
12047 if mydepgraph is not None:
12048 mydepgraph.display_problems()
12049 if not (ask or pretend):
12050 # delete the current list and also the backup
12051 # since it's probably stale too.
12052 for k in ("resume", "resume_backup"):
12053 mtimedb.pop(k, None)
12058 if ("--resume" in myopts):
12059 print darkgreen("emerge: It seems we have nothing to resume...")
12062 myparams = create_depgraph_params(myopts, myaction)
12063 if "--quiet" not in myopts and "--nodeps" not in myopts:
12064 print "Calculating dependencies ",
12066 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
12068 retval, favorites = mydepgraph.select_files(myfiles)
12069 except portage.exception.PackageNotFound, e:
12070 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
12072 except portage.exception.PackageSetNotFound, e:
12073 root_config = trees[settings["ROOT"]]["root_config"]
12074 display_missing_pkg_set(root_config, e.value)
12077 print "\b\b... done!"
12079 mydepgraph.display_problems()
12082 if "--pretend" not in myopts and \
12083 ("--ask" in myopts or "--tree" in myopts or \
12084 "--verbose" in myopts) and \
12085 not ("--quiet" in myopts and "--ask" not in myopts):
12086 if "--resume" in myopts:
12087 mymergelist = mydepgraph.altlist()
12088 if len(mymergelist) == 0:
12089 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
12091 favorites = mtimedb["resume"]["favorites"]
12092 retval = mydepgraph.display(
12093 mydepgraph.altlist(reversed=tree),
12094 favorites=favorites)
12095 mydepgraph.display_problems()
12096 if retval != os.EX_OK:
12098 prompt="Would you like to resume merging these packages?"
12100 retval = mydepgraph.display(
12101 mydepgraph.altlist(reversed=("--tree" in myopts)),
12102 favorites=favorites)
12103 mydepgraph.display_problems()
12104 if retval != os.EX_OK:
12107 for x in mydepgraph.altlist():
12108 if isinstance(x, Package) and x.operation == "merge":
12112 sets = trees[settings["ROOT"]]["root_config"].sets
12113 world_candidates = None
12114 if "--noreplace" in myopts and \
12115 not oneshot and favorites:
12116 # Sets that are not world candidates are filtered
12117 # out here since the favorites list needs to be
12118 # complete for depgraph.loadResumeCommand() to
12119 # operate correctly.
12120 world_candidates = [x for x in favorites \
12121 if not (x.startswith(SETPREFIX) and \
12122 not sets[x[1:]].world_candidate)]
12123 if "--noreplace" in myopts and \
12124 not oneshot and world_candidates:
12126 for x in world_candidates:
12127 print " %s %s" % (good("*"), x)
12128 prompt="Would you like to add these packages to your world favorites?"
12129 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
12130 prompt="Nothing to merge; would you like to auto-clean packages?"
12133 print "Nothing to merge; quitting."
12136 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
12137 prompt="Would you like to fetch the source files for these packages?"
12139 prompt="Would you like to merge these packages?"
12141 if "--ask" in myopts and userquery(prompt) == "No":
12146 # Don't ask again (e.g. when auto-cleaning packages after merge)
12147 myopts.pop("--ask", None)
12149 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
12150 if ("--resume" in myopts):
12151 mymergelist = mydepgraph.altlist()
12152 if len(mymergelist) == 0:
12153 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
12155 favorites = mtimedb["resume"]["favorites"]
12156 retval = mydepgraph.display(
12157 mydepgraph.altlist(reversed=tree),
12158 favorites=favorites)
12159 mydepgraph.display_problems()
12160 if retval != os.EX_OK:
12163 retval = mydepgraph.display(
12164 mydepgraph.altlist(reversed=("--tree" in myopts)),
12165 favorites=favorites)
12166 mydepgraph.display_problems()
12167 if retval != os.EX_OK:
12169 if "--buildpkgonly" in myopts:
12170 graph_copy = mydepgraph.digraph.clone()
12171 removed_nodes = set()
12172 for node in graph_copy:
12173 if not isinstance(node, Package) or \
12174 node.operation == "nomerge":
12175 removed_nodes.add(node)
12176 graph_copy.difference_update(removed_nodes)
12177 if not graph_copy.hasallzeros(ignore_priority = \
12178 DepPrioritySatisfiedRange.ignore_medium):
12179 print "\n!!! --buildpkgonly requires all dependencies to be merged."
12180 print "!!! You have to merge the dependencies before you can build this package.\n"
12183 if "--buildpkgonly" in myopts:
12184 graph_copy = mydepgraph.digraph.clone()
12185 removed_nodes = set()
12186 for node in graph_copy:
12187 if not isinstance(node, Package) or \
12188 node.operation == "nomerge":
12189 removed_nodes.add(node)
12190 graph_copy.difference_update(removed_nodes)
12191 if not graph_copy.hasallzeros(ignore_priority = \
12192 DepPrioritySatisfiedRange.ignore_medium):
12193 print "\n!!! --buildpkgonly requires all dependencies to be merged."
12194 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
12197 if ("--resume" in myopts):
12198 favorites=mtimedb["resume"]["favorites"]
12199 mymergelist = mydepgraph.altlist()
12200 mydepgraph.break_refs(mymergelist)
12201 mergetask = Scheduler(settings, trees, mtimedb, myopts,
12202 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
12203 del mydepgraph, mymergelist
12204 clear_caches(trees)
12206 retval = mergetask.merge()
12207 merge_count = mergetask.curval
12209 if "resume" in mtimedb and \
12210 "mergelist" in mtimedb["resume"] and \
12211 len(mtimedb["resume"]["mergelist"]) > 1:
12212 mtimedb["resume_backup"] = mtimedb["resume"]
12213 del mtimedb["resume"]
12215 mtimedb["resume"]={}
12216 # Stored as a dict starting with portage-2.1.6_rc1, and supported
12217 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
12218 # a list type for options.
12219 mtimedb["resume"]["myopts"] = myopts.copy()
12221 # Convert Atom instances to plain str.
12222 mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
12224 pkglist = mydepgraph.altlist()
12225 mydepgraph.saveNomergeFavorites()
12226 mydepgraph.break_refs(pkglist)
12227 mergetask = Scheduler(settings, trees, mtimedb, myopts,
12228 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
12229 del mydepgraph, pkglist
12230 clear_caches(trees)
12232 retval = mergetask.merge()
12233 merge_count = mergetask.curval
12235 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
12236 if "yes" == settings.get("AUTOCLEAN"):
12237 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
12238 unmerge(trees[settings["ROOT"]]["root_config"],
12239 myopts, "clean", [],
12240 ldpath_mtimes, autoclean=1)
12242 portage.writemsg_stdout(colorize("WARN", "WARNING:")
12243 + " AUTOCLEAN is disabled. This can cause serious"
12244 + " problems due to overlapping packages.\n")
12245 trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
def multiple_actions(action1, action2):
	"""Report on stderr that two mutually exclusive emerge actions
	were requested on the same command line."""
	complaint = [
		"\n!!! Multiple actions requested... Please choose one only.\n",
		"!!! '%s' or '%s'\n\n" % (action1, action2),
	]
	for line in complaint:
		sys.stderr.write(line)
# Pre-processes an argv-style token list before optparse sees it: options
# that may appear with or without a value (--deselect, --root-deps,
# -j/--jobs) get an explicit value inserted, since optparse cannot express
# optional option-arguments itself.
# NOTE(review): this listing has gaps (original lines are missing, e.g. the
# enclosing while-loop header and try/except lines); tokens kept verbatim.
12254 def insert_optional_args(args):
12256 Parse optional arguments and insert a value if one has
12257 not been provided. This is done before feeding the args
12258 to the optparse parser since that parser does not support
12259 this feature natively.
# Options whose argument may be omitted on the command line, and the
# explicit values a user is allowed to supply for them.
12263 jobs_opts = ("-j", "--jobs")
12264 default_arg_opts = {
12265 '--deselect' : ('n',),
12266 '--root-deps' : ('rdeps',),
# Work on a reversed copy so tokens can be consumed from the end via pop().
12268 arg_stack = args[:]
12269 arg_stack.reverse()
12271 arg = arg_stack.pop()
12273 default_arg_choices = default_arg_opts.get(arg)
12274 if default_arg_choices is not None:
12275 new_args.append(arg)
# Keep an explicit value if the next token is one of the allowed choices;
# otherwise synthesize the default 'True'.
12276 if arg_stack and arg_stack[-1] in default_arg_choices:
12277 new_args.append(arg_stack.pop())
12279 # insert default argument
12280 new_args.append('True')
# A short-option cluster containing "j" (e.g. "-vj3"), but not a long option.
12283 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
12284 if not (short_job_opt or arg in jobs_opts):
12285 new_args.append(arg)
12288 # Insert an empty placeholder in order to
12289 # satisfy the requirements of optparse.
12291 new_args.append("--jobs")
# "-j3" style: the job count may be fused directly onto the option letters.
12294 if short_job_opt and len(arg) > 2:
12295 if arg[:2] == "-j":
12297 job_count = int(arg[2:])
12299 saved_opts = arg[2:]
# Cluster like "-vj": strip the "j", re-emit the remaining letters later.
12302 saved_opts = arg[1:].replace("j", "")
# Separate count token ("-j 3"); presumably wrapped in try/except in the
# elided lines -- TODO confirm against the full source.
12304 if job_count is None and arg_stack:
12306 job_count = int(arg_stack[-1])
12310 # Discard the job count from the stack
12311 # since we're consuming it here.
# No count given: "True" means an unlimited number of jobs downstream.
12314 if job_count is None:
12315 # unlimited number of jobs
12316 new_args.append("True")
12318 new_args.append(str(job_count))
# Re-emit any non-"j" short options that were clustered together with -j.
12320 if saved_opts is not None:
12321 new_args.append("-" + saved_opts)
# Parses the emerge command line with optparse, returning the tuple
# (myaction, myopts, myfiles).  Builds option tables from the module-level
# `actions`, `options` and `shortmapping` globals, then post-processes
# special values (--jobs, --load-average, --deselect, --root-deps).
# NOTE(review): this listing has gaps (dict braces, try/except and some
# assignments from the original are missing); tokens kept verbatim.
12325 def parse_opts(tmpcmdline, silent=False):
12330 global actions, options, shortmapping
# Legacy spellings mapped onto their canonical long options.
12332 longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
# Options that take an argument, keyed by option string; values are the
# optparse add_option() keyword arguments (help text, choices, ...).
12333 argument_options = {
12335 "help":"specify the location for portage configuration files",
12339 "help":"enable or disable color output",
12341 "choices":("y", "n")
12345 "help" : "remove atoms from the world file",
12347 "choices" : ("True", "n")
12352 "help" : "Specifies the number of packages to build " + \
12358 "--load-average": {
12360 "help" :"Specifies that no new builds should be started " + \
12361 "if there are other builds running and the load average " + \
12362 "is at least LOAD (a floating-point number).",
12368 "help":"include unnecessary build time dependencies",
12370 "choices":("y", "n")
12373 "help":"specify conditions to trigger package reinstallation",
12375 "choices":["changed-use"]
12378 "help" : "specify the target root filesystem for merging packages",
12383 "help" : "modify interpretation of depedencies",
12385 "choices" :("True", "rdeps")
# emerge implements its own help action, so drop optparse's builtin --help.
12389 from optparse import OptionParser
12390 parser = OptionParser()
12391 if parser.has_option("--help"):
12392 parser.remove_option("--help")
# Register boolean actions, boolean options, short aliases and long
# aliases.  NOTE: lstrip("--") strips a *character set* of '-', not a
# prefix -- safe only because no option name begins with a letter in
# that set after the dashes.
12394 for action_opt in actions:
12395 parser.add_option("--" + action_opt, action="store_true",
12396 dest=action_opt.replace("-", "_"), default=False)
12397 for myopt in options:
12398 parser.add_option(myopt, action="store_true",
12399 dest=myopt.lstrip("--").replace("-", "_"), default=False)
12400 for shortopt, longopt in shortmapping.iteritems():
12401 parser.add_option("-" + shortopt, action="store_true",
12402 dest=longopt.lstrip("--").replace("-", "_"), default=False)
12403 for myalias, myopt in longopt_aliases.iteritems():
12404 parser.add_option(myalias, action="store_true",
12405 dest=myopt.lstrip("--").replace("-", "_"), default=False)
12407 for myopt, kwargs in argument_options.iteritems():
12408 parser.add_option(myopt,
12409 dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
# Insert synthesized values for optional-argument options (see
# insert_optional_args) before the real parse.
12411 tmpcmdline = insert_optional_args(tmpcmdline)
12413 myoptions, myargs = parser.parse_args(args=tmpcmdline)
# Convert the "True" sentinel strings back into real booleans.
12415 if myoptions.deselect == "True":
12416 myoptions.deselect = True
12418 if myoptions.root_deps == "True":
12419 myoptions.root_deps = True
# --jobs: "True" means unlimited, otherwise it must parse as an int
# (the try/except around int() is elided in this listing).
12423 if myoptions.jobs == "True":
12427 jobs = int(myoptions.jobs)
12431 if jobs is not True and \
12435 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
12436 (myoptions.jobs,), noiselevel=-1)
12438 myoptions.jobs = jobs
# --load-average must parse as a positive float; non-positive or unparsable
# values are discarded (set to None) with a warning.
12440 if myoptions.load_average:
12442 load_average = float(myoptions.load_average)
12446 if load_average <= 0.0:
12447 load_average = None
12449 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
12450 (myoptions.load_average,), noiselevel=-1)
12452 myoptions.load_average = load_average
# Collect the parsed values back into the myopts dict keyed by the
# original option strings.
12454 for myopt in options:
12455 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
12457 myopts[myopt] = True
12459 for myopt in argument_options:
12460 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
# --searchdesc implies --search.
12464 if myoptions.searchdesc:
12465 myoptions.search = True
# Exactly one action may be requested; multiple_actions() reports a clash.
12467 for action_opt in actions:
12468 v = getattr(myoptions, action_opt.replace("-", "_"))
12471 multiple_actions(myaction, action_opt)
12473 myaction = action_opt
12475 if myaction is None and myoptions.deselect is True:
12476 myaction = 'deselect'
12480 return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
	"""Run config validation on the vartree settings of every root."""
	for root_trees in trees.values():
		root_trees["vartree"].settings.validate()
def clear_caches(trees):
	"""Drop memoized dbapi caches for every root, then portage's global
	directory cache, to release memory before a long-running merge."""
	for root_trees in trees.itervalues():
		porttree_db = root_trees["porttree"].dbapi
		bintree_db = root_trees["bintree"].dbapi
		porttree_db.melt()
		porttree_db._aux_cache.clear()
		bintree_db._aux_cache.clear()
		bintree_db._clear_cache()
		root_trees["vartree"].dbapi.linkmap._clear_cache()
	portage.dircache.clear()
# Builds the (settings, trees, mtimedb) triple used throughout emerge:
# creates the portage trees (honoring PORTAGE_CONFIGROOT/ROOT from the
# environment), attaches a RootConfig to each root, and opens the mtimedb.
# NOTE(review): this listing has gaps -- the kwargs dict that collects the
# environment overrides is initialized/assigned in elided lines.
12497 def load_emerge_config(trees=None):
# Collect config_root/target_root overrides from the environment
# (assignment into kwargs is in an elided line).
12499 for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
12500 v = os.environ.get(envvar, None)
12501 if v and v.strip():
12503 trees = portage.create_trees(trees=trees, **kwargs)
# Give every root a RootConfig wired to its set configuration.
12505 for root, root_trees in trees.iteritems():
12506 settings = root_trees["vartree"].settings
12507 setconfig = load_default_config(settings, root_trees)
12508 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
12510 settings = trees["/"]["vartree"].settings
12512 for myroot in trees:
12514 settings = trees[myroot]["vartree"].settings
# The mtimedb lives under the global portage cache path.
12517 mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
12518 mtimedb = portage.MtimeDB(mtimedbfile)
12520 return settings, trees, mtimedb
# NOTE(review): this listing has gaps (the `try:` and `else:` lines around
# the integer-parsing sections are elided); tokens kept verbatim.
12522 def adjust_config(myopts, settings):
12523 """Make emerge specific adjustments to the config."""
# Normalize selected variables to lower case so comparisons elsewhere
# ("yes" == settings["AUTOCLEAN"]) are case-insensitive.
12525 # To enhance usability, make some vars case insensitive by forcing them to
12527 for myvar in ("AUTOCLEAN", "NOCOLOR"):
12528 if myvar in settings:
12529 settings[myvar] = settings[myvar].lower()
12530 settings.backup_changes(myvar)
12533 # Kill noauto as it will break merges otherwise.
12534 if "noauto" in settings.features:
12535 settings.features.remove('noauto')
12536 settings['FEATURES'] = ' '.join(sorted(settings.features))
12537 settings.backup_changes("FEATURES")
# CLEAN_DELAY must parse as an integer; on failure fall back to the
# default and warn (the enclosing try: is in an elided line).
12541 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
12542 except ValueError, e:
12543 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
12544 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
12545 settings["CLEAN_DELAY"], noiselevel=-1)
12546 settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
12547 settings.backup_changes("CLEAN_DELAY")
# Same validation pattern for EMERGE_WARNING_DELAY (default 10 seconds).
12549 EMERGE_WARNING_DELAY = 10
12551 EMERGE_WARNING_DELAY = int(settings.get(
12552 "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
12553 except ValueError, e:
12554 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
12555 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
12556 settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
12557 settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
12558 settings.backup_changes("EMERGE_WARNING_DELAY")
# Propagate command-line verbosity flags into the config so ebuild
# helpers see them.
12560 if "--quiet" in myopts:
12561 settings["PORTAGE_QUIET"]="1"
12562 settings.backup_changes("PORTAGE_QUIET")
12564 if "--verbose" in myopts:
12565 settings["PORTAGE_VERBOSE"] = "1"
12566 settings.backup_changes("PORTAGE_VERBOSE")
12568 # Set so that configs will be merged regardless of remembered status
12569 if ("--noconfmem" in myopts):
12570 settings["NOCONFMEM"]="1"
12571 settings.backup_changes("NOCONFMEM")
12573 # Set various debug markers... They should be merged somehow.
# PORTAGE_DEBUG must be 0 or 1; anything else is rejected with a warning
# (the enclosing try: is in an elided line).
12576 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
12577 if PORTAGE_DEBUG not in (0, 1):
12578 portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
12579 PORTAGE_DEBUG, noiselevel=-1)
12580 portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
12583 except ValueError, e:
12584 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
12585 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
12586 settings["PORTAGE_DEBUG"], noiselevel=-1)
12588 if "--debug" in myopts:
12590 settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
12591 settings.backup_changes("PORTAGE_DEBUG")
# Color handling: NOCOLOR from config, overridden by an explicit --color,
# and auto-disabled when stdout is not a tty.
12593 if settings.get("NOCOLOR") not in ("yes","true"):
12594 portage.output.havecolor = 1
12596 """The explicit --color < y | n > option overrides the NOCOLOR environment
12597 variable and stdout auto-detection."""
12598 if "--color" in myopts:
12599 if "y" == myopts["--color"]:
12600 portage.output.havecolor = 1
12601 settings["NOCOLOR"] = "false"
12603 portage.output.havecolor = 0
12604 settings["NOCOLOR"] = "true"
12605 settings.backup_changes("NOCOLOR")
12606 elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
12607 portage.output.havecolor = 0
12608 settings["NOCOLOR"] = "true"
12609 settings.backup_changes("NOCOLOR")
# Applies process scheduling priorities from the config.  Body elided in
# this listing -- presumably delegates to nice() and ionice() below;
# TODO confirm against the full source.
12611 def apply_priorities(settings):
# Renices the emerge process to PORTAGE_NICENESS (default "0"), reporting
# failures via EOutput instead of raising.
# NOTE(review): the `try:` line preceding os.nice() is elided in this listing.
12615 def nice(settings):
12617 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
# os.nice can raise OSError (no permission) and int() ValueError (bad value).
12618 except (OSError, ValueError), e:
12619 out = portage.output.EOutput()
12620 out.eerror("Failed to change nice value to '%s'" % \
12621 settings["PORTAGE_NICENESS"])
12622 out.eerror("%s\n" % str(e))
# Runs the user-configured PORTAGE_IONICE_COMMAND (with ${PID} expanded to
# this process id) to adjust I/O priority.  Best-effort: a missing command
# is ignored silently, a nonzero exit is reported via EOutput.
# NOTE(review): this listing has gaps (guards and return lines elided).
12624 def ionice(settings):
12626 ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
12628 ionice_cmd = shlex.split(ionice_cmd)
# Expand ${PID} (and any other variables) in each argument.
12632 from portage.util import varexpand
12633 variables = {"PID" : str(os.getpid())}
12634 cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
12637 rval = portage.process.spawn(cmd, env=os.environ)
12638 except portage.exception.CommandNotFound:
12639 # The OS kernel probably doesn't support ionice,
12640 # so return silently.
12643 if rval != os.EX_OK:
12644 out = portage.output.EOutput()
12645 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
12646 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
# Logs an error listing the package sets that do exist when a requested
# set name cannot be satisfied.
# NOTE(review): the `msg = []` initialization is elided in this listing.
12648 def display_missing_pkg_set(root_config, set_name):
12651 msg.append(("emerge: There are no sets to satisfy '%s'. " + \
12652 "The following sets exist:") % \
12653 colorize("INFORM", set_name))
# List every known set name, sorted for stable output.
12656 for s in sorted(root_config.sets):
12657 msg.append(" %s" % s)
12660 writemsg_level("".join("%s\n" % l for l in msg),
12661 level=logging.ERROR, noiselevel=-1)
# Expands @set arguments on the emerge command line into package atoms:
# handles set-option syntax ("@set[opt=val,...]"), simple set algebra
# (&@ intersection is implied by IS_OPERATOR, -@ difference, +@ union),
# validates that "world" and "system" sets exist, and enforces unmerge
# support for unmerge-like actions.  Returns (newargs, retval).
# NOTE(review): this listing has large gaps (initializations of newargs,
# options, missing_sets, retval, ARG_START/ARG_END/IS_OPERATOR, several
# loop headers and return/continue lines are elided); tokens kept verbatim.
12663 def expand_set_arguments(myfiles, myaction, root_config):
12665 setconfig = root_config.setconfig
12667 sets = setconfig.getSets()
12669 # In order to know exactly which atoms/sets should be added to the
12670 # world file, the depgraph performs set expansion later. It will get
12671 # confused about where the atoms came from if it's not allowed to
12672 # expand them itself.
12673 do_not_expand = (None, )
# Bare "system"/"world" arguments are rewritten with the set prefix.
12676 if a in ("system", "world"):
12677 newargs.append(SETPREFIX+a)
12684 # separators for set arguments
12688 # WARNING: all operators must be of equal length
12690 DIFF_OPERATOR = "-@"
12691 UNION_OPERATOR = "+@"
# First pass: strip and apply per-set option blocks "[k=v,...]".
12693 for i in range(0, len(myfiles)):
12694 if myfiles[i].startswith(SETPREFIX):
12697 x = myfiles[i][len(SETPREFIX):]
12700 start = x.find(ARG_START)
12701 end = x.find(ARG_END)
12702 if start > 0 and start < end:
12703 namepart = x[:start]
12704 argpart = x[start+1:end]
12706 # TODO: implement proper quoting
12707 args = argpart.split(",")
12711 k, v = a.split("=", 1)
# A bare option name (no "=") is treated as a boolean "True".
12714 options[a] = "True"
12715 setconfig.update(namepart, options)
12716 newset += (x[:start-len(namepart)]+namepart)
12717 x = x[end+len(ARG_END):]
12721 myfiles[i] = SETPREFIX+newset
# Reload sets after any option updates above.
12723 sets = setconfig.getSets()
12725 # display errors that occured while loading the SetConfig instance
12726 for e in setconfig.errors:
12727 print colorize("BAD", "Error during set creation: %s" % e)
12729 # emerge relies on the existance of sets with names "world" and "system"
12730 required_sets = ("world", "system")
12733 for s in required_sets:
12735 missing_sets.append(s)
# Build a human-readable list of the missing required sets.
12737 if len(missing_sets) > 2:
12738 missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
12739 missing_sets_str += ', and "%s"' % missing_sets[-1]
12740 elif len(missing_sets) == 2:
12741 missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
12743 missing_sets_str = '"%s"' % missing_sets[-1]
12744 msg = ["emerge: incomplete set configuration, " + \
12745 "missing set(s): %s" % missing_sets_str]
12747 msg.append(" sets defined: %s" % ", ".join(sets))
12748 msg.append(" This usually means that '%s'" % \
12749 (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
12750 msg.append(" is missing or corrupt.")
12752 writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
# Actions for which every named set must support the unmerge operation.
12754 unmerge_actions = ("unmerge", "prune", "clean", "depclean")
12757 if a.startswith(SETPREFIX):
12758 # support simple set operations (intersection, difference and union)
12759 # on the commandline. Expressions are evaluated strictly left-to-right
12760 if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
12761 expression = a[len(SETPREFIX):]
# Peel operators off the right end (rfind) so evaluation order ends up
# left-to-right when the collected lists are replayed below.
12764 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
12765 is_pos = expression.rfind(IS_OPERATOR)
12766 diff_pos = expression.rfind(DIFF_OPERATOR)
12767 union_pos = expression.rfind(UNION_OPERATOR)
12768 op_pos = max(is_pos, diff_pos, union_pos)
12769 s1 = expression[:op_pos]
12770 s2 = expression[op_pos+len(IS_OPERATOR):]
12771 op = expression[op_pos:op_pos+len(IS_OPERATOR)]
12773 display_missing_pkg_set(root_config, s2)
12775 expr_sets.insert(0, s2)
12776 expr_ops.insert(0, op)
12778 if not expression in sets:
12779 display_missing_pkg_set(root_config, expression)
12781 expr_sets.insert(0, expression)
# Start from the leftmost set's atoms and fold in each operator.
12782 result = set(setconfig.getSetAtoms(expression))
12783 for i in range(0, len(expr_ops)):
12784 s2 = setconfig.getSetAtoms(expr_sets[i+1])
12785 if expr_ops[i] == IS_OPERATOR:
12786 result.intersection_update(s2)
12787 elif expr_ops[i] == DIFF_OPERATOR:
12788 result.difference_update(s2)
12789 elif expr_ops[i] == UNION_OPERATOR:
12792 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
12793 newargs.extend(result)
# Plain @set argument (no operators).
12795 s = a[len(SETPREFIX):]
12797 display_missing_pkg_set(root_config, s)
12799 setconfig.active.append(s)
12801 set_atoms = setconfig.getSetAtoms(s)
12802 except portage.exception.PackageSetNotFound, e:
12803 writemsg_level(("emerge: the given set '%s' " + \
12804 "contains a non-existent set named '%s'.\n") % \
12805 (s, e), level=logging.ERROR, noiselevel=-1)
12807 if myaction in unmerge_actions and \
12808 not sets[s].supportsOperation("unmerge"):
12809 sys.stderr.write("emerge: the given set '%s' does " % s + \
12810 "not support unmerge operations\n")
12812 elif not set_atoms:
12813 print "emerge: '%s' is an empty set" % s
# Expand to atoms unless the action does its own expansion (see
# do_not_expand above); otherwise keep the @set token.
12814 elif myaction not in do_not_expand:
12815 newargs.extend(set_atoms)
12817 newargs.append(SETPREFIX+s)
12818 for e in sets[s].errors:
12822 return (newargs, retval)
# Warns about repositories that lack a profiles/repo_name entry.  Returns
# True when at least one repository is missing one.
# NOTE(review): this listing has gaps (the loop header over `repos` and the
# `msg = []` initialization are elided); tokens kept verbatim.
12824 def repo_name_check(trees):
12825 missing_repo_names = set()
12826 for root, root_trees in trees.iteritems():
12827 if "porttree" in root_trees:
12828 portdb = root_trees["porttree"].dbapi
# Start from all configured tree paths, then discard those that have a
# registered repository name.
12829 missing_repo_names.update(portdb.porttrees)
12830 repos = portdb.getRepositories()
12832 missing_repo_names.discard(portdb.getRepositoryPath(r))
12833 if portdb.porttree_root in missing_repo_names and \
12834 not os.path.exists(os.path.join(
12835 portdb.porttree_root, "profiles")):
12836 # This is normal if $PORTDIR happens to be empty,
12837 # so don't warn about it.
12838 missing_repo_names.remove(portdb.porttree_root)
12840 if missing_repo_names:
12842 msg.append("WARNING: One or more repositories " + \
12843 "have missing repo_name entries:")
12845 for p in missing_repo_names:
12846 msg.append("\t%s/profiles/repo_name" % (p,))
12848 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
12849 "should be a plain text file containing a unique " + \
12850 "name for the repository on the first line.", 70))
12851 writemsg_level("".join("%s\n" % l for l in msg),
12852 level=logging.WARNING, noiselevel=-1)
12854 return bool(missing_repo_names)
# Warns about repositories that were ignored because of duplicate
# profiles/repo_name entries (suppressible via
# PORTAGE_REPO_DUPLICATE_WARN="0").  Returns True when any were ignored.
# NOTE(review): the `ignored_repos = {}` and `msg = []` initializations
# are elided in this listing; tokens kept verbatim.
12856 def repo_name_duplicate_check(trees):
12858 for root, root_trees in trees.iteritems():
12859 if 'porttree' in root_trees:
12860 portdb = root_trees['porttree'].dbapi
12861 if portdb.mysettings.get('PORTAGE_REPO_DUPLICATE_WARN') != '0':
# Key by (root, repo name, winning path) so the message can show which
# path overrides which.
12862 for repo_name, paths in portdb._ignored_repos:
12863 k = (root, repo_name, portdb.getRepositoryPath(repo_name))
12864 ignored_repos.setdefault(k, []).extend(paths)
12868 msg.append('WARNING: One or more repositories ' + \
12869 'have been ignored due to duplicate')
12870 msg.append(' profiles/repo_name entries:')
12872 for k in sorted(ignored_repos):
12873 msg.append(' %s overrides' % (k,))
12874 for path in ignored_repos[k]:
12875 msg.append(' %s' % (path,))
12877 msg.extend(' ' + x for x in textwrap.wrap(
12878 "All profiles/repo_name entries must be unique in order " + \
12879 "to avoid having duplicates ignored. " + \
12880 "Set PORTAGE_REPO_DUPLICATE_WARN=\"0\" in " + \
12881 "/etc/make.conf if you would like to disable this warning."))
12882 writemsg_level(''.join('%s\n' % l for l in msg),
12883 level=logging.WARNING, noiselevel=-1)
12885 return bool(ignored_repos)
def config_protect_check(trees):
	"""Warn (at WARN level) for each root whose CONFIG_PROTECT is empty."""
	for root, root_trees in trees.iteritems():
		if root_trees["root_config"].settings.get("CONFIG_PROTECT"):
			continue
		warning = "!!! CONFIG_PROTECT is empty"
		warning += " for '%s'" % root
		writemsg_level(warning, level=logging.WARN, noiselevel=-1)
# Reports that a short ebuild name matched multiple category/package pairs.
# In --quiet mode just lists the candidates; otherwise runs a search over
# them to show fuller output.
# NOTE(review): this listing has gaps (the non-quiet branch header, the
# argument to insert_category_into_atom, and the s.output()/addCP calls
# are elided); tokens kept verbatim.
12895 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
12897 if "--quiet" in myopts:
12898 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
12899 print "!!! one of the following fully-qualified ebuild names instead:\n"
# Unique category/package keys of the matching atoms, sorted for output.
12900 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
12901 print " " + colorize("INFORM", cp)
# Non-quiet path: reuse the search machinery to render the candidates.
12904 s = search(root_config, spinner, "--searchdesc" in myopts,
12905 "--quiet" not in myopts, "--usepkg" in myopts,
12906 "--usepkgonly" in myopts)
12907 null_cp = portage.dep_getkey(insert_category_into_atom(
12909 cat, atom_pn = portage.catsplit(null_cp)
12910 s.searchkey = atom_pn
12911 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
12914 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
12915 print "!!! one of the above fully-qualified ebuild names instead.\n"
# Verifies that every root has a valid profile before allowing actions
# other than --help/--info/--sync/--version.  On failure it logs guidance
# and (in an elided line) returns an error status.
# NOTE(review): the early `return os.EX_OK` lines and the final return are
# elided in this listing; tokens kept verbatim.
12917 def profile_check(trees, myaction, myopts):
# These actions/options are allowed even with a broken profile.
12918 if myaction in ("info", "sync"):
12920 elif "--version" in myopts or "--help" in myopts:
12922 for root, root_trees in trees.iteritems():
12923 if root_trees["root_config"].settings.profiles:
12925 # generate some profile related warning messages
12926 validate_ebuild_environment(trees)
12927 msg = "If you have just changed your profile configuration, you " + \
12928 "should revert back to the previous configuration. Due to " + \
12929 "your current profile being invalid, allowed actions are " + \
12930 "limited to --help, --info, --sync, and --version."
12931 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
12932 level=logging.ERROR, noiselevel=-1)
12937 global portage # NFC why this is necessary now - genone
12938 portage._disable_legacy_globals()
12939 # Disable color until we're sure that it should be enabled (after
12940 # EMERGE_DEFAULT_OPTS has been parsed).
12941 portage.output.havecolor = 0
12942 # This first pass is just for options that need to be known as early as
12943 # possible, such as --config-root. They will be parsed again later,
12944 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
12945 # the value of --config-root).
12946 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
12947 if "--debug" in myopts:
12948 os.environ["PORTAGE_DEBUG"] = "1"
12949 if "--config-root" in myopts:
12950 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
12951 if "--root" in myopts:
12952 os.environ["ROOT"] = myopts["--root"]
12954 # Portage needs to ensure a sane umask for the files it creates.
12956 settings, trees, mtimedb = load_emerge_config()
12957 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12958 rval = profile_check(trees, myaction, myopts)
12959 if rval != os.EX_OK:
12962 if portage._global_updates(trees, mtimedb["updates"]):
12964 # Reload the whole config from scratch.
12965 settings, trees, mtimedb = load_emerge_config(trees=trees)
12966 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12968 xterm_titles = "notitles" not in settings.features
12971 if "--ignore-default-opts" not in myopts:
12972 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
12973 tmpcmdline.extend(sys.argv[1:])
12974 myaction, myopts, myfiles = parse_opts(tmpcmdline)
12976 if "--digest" in myopts:
12977 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
12978 # Reload the whole config from scratch so that the portdbapi internal
12979 # config is updated with new FEATURES.
12980 settings, trees, mtimedb = load_emerge_config(trees=trees)
12981 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12983 for myroot in trees:
12984 mysettings = trees[myroot]["vartree"].settings
12985 mysettings.unlock()
12986 adjust_config(myopts, mysettings)
12987 if '--pretend' not in myopts and myaction in \
12988 (None, 'clean', 'depclean', 'prune', 'unmerge'):
12989 mysettings["PORTAGE_COUNTER_HASH"] = \
12990 trees[myroot]["vartree"].dbapi._counter_hash()
12991 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
12993 del myroot, mysettings
12995 apply_priorities(settings)
12997 spinner = stdout_spinner()
12998 if "candy" in settings.features:
12999 spinner.update = spinner.update_scroll
13001 if "--quiet" not in myopts:
13002 portage.deprecated_profile_check(settings=settings)
13003 repo_name_check(trees)
13004 repo_name_duplicate_check(trees)
13005 config_protect_check(trees)
13007 for mytrees in trees.itervalues():
13008 mydb = mytrees["porttree"].dbapi
13009 # Freeze the portdbapi for performance (memoize all xmatch results).
13013 if "moo" in myfiles:
13016 Larry loves Gentoo (""" + platform.system() + """)
13018 _______________________
13019 < Have you mooed today? >
13020 -----------------------
13030 ext = os.path.splitext(x)[1]
13031 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
13032 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
13035 root_config = trees[settings["ROOT"]]["root_config"]
13036 if myaction == "list-sets":
13037 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
13041 # only expand sets for actions taking package arguments
13042 oldargs = myfiles[:]
13043 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
13044 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
13045 if retval != os.EX_OK:
13048 # Need to handle empty sets specially, otherwise emerge will react
13049 # with the help message for empty argument lists
13050 if oldargs and not myfiles:
13051 print "emerge: no targets left after set expansion"
13054 if ("--tree" in myopts) and ("--columns" in myopts):
13055 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
13058 if ("--quiet" in myopts):
13059 spinner.update = spinner.update_quiet
13060 portage.util.noiselimit = -1
13062 # Always create packages if FEATURES=buildpkg
13063 # Imply --buildpkg if --buildpkgonly
13064 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
13065 if "--buildpkg" not in myopts:
13066 myopts["--buildpkg"] = True
13068 # Always try and fetch binary packages if FEATURES=getbinpkg
13069 if ("getbinpkg" in settings.features):
13070 myopts["--getbinpkg"] = True
13072 if "--buildpkgonly" in myopts:
13073 # --buildpkgonly will not merge anything, so
13074 # it cancels all binary package options.
13075 for opt in ("--getbinpkg", "--getbinpkgonly",
13076 "--usepkg", "--usepkgonly"):
13077 myopts.pop(opt, None)
13079 if "--fetch-all-uri" in myopts:
13080 myopts["--fetchonly"] = True
13082 if "--skipfirst" in myopts and "--resume" not in myopts:
13083 myopts["--resume"] = True
13085 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
13086 myopts["--usepkgonly"] = True
13088 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
13089 myopts["--getbinpkg"] = True
13091 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
13092 myopts["--usepkg"] = True
13094 # Also allow -K to apply --usepkg/-k
13095 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
13096 myopts["--usepkg"] = True
13098 # Allow -p to remove --ask
13099 if "--pretend" in myopts:
13100 myopts.pop("--ask", None)
13102 # forbid --ask when not in a terminal
13103 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
13104 if ("--ask" in myopts) and (not sys.stdin.isatty()):
13105 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
13109 if settings.get("PORTAGE_DEBUG", "") == "1":
13110 spinner.update = spinner.update_quiet
13112 if "python-trace" in settings.features:
13113 import portage.debug
13114 portage.debug.set_trace(True)
13116 if not ("--quiet" in myopts):
13117 if not sys.stdout.isatty() or ("--nospinner" in myopts):
13118 spinner.update = spinner.update_basic
13120 if myaction == 'version':
13121 print getportageversion(settings["PORTDIR"], settings["ROOT"],
13122 settings.profile_path, settings["CHOST"],
13123 trees[settings["ROOT"]]["vartree"].dbapi)
13125 elif "--help" in myopts:
13126 _emerge.help.help(myaction, myopts, portage.output.havecolor)
13129 if "--debug" in myopts:
13130 print "myaction", myaction
13131 print "myopts", myopts
13133 if not myaction and not myfiles and "--resume" not in myopts:
13134 _emerge.help.help(myaction, myopts, portage.output.havecolor)
13137 pretend = "--pretend" in myopts
13138 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
13139 buildpkgonly = "--buildpkgonly" in myopts
13141 # check if root user is the current user for the actions where emerge needs this
13142 if portage.secpass < 2:
13143 # We've already allowed "--version" and "--help" above.
13144 if "--pretend" not in myopts and myaction not in ("search","info"):
13145 need_superuser = myaction in ('clean', 'depclean', 'deselect',
13146 'prune', 'unmerge') or not \
13148 (buildpkgonly and secpass >= 1) or \
13149 myaction in ("metadata", "regen") or \
13150 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
13151 if portage.secpass < 1 or \
13154 access_desc = "superuser"
13156 access_desc = "portage group"
13157 # Always show portage_group_warning() when only portage group
13158 # access is required but the user is not in the portage group.
13159 from portage.data import portage_group_warning
13160 if "--ask" in myopts:
13161 myopts["--pretend"] = True
13162 del myopts["--ask"]
13163 print ("%s access is required... " + \
13164 "adding --pretend to options\n") % access_desc
13165 if portage.secpass < 1 and not need_superuser:
13166 portage_group_warning()
13168 sys.stderr.write(("emerge: %s access is required\n") \
13170 if portage.secpass < 1 and not need_superuser:
13171 portage_group_warning()
	# Decide whether this invocation should write to the emerge log at all.
	disable_emergelog = False
	for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
			disable_emergelog = True
	if myaction in ("search", "info"):
		disable_emergelog = True
	if disable_emergelog:
		""" Disable emergelog for everything except build or unmerge
		operations. This helps minimize parallel emerge.log entries that can
		confuse log parsers. We especially want it disabled during
		parallel-fetch, which uses --resume --fetchonly."""
		# Replace the module-level logger with a no-op stub.
		def emergelog(*pargs, **kargs):
		if 'EMERGE_LOG_DIR' in settings:
				# At least the parent needs to exist for the lock file.
				portage.util.ensure_dirs(settings['EMERGE_LOG_DIR'])
			except portage.exception.PortageException, e:
				writemsg_level("!!! Error creating directory for " + \
					"EMERGE_LOG_DIR='%s':\n!!! %s\n" % \
					(settings['EMERGE_LOG_DIR'], e),
					noiselevel=-1, level=logging.ERROR)
				# Directory is usable: remember the override location.
				global _emerge_log_dir
				_emerge_log_dir = settings['EMERGE_LOG_DIR']
	# Log session start and the reconstructed command line, unless --pretend.
	if not "--pretend" in myopts:
		emergelog(xterm_titles, "Started emerge on: "+\
			time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
			myelogstr=" ".join(myopts)
			myelogstr+=" "+myaction
			myelogstr += " " + " ".join(oldargs)
		emergelog(xterm_titles, " *** emerge " + myelogstr)
13217 def emergeexitsig(signum, frame):
13218 signal.signal(signal.SIGINT, signal.SIG_IGN)
13219 signal.signal(signal.SIGTERM, signal.SIG_IGN)
13220 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
13221 sys.exit(100+signum)
13222 signal.signal(signal.SIGINT, emergeexitsig)
13223 signal.signal(signal.SIGTERM, emergeexitsig)
		"""This gets out final log message in before we quit."""
		# Skip the log entry in --pretend mode, mirroring session start.
		if "--pretend" not in myopts:
			emergelog(xterm_titles, " *** terminating.")
		if "notitles" not in settings.features:
	# Run the termination message / title reset at interpreter exit.
	portage.atexit_register(emergeexit)
	# These maintenance actions don't make sense combined with --pretend.
	if myaction in ("config", "metadata", "regen", "sync"):
		if "--pretend" in myopts:
			sys.stderr.write(("emerge: The '%s' action does " + \
				"not support '--pretend'.\n") % myaction)
	# Dispatch to the per-action entry points.
	if "sync" == myaction:
		return action_sync(settings, trees, mtimedb, myopts, myaction)
	elif "metadata" == myaction:
		action_metadata(settings, portdb, myopts)
	elif myaction=="regen":
		validate_ebuild_environment(trees)
		return action_regen(settings, portdb, myopts.get("--jobs"),
			myopts.get("--load-average"))
	# CONFIG action
	elif "config"==myaction:
		validate_ebuild_environment(trees)
		action_config(settings, trees, myopts, myfiles)
	# SEARCH action
	elif "search"==myaction:
		validate_ebuild_environment(trees)
		action_search(trees[settings["ROOT"]]["root_config"],
			myopts, myfiles, spinner)
	# All unmerge-flavored actions funnel through action_uninstall().
	elif myaction in ('clean', 'depclean', 'deselect', 'prune', 'unmerge'):
		validate_ebuild_environment(trees)
		rval = action_uninstall(settings, trees, mtimedb["ldpath"],
			myopts, myaction, myfiles, spinner)
		# Skip post_emerge for deselect and for the non-modifying modes.
		if not (myaction == 'deselect' or buildpkgonly or fetchonly or pretend):
			post_emerge(root_config, myopts, mtimedb, rval)
	elif myaction == 'info':
		# Ensure atoms are valid before calling unmerge().
		vardb = trees[settings["ROOT"]]["vartree"].dbapi
			if is_valid_package_atom(x):
					valid_atoms.append(
						portage.dep_expand(x, mydb=vardb, settings=settings))
				except portage.exception.AmbiguousPackageName, e:
					# A short name matched several packages: list the
					# fully-qualified candidates and abort.
					msg = "The short ebuild name \"" + x + \
						"\" is ambiguous. Please specify " + \
						"one of the following " + \
						"fully-qualified ebuild names instead:"
					for line in textwrap.wrap(msg, 70):
						writemsg_level("!!! %s\n" % (line,),
							level=logging.ERROR, noiselevel=-1)
						writemsg_level(" %s\n" % colorize("INFORM", i),
							level=logging.ERROR, noiselevel=-1)
					writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
			# Not a valid atom at all: report and abort.
			msg.append("'%s' is not a valid package atom." % (x,))
			msg.append("Please check ebuild(5) for full details.")
			writemsg_level("".join("!!! %s\n" % line for line in msg),
				level=logging.ERROR, noiselevel=-1)
		return action_info(settings, trees, myopts, valid_atoms)
	# "update", "system", or just process files:
		validate_ebuild_environment(trees)
		# Validate each target: set names and package atoms pass, as do
		# filesystem paths; anything else is reported as invalid.
			if x.startswith(SETPREFIX) or \
				is_valid_package_atom(x):
			if x[:1] == os.sep:
			msg.append("'%s' is not a valid package atom." % (x,))
			msg.append("Please check ebuild(5) for full details.")
			writemsg_level("".join("!!! %s\n" % line for line in msg),
				level=logging.ERROR, noiselevel=-1)
		if "--pretend" not in myopts:
			display_news_notification(root_config, myopts)
		retval = action_build(settings, trees, mtimedb,
			myopts, myaction, myfiles, spinner)
		# NOTE(review): root_config is re-fetched after action_build —
		# presumably trees may be rebuilt during the merge; confirm.
		root_config = trees[settings["ROOT"]]["root_config"]
		post_emerge(root_config, myopts, mtimedb, retval)