2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id: emerge 5976 2007-02-17 09:14:53Z genone $
7 # This block ensures that ^C interrupts are handled quietly.
11 def exithandler(signum,frame):
12 signal.signal(signal.SIGINT, signal.SIG_IGN)
13 signal.signal(signal.SIGTERM, signal.SIG_IGN)
16 signal.signal(signal.SIGINT, exithandler)
17 signal.signal(signal.SIGTERM, exithandler)
18 signal.signal(signal.SIGPIPE, signal.SIG_DFL)
20 except KeyboardInterrupt:
24 from collections import deque
41 from os import path as osp
42 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
45 from portage import digraph
46 from portage.const import NEWS_LIB_PATH
49 import portage.xpak, commands, errno, re, socket, time, types
50 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
51 nc_len, red, teal, turquoise, xtermTitle, \
52 xtermTitleReset, yellow
53 from portage.output import create_color_func
54 good = create_color_func("GOOD")
55 bad = create_color_func("BAD")
56 # white looks bad on terminals with white background
57 from portage.output import bold as white
61 portage.dep._dep_check_strict = True
64 import portage.exception
65 from portage.data import secpass
66 from portage.elog.messages import eerror
67 from portage.util import normalize_path as normpath
68 from portage.util import writemsg, writemsg_level
69 from portage.sets import load_default_config, SETPREFIX
70 from portage.sets.base import InternalPackageSet
72 from itertools import chain, izip
73 from UserDict import DictMixin
76 import cPickle as pickle
81 import cStringIO as StringIO
85 class stdout_spinner(object):
87 "Gentoo Rocks ("+platform.system()+")",
88 "Thank you for using Gentoo. :)",
89 "Are you actually trying to read this?",
90 "How many times have you stared at this?",
91 "We are generating the cache right now",
92 "You are paying too much attention.",
93 "A theory is better than its explanation.",
94 "Phasers locked on target, Captain.",
95 "Thrashing is just virtual crashing.",
96 "To be is to program.",
97 "Real Users hate Real Programmers.",
98 "When all else fails, read the instructions.",
99 "Functionality breeds Contempt.",
100 "The future lies ahead.",
101 "3.1415926535897932384626433832795028841971694",
102 "Sometimes insanity is the only alternative.",
103 "Inaccuracy saves a world of explanation.",
106 twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
110 self.update = self.update_twirl
111 self.scroll_sequence = self.scroll_msgs[
112 int(time.time() * 100) % len(self.scroll_msgs)]
114 self.min_display_latency = 0.05
116 def _return_early(self):
118 Flushing ouput to the tty too frequently wastes cpu time. Therefore,
119 each update* method should return without doing any output when this
122 cur_time = time.time()
123 if cur_time - self.last_update < self.min_display_latency:
125 self.last_update = cur_time
128 def update_basic(self):
129 self.spinpos = (self.spinpos + 1) % 500
130 if self._return_early():
132 if (self.spinpos % 100) == 0:
133 if self.spinpos == 0:
134 sys.stdout.write(". ")
136 sys.stdout.write(".")
139 def update_scroll(self):
140 if self._return_early():
142 if(self.spinpos >= len(self.scroll_sequence)):
143 sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
144 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
146 sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
148 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
150 def update_twirl(self):
151 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
152 if self._return_early():
154 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
157 def update_quiet(self):
160 def userquery(prompt, responses=None, colours=None):
161 """Displays a prompt and a set of responses, then waits for a response
162 which is checked against the responses and the first to match is
163 returned. An empty response will match the first value in responses. The
164 input buffer is *not* cleared prior to the prompt!
167 responses: a List of Strings.
168 colours: a List of Functions taking and returning a String, used to
169 process the responses for display. Typically these will be functions
170 like red() but could be e.g. lambda x: "DisplayString".
171 If responses is omitted, defaults to ["Yes", "No"], [green, red].
172 If only colours is omitted, defaults to [bold, ...].
174 Returns a member of the List responses. (If called without optional
175 arguments, returns "Yes" or "No".)
176 KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
178 if responses is None:
179 responses = ["Yes", "No"]
181 create_color_func("PROMPT_CHOICE_DEFAULT"),
182 create_color_func("PROMPT_CHOICE_OTHER")
184 elif colours is None:
186 colours=(colours*len(responses))[:len(responses)]
190 response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
191 for key in responses:
192 # An empty response will match the first value in responses.
193 if response.upper()==key[:len(response)].upper():
195 print "Sorry, response '%s' not understood." % response,
196 except (EOFError, KeyboardInterrupt):
200 actions = frozenset([
201 "clean", "config", "depclean",
202 "info", "list-sets", "metadata",
203 "prune", "regen", "search",
207 "--ask", "--alphabetical",
208 "--buildpkg", "--buildpkgonly",
209 "--changelog", "--columns",
214 "--fetchonly", "--fetch-all-uri",
215 "--getbinpkg", "--getbinpkgonly",
216 "--help", "--ignore-default-opts",
219 "--newuse", "--nocolor",
220 "--nodeps", "--noreplace",
221 "--nospinner", "--oneshot",
222 "--onlydeps", "--pretend",
223 "--quiet", "--resume",
224 "--searchdesc", "--selective",
228 "--usepkg", "--usepkgonly",
229 "--verbose", "--version"
235 "b":"--buildpkg", "B":"--buildpkgonly",
236 "c":"--clean", "C":"--unmerge",
237 "d":"--debug", "D":"--deep",
239 "f":"--fetchonly", "F":"--fetch-all-uri",
240 "g":"--getbinpkg", "G":"--getbinpkgonly",
242 "k":"--usepkg", "K":"--usepkgonly",
244 "n":"--noreplace", "N":"--newuse",
245 "o":"--onlydeps", "O":"--nodeps",
246 "p":"--pretend", "P":"--prune",
248 "s":"--search", "S":"--searchdesc",
251 "v":"--verbose", "V":"--version"
254 def emergelog(xterm_titles, mystr, short_msg=None):
255 if xterm_titles and short_msg:
256 if "HOSTNAME" in os.environ:
257 short_msg = os.environ["HOSTNAME"]+": "+short_msg
258 xtermTitle(short_msg)
260 file_path = "/var/log/emerge.log"
261 mylogfile = open(file_path, "a")
262 portage.util.apply_secpass_permissions(file_path,
263 uid=portage.portage_uid, gid=portage.portage_gid,
267 mylock = portage.locks.lockfile(mylogfile)
268 # seek because we may have gotten held up by the lock.
269 # if so, we may not be positioned at the end of the file.
271 mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
275 portage.locks.unlockfile(mylock)
277 except (IOError,OSError,portage.exception.PortageException), e:
279 print >> sys.stderr, "emergelog():",e
281 def countdown(secs=5, doing="Starting"):
283 print ">>> Waiting",secs,"seconds before starting..."
284 print ">>> (Control-C to abort)...\n"+doing+" in: ",
288 sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
293 # formats a size given in bytes nicely
294 def format_size(mysize):
295 if type(mysize) not in [types.IntType,types.LongType]:
297 if 0 != mysize % 1024:
298 # Always round up to the next kB so that it doesn't show 0 kB when
299 # some small file still needs to be fetched.
300 mysize += 1024 - mysize % 1024
301 mystr=str(mysize/1024)
305 mystr=mystr[:mycount]+","+mystr[mycount:]
309 def getgccversion(chost):
312 return: the current in-use gcc version
315 gcc_ver_command = 'gcc -dumpversion'
316 gcc_ver_prefix = 'gcc-'
318 gcc_not_found_error = red(
319 "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
320 "!!! to update the environment of this terminal and possibly\n" +
321 "!!! other terminals also.\n"
324 mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
325 if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
326 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
328 mystatus, myoutput = commands.getstatusoutput(
329 chost + "-" + gcc_ver_command)
330 if mystatus == os.EX_OK:
331 return gcc_ver_prefix + myoutput
333 mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
334 if mystatus == os.EX_OK:
335 return gcc_ver_prefix + myoutput
337 portage.writemsg(gcc_not_found_error, noiselevel=-1)
338 return "[unavailable]"
340 def getportageversion(portdir, target_root, profile, chost, vardb):
341 profilever = "unavailable"
343 realpath = os.path.realpath(profile)
344 basepath = os.path.realpath(os.path.join(portdir, "profiles"))
345 if realpath.startswith(basepath):
346 profilever = realpath[1 + len(basepath):]
349 profilever = "!" + os.readlink(profile)
352 del realpath, basepath
355 libclist = vardb.match("virtual/libc")
356 libclist += vardb.match("virtual/glibc")
357 libclist = portage.util.unique_array(libclist)
359 xs=portage.catpkgsplit(x)
361 libcver+=","+"-".join(xs[1:])
363 libcver="-".join(xs[1:])
365 libcver="unavailable"
367 gccver = getgccversion(chost)
368 unameout=platform.release()+" "+platform.machine()
370 return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
372 def create_depgraph_params(myopts, myaction):
373 #configure emerge engine parameters
375 # self: include _this_ package regardless of if it is merged.
376 # selective: exclude the package if it is merged
377 # recurse: go into the dependencies
378 # deep: go into the dependencies of already merged packages
379 # empty: pretend nothing is merged
380 # complete: completely account for all known dependencies
381 # remove: build graph for use in removing packages
382 myparams = set(["recurse"])
384 if myaction == "remove":
385 myparams.add("remove")
386 myparams.add("complete")
389 if "--update" in myopts or \
390 "--newuse" in myopts or \
391 "--reinstall" in myopts or \
392 "--noreplace" in myopts:
393 myparams.add("selective")
394 if "--emptytree" in myopts:
395 myparams.add("empty")
396 myparams.discard("selective")
397 if "--nodeps" in myopts:
398 myparams.discard("recurse")
399 if "--deep" in myopts:
401 if "--complete-graph" in myopts:
402 myparams.add("complete")
405 # search functionality
406 class search(object):
417 def __init__(self, root_config, spinner, searchdesc,
418 verbose, usepkg, usepkgonly):
419 """Searches the available and installed packages for the supplied search key.
420 The list of available and installed packages is created at object instantiation.
421 This makes successive searches faster."""
422 self.settings = root_config.settings
423 self.vartree = root_config.trees["vartree"]
424 self.spinner = spinner
425 self.verbose = verbose
426 self.searchdesc = searchdesc
427 self.root_config = root_config
428 self.setconfig = root_config.setconfig
429 self.matches = {"pkg" : []}
434 self.portdb = fake_portdb
435 for attrib in ("aux_get", "cp_all",
436 "xmatch", "findname", "getFetchMap"):
437 setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
441 portdb = root_config.trees["porttree"].dbapi
442 bindb = root_config.trees["bintree"].dbapi
443 vardb = root_config.trees["vartree"].dbapi
445 if not usepkgonly and portdb._have_root_eclass_dir:
446 self._dbs.append(portdb)
448 if (usepkg or usepkgonly) and bindb.cp_all():
449 self._dbs.append(bindb)
451 self._dbs.append(vardb)
452 self._portdb = portdb
457 cp_all.update(db.cp_all())
458 return list(sorted(cp_all))
460 def _aux_get(self, *args, **kwargs):
463 return db.aux_get(*args, **kwargs)
468 def _findname(self, *args, **kwargs):
470 if db is not self._portdb:
471 # We don't want findname to return anything
472 # unless it's an ebuild in a portage tree.
473 # Otherwise, it's already built and we don't
476 func = getattr(db, "findname", None)
478 value = func(*args, **kwargs)
483 def _getFetchMap(self, *args, **kwargs):
485 func = getattr(db, "getFetchMap", None)
487 value = func(*args, **kwargs)
492 def _visible(self, db, cpv, metadata):
493 installed = db is self.vartree.dbapi
494 built = installed or db is not self._portdb
497 pkg_type = "installed"
500 return visible(self.settings,
501 Package(type_name=pkg_type, root_config=self.root_config,
502 cpv=cpv, built=built, installed=installed, metadata=metadata))
504 def _xmatch(self, level, atom):
506 This method does not expand old-style virtuals because it
507 is restricted to returning matches for a single ${CATEGORY}/${PN}
508 and old-style virual matches unreliable for that when querying
509 multiple package databases. If necessary, old-style virtuals
510 can be performed on atoms prior to calling this method.
512 cp = portage.dep_getkey(atom)
513 if level == "match-all":
516 if hasattr(db, "xmatch"):
517 matches.update(db.xmatch(level, atom))
519 matches.update(db.match(atom))
520 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
521 db._cpv_sort_ascending(result)
522 elif level == "match-visible":
525 if hasattr(db, "xmatch"):
526 matches.update(db.xmatch(level, atom))
528 db_keys = list(db._aux_cache_keys)
529 for cpv in db.match(atom):
530 metadata = izip(db_keys,
531 db.aux_get(cpv, db_keys))
532 if not self._visible(db, cpv, metadata):
535 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
536 db._cpv_sort_ascending(result)
537 elif level == "bestmatch-visible":
540 if hasattr(db, "xmatch"):
541 cpv = db.xmatch("bestmatch-visible", atom)
542 if not cpv or portage.cpv_getkey(cpv) != cp:
544 if not result or cpv == portage.best([cpv, result]):
547 db_keys = Package.metadata_keys
548 # break out of this loop with highest visible
549 # match, checked in descending order
550 for cpv in reversed(db.match(atom)):
551 if portage.cpv_getkey(cpv) != cp:
553 metadata = izip(db_keys,
554 db.aux_get(cpv, db_keys))
555 if not self._visible(db, cpv, metadata):
557 if not result or cpv == portage.best([cpv, result]):
561 raise NotImplementedError(level)
564 def execute(self,searchkey):
565 """Performs the search for the supplied search key"""
567 self.searchkey=searchkey
568 self.packagematches = []
571 self.matches = {"pkg":[], "desc":[], "set":[]}
574 self.matches = {"pkg":[], "set":[]}
575 print "Searching... ",
578 if self.searchkey.startswith('%'):
580 self.searchkey = self.searchkey[1:]
581 if self.searchkey.startswith('@'):
583 self.searchkey = self.searchkey[1:]
585 self.searchre=re.compile(self.searchkey,re.I)
587 self.searchre=re.compile(re.escape(self.searchkey), re.I)
588 for package in self.portdb.cp_all():
589 self.spinner.update()
592 match_string = package[:]
594 match_string = package.split("/")[-1]
597 if self.searchre.search(match_string):
598 if not self.portdb.xmatch("match-visible", package):
600 self.matches["pkg"].append([package,masked])
601 elif self.searchdesc: # DESCRIPTION searching
602 full_package = self.portdb.xmatch("bestmatch-visible", package)
604 #no match found; we don't want to query description
605 full_package = portage.best(
606 self.portdb.xmatch("match-all", package))
612 full_desc = self.portdb.aux_get(
613 full_package, ["DESCRIPTION"])[0]
615 print "emerge: search: aux_get() failed, skipping"
617 if self.searchre.search(full_desc):
618 self.matches["desc"].append([full_package,masked])
620 self.sdict = self.setconfig.getSets()
621 for setname in self.sdict:
622 self.spinner.update()
624 match_string = setname
626 match_string = setname.split("/")[-1]
628 if self.searchre.search(match_string):
629 self.matches["set"].append([setname, False])
630 elif self.searchdesc:
631 if self.searchre.search(
632 self.sdict[setname].getMetadata("DESCRIPTION")):
633 self.matches["set"].append([setname, False])
636 for mtype in self.matches:
637 self.matches[mtype].sort()
638 self.mlen += len(self.matches[mtype])
641 if not self.portdb.xmatch("match-all", cp):
644 if not self.portdb.xmatch("bestmatch-visible", cp):
646 self.matches["pkg"].append([cp, masked])
650 """Outputs the results of the search."""
651 print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
652 print "[ Applications found : "+white(str(self.mlen))+" ]"
654 vardb = self.vartree.dbapi
655 for mtype in self.matches:
656 for match,masked in self.matches[mtype]:
660 full_package = self.portdb.xmatch(
661 "bestmatch-visible", match)
663 #no match found; we don't want to query description
665 full_package = portage.best(
666 self.portdb.xmatch("match-all",match))
667 elif mtype == "desc":
669 match = portage.cpv_getkey(match)
671 print green("*")+" "+white(match)
672 print " ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION")
676 desc, homepage, license = self.portdb.aux_get(
677 full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
679 print "emerge: search: aux_get() failed, skipping"
682 print green("*")+" "+white(match)+" "+red("[ Masked ]")
684 print green("*")+" "+white(match)
685 myversion = self.getVersion(full_package, search.VERSION_RELEASE)
689 mycat = match.split("/")[0]
690 mypkg = match.split("/")[1]
691 mycpv = match + "-" + myversion
692 myebuild = self.portdb.findname(mycpv)
694 pkgdir = os.path.dirname(myebuild)
695 from portage import manifest
696 mf = manifest.Manifest(
697 pkgdir, self.settings["DISTDIR"])
699 uri_map = self.portdb.getFetchMap(mycpv)
700 except portage.exception.InvalidDependString, e:
701 file_size_str = "Unknown (%s)" % (e,)
705 mysum[0] = mf.getDistfilesSize(uri_map)
707 file_size_str = "Unknown (missing " + \
708 "digest for %s)" % (e,)
713 if db is not vardb and \
714 db.cpv_exists(mycpv):
716 if not myebuild and hasattr(db, "bintree"):
717 myebuild = db.bintree.getname(mycpv)
719 mysum[0] = os.stat(myebuild).st_size
724 if myebuild and file_size_str is None:
725 mystr = str(mysum[0] / 1024)
729 mystr = mystr[:mycount] + "," + mystr[mycount:]
730 file_size_str = mystr + " kB"
734 print " ", darkgreen("Latest version available:"),myversion
735 print " ", self.getInstallationStatus(mycat+'/'+mypkg)
738 (darkgreen("Size of files:"), file_size_str)
739 print " ", darkgreen("Homepage:")+" ",homepage
740 print " ", darkgreen("Description:")+" ",desc
741 print " ", darkgreen("License:")+" ",license
746 def getInstallationStatus(self,package):
747 installed_package = self.vartree.dep_bestmatch(package)
749 version = self.getVersion(installed_package,search.VERSION_RELEASE)
751 result = darkgreen("Latest version installed:")+" "+version
753 result = darkgreen("Latest version installed:")+" [ Not Installed ]"
756 def getVersion(self,full_package,detail):
757 if len(full_package) > 1:
758 package_parts = portage.catpkgsplit(full_package)
759 if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
760 result = package_parts[2]+ "-" + package_parts[3]
762 result = package_parts[2]
767 class RootConfig(object):
768 """This is used internally by depgraph to track information about a
772 "ebuild" : "porttree",
773 "binary" : "bintree",
774 "installed" : "vartree"
778 for k, v in pkg_tree_map.iteritems():
781 def __init__(self, settings, trees, setconfig):
783 self.settings = settings
784 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
785 self.root = self.settings["ROOT"]
786 self.setconfig = setconfig
787 self.sets = self.setconfig.getSets()
788 self.visible_pkgs = PackageVirtualDbapi(self.settings)
790 def create_world_atom(pkg, args_set, root_config):
791 """Create a new atom for the world file if one does not exist. If the
792 argument atom is precise enough to identify a specific slot then a slot
793 atom will be returned. Atoms that are in the system set may also be stored
794 in world since system atoms can only match one slot while world atoms can
795 be greedy with respect to slots. Unslotted system packages will not be
798 arg_atom = args_set.findAtomForPackage(pkg)
801 cp = portage.dep_getkey(arg_atom)
803 sets = root_config.sets
804 portdb = root_config.trees["porttree"].dbapi
805 vardb = root_config.trees["vartree"].dbapi
806 available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
807 for cpv in portdb.match(cp))
808 slotted = len(available_slots) > 1 or \
809 (len(available_slots) == 1 and "0" not in available_slots)
811 # check the vdb in case this is multislot
812 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
813 for cpv in vardb.match(cp))
814 slotted = len(available_slots) > 1 or \
815 (len(available_slots) == 1 and "0" not in available_slots)
816 if slotted and arg_atom != cp:
817 # If the user gave a specific atom, store it as a
818 # slot atom in the world file.
819 slot_atom = pkg.slot_atom
821 # For USE=multislot, there are a couple of cases to
824 # 1) SLOT="0", but the real SLOT spontaneously changed to some
825 # unknown value, so just record an unslotted atom.
827 # 2) SLOT comes from an installed package and there is no
828 # matching SLOT in the portage tree.
830 # Make sure that the slot atom is available in either the
831 # portdb or the vardb, since otherwise the user certainly
832 # doesn't want the SLOT atom recorded in the world file
833 # (case 1 above). If it's only available in the vardb,
834 # the user may be trying to prevent a USE=multislot
835 # package from being removed by --depclean (case 2 above).
838 if not portdb.match(slot_atom):
839 # SLOT seems to come from an installed multislot package
841 # If there is no installed package matching the SLOT atom,
842 # it probably changed SLOT spontaneously due to USE=multislot,
843 # so just record an unslotted atom.
844 if vardb.match(slot_atom):
845 # Now verify that the argument is precise
846 # enough to identify a specific slot.
847 matches = mydb.match(arg_atom)
848 matched_slots = set()
850 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
851 if len(matched_slots) == 1:
852 new_world_atom = slot_atom
854 if new_world_atom == sets["world"].findAtomForPackage(pkg):
855 # Both atoms would be identical, so there's nothing to add.
858 # Unlike world atoms, system atoms are not greedy for slots, so they
859 # can't be safely excluded from world if they are slotted.
860 system_atom = sets["system"].findAtomForPackage(pkg)
862 if not portage.dep_getkey(system_atom).startswith("virtual/"):
864 # System virtuals aren't safe to exclude from world since they can
865 # match multiple old-style virtuals but only one of them will be
866 # pulled in by update or depclean.
867 providers = portdb.mysettings.getvirtuals().get(
868 portage.dep_getkey(system_atom))
869 if providers and len(providers) == 1 and providers[0] == cp:
871 return new_world_atom
873 def filter_iuse_defaults(iuse):
875 if flag.startswith("+") or flag.startswith("-"):
880 class SlotObject(object):
881 __slots__ = ("__weakref__",)
883 def __init__(self, **kwargs):
884 classes = [self.__class__]
889 classes.extend(c.__bases__)
890 slots = getattr(c, "__slots__", None)
894 myvalue = kwargs.get(myattr, None)
895 setattr(self, myattr, myvalue)
899 Create a new instance and copy all attributes
900 defined from __slots__ (including those from
903 obj = self.__class__()
905 classes = [self.__class__]
910 classes.extend(c.__bases__)
911 slots = getattr(c, "__slots__", None)
915 setattr(obj, myattr, getattr(self, myattr))
919 class AbstractDepPriority(SlotObject):
920 __slots__ = ("buildtime", "runtime", "runtime_post")
# Rich comparison: coerce this priority to its integer level via __int__()
# and compare against `other` (an int, or anything comparable to int).
922 def __lt__(self, other):
923 return self.__int__() < other
# Rich comparison: integer priority level of self <= `other`.
925 def __le__(self, other):
926 return self.__int__() <= other
# Equality is defined on the integer priority level, so two priority
# instances with different attribute combinations but the same computed
# level compare equal, as does a plain int of that level.
928 def __eq__(self, other):
929 return self.__int__() == other
# Inverse of __eq__ (required explicitly in Python 2: __ne__ is not
# derived automatically from __eq__ there).
931 def __ne__(self, other):
932 return self.__int__() != other
# Rich comparison: integer priority level of self > `other`.
934 def __gt__(self, other):
935 return self.__int__() > other
# Rich comparison: integer priority level of self >= `other`.
937 def __ge__(self, other):
938 return self.__int__() >= other
942 return copy.copy(self)
944 class DepPriority(AbstractDepPriority):
946 This class generates an integer priority level based of various
947 attributes of the dependency relationship. Attributes can be assigned
948 at any time and the new integer value will be generated on calls to the
949 __int__() method. Rich comparison operators are supported.
951 The boolean attributes that affect the integer value are "satisfied",
952 "buildtime", "runtime", and "system". Various combinations of
953 attributes lead to the following priority levels:
955 Combination of properties Priority Category
957 not satisfied and buildtime 0 HARD
958 not satisfied and runtime -1 MEDIUM
959 not satisfied and runtime_post -2 MEDIUM_SOFT
960 satisfied and buildtime and rebuild -3 SOFT
961 satisfied and buildtime -4 SOFT
962 satisfied and runtime -5 SOFT
963 satisfied and runtime_post -6 SOFT
964 (none of the above) -6 SOFT
966 Several integer constants are defined for categorization of priority
969 MEDIUM The upper boundary for medium dependencies.
970 MEDIUM_SOFT The upper boundary for medium-soft dependencies.
971 SOFT The upper boundary for soft dependencies.
972 MIN The lower boundary for soft dependencies.
974 __slots__ = ("satisfied", "rebuild")
981 if not self.satisfied:
986 if self.runtime_post:
994 if self.runtime_post:
999 myvalue = self.__int__()
1000 if myvalue > self.MEDIUM:
1002 if myvalue > self.MEDIUM_SOFT:
1004 if myvalue > self.SOFT:
1005 return "medium-soft"
1008 class BlockerDepPriority(DepPriority):
1013 BlockerDepPriority.instance = BlockerDepPriority()
1015 class UnmergeDepPriority(AbstractDepPriority):
1016 __slots__ = ("satisfied",)
1018 Combination of properties Priority Category
1021 runtime_post -1 HARD
1023 (none of the above) -2 SOFT
1033 if self.runtime_post:
1040 myvalue = self.__int__()
1041 if myvalue > self.SOFT:
1045 class FakeVartree(portage.vartree):
1046 """This is implements an in-memory copy of a vartree instance that provides
1047 all the interfaces required for use by the depgraph. The vardb is locked
1048 during the constructor call just long enough to read a copy of the
1049 installed package information. This allows the depgraph to do it's
1050 dependency calculations without holding a lock on the vardb. It also
1051 allows things like vardb global updates to be done in memory so that the
1052 user doesn't necessarily need write access to the vardb in cases where
1053 global updates are necessary (updates are performed when necessary if there
1054 is not a matching ebuild in the tree)."""
1055 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1056 self._root_config = root_config
1057 if pkg_cache is None:
1059 real_vartree = root_config.trees["vartree"]
1060 portdb = root_config.trees["porttree"].dbapi
1061 self.root = real_vartree.root
1062 self.settings = real_vartree.settings
1063 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1064 self._pkg_cache = pkg_cache
1065 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1066 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1068 # At least the parent needs to exist for the lock file.
1069 portage.util.ensure_dirs(vdb_path)
1070 except portage.exception.PortageException:
1074 if acquire_lock and os.access(vdb_path, os.W_OK):
1075 vdb_lock = portage.locks.lockdir(vdb_path)
1076 real_dbapi = real_vartree.dbapi
1078 for cpv in real_dbapi.cpv_all():
1079 cache_key = ("installed", self.root, cpv, "nomerge")
1080 pkg = self._pkg_cache.get(cache_key)
1082 metadata = pkg.metadata
1084 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1085 myslot = metadata["SLOT"]
1086 mycp = portage.dep_getkey(cpv)
1087 myslot_atom = "%s:%s" % (mycp, myslot)
1089 mycounter = long(metadata["COUNTER"])
1092 metadata["COUNTER"] = str(mycounter)
1093 other_counter = slot_counters.get(myslot_atom, None)
1094 if other_counter is not None:
1095 if other_counter > mycounter:
1097 slot_counters[myslot_atom] = mycounter
1099 pkg = Package(built=True, cpv=cpv,
1100 installed=True, metadata=metadata,
1101 root_config=root_config, type_name="installed")
1102 self._pkg_cache[pkg] = pkg
1103 self.dbapi.cpv_inject(pkg)
1104 real_dbapi.flush_cache()
1107 portage.locks.unlockdir(vdb_lock)
1108 # Populate the old-style virtuals using the cached values.
1109 if not self.settings.treeVirtuals:
1110 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1111 portage.getCPFromCPV, self.get_all_provides())
1113 # Intialize variables needed for lazy cache pulls of the live ebuild
1114 # metadata. This ensures that the vardb lock is released ASAP, without
1115 # being delayed in case cache generation is triggered.
1116 self._aux_get = self.dbapi.aux_get
1117 self.dbapi.aux_get = self._aux_get_wrapper
1118 self._match = self.dbapi.match
1119 self.dbapi.match = self._match_wrapper
1120 self._aux_get_history = set()
1121 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1122 self._portdb = portdb
1123 self._global_updates = None
1125 def _match_wrapper(self, cpv, use_cache=1):
1127 Make sure the metadata in Package instances gets updated for any
1128 cpv that is returned from a match() call, since the metadata can
1129 be accessed directly from the Package instance instead of via
1132 matches = self._match(cpv, use_cache=use_cache)
1134 if cpv in self._aux_get_history:
1136 self._aux_get_wrapper(cpv, [])
1139 def _aux_get_wrapper(self, pkg, wants):
1140 if pkg in self._aux_get_history:
1141 return self._aux_get(pkg, wants)
1142 self._aux_get_history.add(pkg)
1144 # Use the live ebuild metadata if possible.
1145 live_metadata = dict(izip(self._portdb_keys,
1146 self._portdb.aux_get(pkg, self._portdb_keys)))
1147 if not portage.eapi_is_supported(live_metadata["EAPI"]):
1149 self.dbapi.aux_update(pkg, live_metadata)
1150 except (KeyError, portage.exception.PortageException):
1151 if self._global_updates is None:
1152 self._global_updates = \
1153 grab_global_updates(self._portdb.porttree_root)
1154 perform_global_updates(
1155 pkg, self.dbapi, self._global_updates)
1156 return self._aux_get(pkg, wants)
1158 def sync(self, acquire_lock=1):
1160 Call this method to synchronize state with the real vardb
1161 after one or more packages may have been installed or
1164 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1166 # At least the parent needs to exist for the lock file.
1167 portage.util.ensure_dirs(vdb_path)
1168 except portage.exception.PortageException:
1172 if acquire_lock and os.access(vdb_path, os.W_OK):
1173 vdb_lock = portage.locks.lockdir(vdb_path)
1177 portage.locks.unlockdir(vdb_lock)
1181 real_vardb = self._root_config.trees["vartree"].dbapi
1182 current_cpv_set = frozenset(real_vardb.cpv_all())
1183 pkg_vardb = self.dbapi
1184 aux_get_history = self._aux_get_history
1186 # Remove any packages that have been uninstalled.
1187 for pkg in list(pkg_vardb):
1188 if pkg.cpv not in current_cpv_set:
1189 pkg_vardb.cpv_remove(pkg)
1190 aux_get_history.discard(pkg.cpv)
1192 # Validate counters and timestamps.
1195 validation_keys = ["COUNTER", "_mtime_"]
1196 for cpv in current_cpv_set:
1198 pkg_hash_key = ("installed", root, cpv, "nomerge")
1199 pkg = pkg_vardb.get(pkg_hash_key)
1201 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1203 if counter != pkg.metadata["COUNTER"] or \
1205 pkg_vardb.cpv_remove(pkg)
1206 aux_get_history.discard(pkg.cpv)
1210 pkg = self._pkg(cpv)
1212 other_counter = slot_counters.get(pkg.slot_atom)
1213 if other_counter is not None:
1214 if other_counter > pkg.counter:
1217 slot_counters[pkg.slot_atom] = pkg.counter
1218 pkg_vardb.cpv_inject(pkg)
1220 real_vardb.flush_cache()
1222 def _pkg(self, cpv):
1223 root_config = self._root_config
1224 real_vardb = root_config.trees["vartree"].dbapi
1225 db_keys = list(real_vardb._aux_cache_keys)
1226 pkg = Package(cpv=cpv, installed=True,
1227 metadata=izip(db_keys, real_vardb.aux_get(cpv, db_keys)),
1228 root_config=root_config,
1229 type_name="installed")
def grab_global_updates(portdir):
	"""Collect package move/slotmove commands from profiles/updates.

	@param portdir: path to the portage tree
	@returns: a list of parsed update commands; empty if the updates
		directory does not exist
	"""
	from portage.update import grab_updates, parse_updates
	updpath = os.path.join(portdir, "profiles", "updates")
	try:
		rawupdates = grab_updates(updpath)
	except portage.exception.DirectoryNotFound:
		rawupdates = []
	upd_commands = []
	for mykey, mystat, mycontent in rawupdates:
		commands, errors = parse_updates(mycontent)
		# NOTE(review): parse errors are intentionally discarded here,
		# matching the visible original; only valid commands are kept.
		upd_commands.extend(commands)
	return upd_commands
def perform_global_updates(mycpv, mydb, mycommands):
	"""Apply global package-move commands to one package's dependency
	metadata in the given dbapi.

	@param mycpv: package whose metadata should be fixed up
	@param mydb: dbapi object providing aux_get() / aux_update()
	@param mycommands: parsed update commands (from grab_global_updates)
	"""
	from portage.update import update_dbentries
	aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
	aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
	updates = update_dbentries(mycommands, aux_dict)
	# Only write back when update_dbentries actually changed something.
	if updates:
		mydb.aux_update(mycpv, updates)
def visible(pkgsettings, pkg):
	"""
	Check if a package is visible. This can raise an InvalidDependString
	exception if LICENSE is invalid.
	TODO: optionally generate a list of masking reasons
	@returns: True if the package is visible, False otherwise.
	"""
	# A missing SLOT indicates corrupt metadata.
	if not pkg.metadata["SLOT"]:
		return False
	if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
		if not pkgsettings._accept_chost(pkg):
			return False
	eapi = pkg.metadata["EAPI"]
	if not portage.eapi_is_supported(eapi):
		return False
	if not pkg.installed:
		if portage._eapi_is_deprecated(eapi):
			return False
		if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
			return False
	if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
		return False
	if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
		return False
	try:
		if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
			return False
	except portage.exception.InvalidDependString:
		# LICENSE could not be parsed; treat as not visible.
		return False
	return True
def get_masking_status(pkg, pkgsettings, root_config):
	"""Return the list of mask reasons for a package.

	Wraps portage.getmaskingstatus() and appends reasons that only
	apply to binary/installed packages (CHOST mismatch) or corrupt
	metadata (undefined SLOT).

	@returns: list of human-readable mask reason strings
	"""
	mreasons = portage.getmaskingstatus(
		pkg, settings=pkgsettings,
		portdb=root_config.trees["porttree"].dbapi)
	if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
		if not pkgsettings._accept_chost(pkg):
			mreasons.append("CHOST: %s" % \
				pkg.metadata["CHOST"])
	if not pkg.metadata["SLOT"]:
		mreasons.append("invalid: SLOT is undefined")
	# NOTE(review): the final return was elided in the damaged source.
	return mreasons
def get_mask_info(root_config, cpv, pkgsettings,
	db, pkg_type, built, installed, db_keys):
	"""Fetch metadata for cpv and compute its mask reasons.

	@returns: (metadata, mreasons) where metadata is None (and
		mreasons == ["corruption"]) when aux_get fails
	"""
	try:
		metadata = dict(izip(db_keys,
			db.aux_get(cpv, db_keys)))
	except KeyError:
		# aux_get raises KeyError for missing/corrupt entries.
		metadata = None
	if metadata and not built:
		pkgsettings.setcpv(cpv, mydb=metadata)
		metadata["USE"] = pkgsettings["PORTAGE_USE"]
	if metadata is None:
		mreasons = ["corruption"]
	else:
		pkg = Package(type_name=pkg_type, root_config=root_config,
			cpv=cpv, built=built, installed=installed, metadata=metadata)
		mreasons = get_masking_status(pkg, pkgsettings, root_config)
	return metadata, mreasons
# ---------------------------------------------------------------------
# show_masked_packages: print each masked package once with its mask
# reasons, any package.mask comment, and license-file locations; returns
# True when at least one package was masked by an unsupported EAPI.
# NOTE(review): this region was recovered from a damaged file -- the
# leading numbers are extraction artifacts and several original lines
# (e.g. the shown_cpvs init, try: headers, print calls) are missing.
# Code is kept byte-identical; only comments were added.
# ---------------------------------------------------------------------
1320 def show_masked_packages(masked_packages):
1321 shown_licenses = set()
1322 shown_comments = set()
1323 # Maybe there is both an ebuild and a binary. Only
1324 # show one of them to avoid redundant appearance.
1326 have_eapi_mask = False
1327 for (root_config, pkgsettings, cpv,
1328 metadata, mreasons) in masked_packages:
1329 if cpv in shown_cpvs:
1332 comment, filename = None, None
1333 if "package.mask" in mreasons:
1334 comment, filename = \
1335 portage.getmaskingreason(
1336 cpv, metadata=metadata,
1337 settings=pkgsettings,
1338 portdb=root_config.trees["porttree"].dbapi,
1339 return_location=True)
1340 missing_licenses = []
1342 if not portage.eapi_is_supported(metadata["EAPI"]):
1343 have_eapi_mask = True
1345 missing_licenses = \
1346 pkgsettings._getMissingLicenses(
1348 except portage.exception.InvalidDependString:
1349 # This will have already been reported
1350 # above via mreasons.
# Python 2 print statement: one summary line per masked package.
1353 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1354 if comment and comment not in shown_comments:
1357 shown_comments.add(comment)
1358 portdb = root_config.trees["porttree"].dbapi
1359 for l in missing_licenses:
1360 l_path = portdb.findLicensePath(l)
1361 if l in shown_licenses:
1363 msg = ("A copy of the '%s' license" + \
1364 " is located at '%s'.") % (l, l_path)
1367 shown_licenses.add(l)
1368 return have_eapi_mask
class Task(SlotObject):
	"""Base class for hashable scheduler tasks.

	A task is identified by a hash key tuple supplied by subclasses via
	_get_hash_key(); equality, hashing, iteration, and indexing all
	delegate to that key, so a Task compares equal to its key tuple.
	"""
	__slots__ = ("_hash_key", "_hash_value")

	def _get_hash_key(self):
		# Subclasses must populate self._hash_key lazily.
		hash_key = getattr(self, "_hash_key", None)
		if hash_key is None:
			raise NotImplementedError(self)
		return hash_key

	def __eq__(self, other):
		return self._get_hash_key() == other

	def __ne__(self, other):
		return self._get_hash_key() != other

	def __hash__(self):
		# Cache the hash since the key tuple is immutable once set.
		hash_value = getattr(self, "_hash_value", None)
		if hash_value is None:
			self._hash_value = hash(self._get_hash_key())
		return self._hash_value

	def __len__(self):
		return len(self._get_hash_key())

	def __getitem__(self, key):
		return self._get_hash_key()[key]

	def __iter__(self):
		return iter(self._get_hash_key())

	def __contains__(self, key):
		return key in self._get_hash_key()

	def __str__(self):
		return str(self._get_hash_key())
class Blocker(Task):
	"""Task representing a blocker atom (!cat/pkg) for a given root."""

	# Defining __eq__ in Task would otherwise suppress inherited __hash__.
	__hash__ = Task.__hash__
	__slots__ = ("root", "atom", "cp", "eapi", "satisfied")

	def __init__(self, **kwargs):
		Task.__init__(self, **kwargs)
		self.cp = portage.dep_getkey(self.atom)

	def _get_hash_key(self):
		hash_key = getattr(self, "_hash_key", None)
		if hash_key is None:
			# NOTE(review): the assignment line was elided in the
			# damaged source; the key must be stored before return.
			self._hash_key = \
				("blocks", self.root, self.atom, self.eapi)
		return self._hash_key
# ---------------------------------------------------------------------
# Package: Task subclass representing one ebuild/binary/installed
# package; derives cp, slot_atom, category/pf and version splits from
# cpv in __init__ and wraps metadata so attribute caches stay in sync.
# NOTE(review): recovered from a damaged file -- leading numbers are
# extraction artifacts; the "metadata_keys = [" header, the
# "class _use(object):" header, parts of _iuse.__init__, and the
# return statements of the comparison methods are among the missing
# lines. Code kept byte-identical; comments only added.
# ---------------------------------------------------------------------
1422 class Package(Task):
1424 __hash__ = Task.__hash__
1425 __slots__ = ("built", "cpv", "depth",
1426 "installed", "metadata", "onlydeps", "operation",
1427 "root_config", "type_name",
1428 "category", "counter", "cp", "cpv_split",
1429 "inherited", "iuse", "mtime",
1430 "pf", "pv_split", "root", "slot", "slot_atom", "use")
# NOTE(review): the "metadata_keys = [" opening line is missing here.
1433 "CHOST", "COUNTER", "DEPEND", "EAPI",
1434 "INHERITED", "IUSE", "KEYWORDS",
1435 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1436 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1438 def __init__(self, **kwargs):
1439 Task.__init__(self, **kwargs)
1440 self.root = self.root_config.root
1441 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1442 self.cp = portage.cpv_getkey(self.cpv)
1443 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, self.slot))
1444 self.category, self.pf = portage.catsplit(self.cpv)
1445 self.cpv_split = portage.catpkgsplit(self.cpv)
1446 self.pv_split = self.cpv_split[1:]
# NOTE(review): the "class _use(object):" header line is missing here.
1450 __slots__ = ("__weakref__", "enabled")
1452 def __init__(self, use):
1453 self.enabled = frozenset(use)
1455 class _iuse(object):
1457 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1459 def __init__(self, tokens, iuse_implicit):
1460 self.tokens = tuple(tokens)
1461 self.iuse_implicit = iuse_implicit
# IUSE tokens prefixed with +/- are defaults-enabled/disabled.
1468 enabled.append(x[1:])
1470 disabled.append(x[1:])
1473 self.enabled = frozenset(enabled)
1474 self.disabled = frozenset(disabled)
1475 self.all = frozenset(chain(enabled, disabled, other))
# Lazily builds and caches self.regex on first attribute access.
1477 def __getattribute__(self, name):
1480 return object.__getattribute__(self, "regex")
1481 except AttributeError:
1482 all = object.__getattribute__(self, "all")
1483 iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1484 # Escape anything except ".*" which is supposed
1485 # to pass through from _get_implicit_iuse()
1486 regex = (re.escape(x) for x in chain(all, iuse_implicit))
1487 regex = "^(%s)$" % "|".join(regex)
1488 regex = regex.replace("\\.\\*", ".*")
1489 self.regex = re.compile(regex)
1490 return object.__getattribute__(self, name)
1492 def _get_hash_key(self):
1493 hash_key = getattr(self, "_hash_key", None)
1494 if hash_key is None:
1495 if self.operation is None:
1496 self.operation = "merge"
1497 if self.onlydeps or self.installed:
1498 self.operation = "nomerge"
1500 (self.type_name, self.root, self.cpv, self.operation)
1501 return self._hash_key
# Comparisons order same-cp packages by version via portage.pkgcmp;
# the return lines were elided in the damaged source.
1503 def __lt__(self, other):
1504 if other.cp != self.cp:
1506 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1510 def __le__(self, other):
1511 if other.cp != self.cp:
1513 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1517 def __gt__(self, other):
1518 if other.cp != self.cp:
1520 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1524 def __ge__(self, other):
1525 if other.cp != self.cp:
1527 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
# Module-level: the full metadata key set for the slot-dict-backed
# metadata wrapper -- all auxdb keys except UNUSED_* and CDEPEND,
# plus everything Package itself requires.
1531 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1532 if not x.startswith("UNUSED_"))
1533 _all_metadata_keys.discard("CDEPEND")
1534 _all_metadata_keys.update(Package.metadata_keys)
1536 from portage.cache.mappings import slot_dict_class
# slot_dict_class builds a __slots__-based dict class for these keys.
1537 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
# ---------------------------------------------------------------------
# _PackageMetadataWrapper: dict-like metadata store that mirrors writes
# of selected keys into attributes on the owning Package (via the
# _set_<key> methods dispatched from __setitem__).
# NOTE(review): recovered from a damaged file -- leading numbers are
# artifacts; the "self._pkg = pkg" line in __init__, the _set_slot
# body, and the try/except bodies of _set_counter/_set__mtime_ are
# among the missing lines. Code kept byte-identical; comments only.
# ---------------------------------------------------------------------
1539 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1541 Detect metadata updates and synchronize Package attributes.
1544 __slots__ = ("_pkg",)
1545 _wrapped_keys = frozenset(
1546 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1548 def __init__(self, pkg, metadata):
1549 _PackageMetadataWrapperBase.__init__(self)
1551 self.update(metadata)
1553 def __setitem__(self, k, v):
1554 _PackageMetadataWrapperBase.__setitem__(self, k, v)
# Wrapped keys dispatch to _set_counter, _set_iuse, etc.
1555 if k in self._wrapped_keys:
1556 getattr(self, "_set_" + k.lower())(k, v)
1558 def _set_inherited(self, k, v):
1559 if isinstance(v, basestring):
1560 v = frozenset(v.split())
1561 self._pkg.inherited = v
1563 def _set_iuse(self, k, v):
1564 self._pkg.iuse = self._pkg._iuse(
1565 v.split(), self._pkg.root_config.iuse_implicit)
1567 def _set_slot(self, k, v):
1570 def _set_use(self, k, v):
1571 self._pkg.use = self._pkg._use(v.split())
1573 def _set_counter(self, k, v):
1574 if isinstance(v, basestring):
1579 self._pkg.counter = v
1581 def _set__mtime_(self, k, v):
1582 if isinstance(v, basestring):
1584 v = float(v.strip())
# ---------------------------------------------------------------------
# EbuildFetchonly: runs the "fetch" phase of an ebuild synchronously
# (for --fetchonly / --pretend), inside a private PORTAGE_TMPDIR that
# is restored and removed afterwards.
# NOTE(review): recovered from a damaged file -- leading numbers are
# artifacts; the execute() / _execute() def headers and try/except/
# finally scaffolding are among the missing lines. Code is kept
# byte-identical; comments only added.
# ---------------------------------------------------------------------
1589 class EbuildFetchonly(SlotObject):
1591 __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1594 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1595 # ensuring sane $PWD (bug #239560) and storing elog
1596 # messages. Use a private temp directory, in order
1597 # to avoid locking the main one.
1598 settings = self.settings
1599 global_tmpdir = settings["PORTAGE_TMPDIR"]
1600 from tempfile import mkdtemp
1602 private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1604 if e.errno != portage.exception.PermissionDenied.errno:
1606 raise portage.exception.PermissionDenied(global_tmpdir)
1607 settings["PORTAGE_TMPDIR"] = private_tmpdir
1608 settings.backup_changes("PORTAGE_TMPDIR")
1610 retval = self._execute()
# Restore the global tmpdir and clean up the private one.
1612 settings["PORTAGE_TMPDIR"] = global_tmpdir
1613 settings.backup_changes("PORTAGE_TMPDIR")
1614 shutil.rmtree(private_tmpdir)
1618 settings = self.settings
1620 root_config = pkg.root_config
1621 portdb = root_config.trees["porttree"].dbapi
1622 ebuild_path = portdb.findname(pkg.cpv)
1623 settings.setcpv(pkg)
1624 debug = settings.get("PORTAGE_DEBUG") == "1"
1625 use_cache = 1 # always true
1626 portage.doebuild_environment(ebuild_path, "fetch",
1627 root_config.root, settings, debug, use_cache, portdb)
1628 portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
1630 retval = portage.doebuild(ebuild_path, "fetch",
1631 self.settings["ROOT"], self.settings, debug=debug,
1632 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1633 mydbapi=portdb, tree="porttree")
1635 if retval != os.EX_OK:
1636 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1637 eerror(msg, phase="unpack", key=pkg.cpv)
1639 portage.elog.elog_process(self.pkg.cpv, self.settings)
class PollConstants(object):

	"""
	Provides POLL* constants that are equivalent to those from the
	select module, for use by PollSelectAdapter.
	"""

	names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
	# Take each constant from the select module when available; fall
	# back to successive powers of two on platforms lacking poll().
	v = 1
	for k in names:
		locals()[k] = getattr(select, k, v)
		v *= 2
	del k, v
# ---------------------------------------------------------------------
# AsynchronousTask: base class providing the start()/poll()/wait()/
# cancel() protocol plus start- and exit-listener registration; the
# _wait_hook() stack dance delivers exit listeners exactly once even
# when a listener re-enters wait() or removes another listener.
# NOTE(review): recovered from a damaged file -- leading numbers are
# artifacts; several def headers (start, isAlive, poll, wait, cancel,
# _start, _poll, _wait) and return lines are missing. Code kept
# byte-identical; comments only added.
# ---------------------------------------------------------------------
1656 class AsynchronousTask(SlotObject):
1658 Subclasses override _wait() and _poll() so that calls
1659 to public methods can be wrapped for implementing
1660 hooks such as exit listener notification.
1662 Sublasses should call self.wait() to notify exit listeners after
1663 the task is complete and self.returncode has been set.
1666 __slots__ = ("background", "cancelled", "returncode") + \
1667 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1671 Start an asynchronous task and then return as soon as possible.
1677 raise NotImplementedError(self)
# isAlive: a task is alive until its returncode is set.
1680 return self.returncode is None
1687 return self.returncode
1690 if self.returncode is None:
1693 return self.returncode
1696 return self.returncode
1699 self.cancelled = True
1702 def addStartListener(self, f):
1704 The function will be called with one argument, a reference to self.
1706 if self._start_listeners is None:
1707 self._start_listeners = []
1708 self._start_listeners.append(f)
1710 def removeStartListener(self, f):
1711 if self._start_listeners is None:
1713 self._start_listeners.remove(f)
1715 def _start_hook(self):
1716 if self._start_listeners is not None:
1717 start_listeners = self._start_listeners
1718 self._start_listeners = None
1720 for f in start_listeners:
1723 def addExitListener(self, f):
1725 The function will be called with one argument, a reference to self.
1727 if self._exit_listeners is None:
1728 self._exit_listeners = []
1729 self._exit_listeners.append(f)
1731 def removeExitListener(self, f):
1732 if self._exit_listeners is None:
1733 if self._exit_listener_stack is not None:
1734 self._exit_listener_stack.remove(f)
1736 self._exit_listeners.remove(f)
1738 def _wait_hook(self):
1740 Call this method after the task completes, just before returning
1741 the returncode from wait() or poll(). This hook is
1742 used to trigger exit listeners when the returncode first
1745 if self.returncode is not None and \
1746 self._exit_listeners is not None:
1748 # This prevents recursion, in case one of the
1749 # exit handlers triggers this method again by
1750 # calling wait(). Use a stack that gives
1751 # removeExitListener() an opportunity to consume
1752 # listeners from the stack, before they can get
1753 # called below. This is necessary because a call
1754 # to one exit listener may result in a call to
1755 # removeExitListener() for another listener on
1756 # the stack. That listener needs to be removed
1757 # from the stack since it would be inconsistent
1758 # to call it after it has been been passed into
1759 # removeExitListener().
1760 self._exit_listener_stack = self._exit_listeners
1761 self._exit_listeners = None
1763 self._exit_listener_stack.reverse()
1764 while self._exit_listener_stack:
1765 self._exit_listener_stack.pop()(self)
class AbstractPollTask(AsynchronousTask):
	"""Base for tasks driven by the scheduler's poll() loop.

	Tracks registration state and tears the task down when the polled
	fd reports an error or hangup event.
	"""

	__slots__ = ("scheduler",) + \
		("_registered",)

	# Read buffer size used by subclasses' handlers.
	_bufsize = 4096
	_exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
	_registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
		_exceptional_events

	def _unregister(self):
		raise NotImplementedError(self)

	def _unregister_if_appropriate(self, event):
		# POLLERR/POLLNVAL means the fd is broken: cancel the task.
		# POLLHUP means normal EOF: just finish via wait().
		if self._registered:
			if event & self._exceptional_events:
				self._unregister()
				self.cancel()
			elif event & PollConstants.POLLHUP:
				self._unregister()
				self.wait()
# ---------------------------------------------------------------------
# PipeReader: registers one or more non-blocking input files with the
# scheduler, accumulates everything read into memory, and exposes it
# via getvalue(); close() frees the buffer.
# NOTE(review): recovered from a damaged file -- leading numbers are
# artifacts; def headers (_start, isAlive, cancel, _wait, getvalue,
# close) and some control-flow lines are missing. Code kept
# byte-identical; comments only added.
# ---------------------------------------------------------------------
1789 class PipeReader(AbstractPollTask):
1792 Reads output from one or more files and saves it in memory,
1793 for retrieval via the getvalue() method. This is driven by
1794 the scheduler's poll() loop, so it runs entirely within the
1798 __slots__ = ("input_files",) + \
1799 ("_read_data", "_reg_ids")
1802 self._reg_ids = set()
1803 self._read_data = []
# Set O_NONBLOCK on each input fd, then register it for poll events.
1804 for k, f in self.input_files.iteritems():
1805 fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1806 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1807 self._reg_ids.add(self.scheduler.register(f.fileno(),
1808 self._registered_events, self._output_handler))
1809 self._registered = True
1812 return self._registered
1815 if self.returncode is None:
1817 self.cancelled = True
1821 if self.returncode is not None:
1822 return self.returncode
1824 if self._registered:
1825 self.scheduler.schedule(self._reg_ids)
1828 self.returncode = os.EX_OK
1829 return self.returncode
1832 """Retrieve the entire contents"""
1833 return "".join(self._read_data)
1836 """Free the memory buffer."""
1837 self._read_data = None
1839 def _output_handler(self, fd, event):
# Locate which registered file fired the event.
1840 files = self.input_files
1841 for f in files.itervalues():
1842 if fd == f.fileno():
1845 buf = array.array('B')
1846 if event & PollConstants.POLLIN:
1848 buf.fromfile(f, self._bufsize)
1853 self._read_data.append(buf.tostring())
1858 self._unregister_if_appropriate(event)
1859 return self._registered
1861 def _unregister(self):
1863 Unregister from the scheduler and close open files.
1866 self._registered = False
1868 if self._reg_ids is not None:
1869 for reg_id in self._reg_ids:
1870 self.scheduler.unregister(reg_id)
1871 self._reg_ids = None
1873 if self.input_files is not None:
1874 for f in self.input_files.itervalues():
1876 self.input_files = None
# ---------------------------------------------------------------------
# CompositeTask: an AsynchronousTask implemented as a chain of inner
# tasks; _current_task advances via exit listeners, and the
# _default_exit/_final_exit helpers propagate inner returncodes to the
# composite.
# NOTE(review): recovered from a damaged file -- leading numbers are
# artifacts; def headers (isAlive, cancel, _poll, _wait) and loop
# scaffolding lines are missing. Code kept byte-identical; comments
# only added.
# ---------------------------------------------------------------------
1878 class CompositeTask(AsynchronousTask):
1880 __slots__ = ("scheduler",) + ("_current_task",)
1883 return self._current_task is not None
1886 self.cancelled = True
1887 if self._current_task is not None:
1888 self._current_task.cancel()
1892 This does a loop calling self._current_task.poll()
1893 repeatedly as long as the value of self._current_task
1894 keeps changing. It calls poll() a maximum of one time
1895 for a given self._current_task instance. This is useful
1896 since calling poll() on a task can trigger advance to
1897 the next task could eventually lead to the returncode
1898 being set in cases when polling only a single task would
1899 not have the same effect.
1904 task = self._current_task
1905 if task is None or task is prev:
1906 # don't poll the same task more than once
1911 return self.returncode
1917 task = self._current_task
1919 # don't wait for the same task more than once
1922 # Before the task.wait() method returned, an exit
1923 # listener should have set self._current_task to either
1924 # a different task or None. Something is wrong.
1925 raise AssertionError("self._current_task has not " + \
1926 "changed since calling wait", self, task)
1930 return self.returncode
1932 def _assert_current(self, task):
1934 Raises an AssertionError if the given task is not the
1935 same one as self._current_task. This can be useful
1938 if task is not self._current_task:
1939 raise AssertionError("Unrecognized task: %s" % (task,))
1941 def _default_exit(self, task):
1943 Calls _assert_current() on the given task and then sets the
1944 composite returncode attribute if task.returncode != os.EX_OK.
1945 If the task failed then self._current_task will be set to None.
1946 Subclasses can use this as a generic task exit callback.
1949 @returns: The task.returncode attribute.
1951 self._assert_current(task)
1952 if task.returncode != os.EX_OK:
1953 self.returncode = task.returncode
1954 self._current_task = None
1955 return task.returncode
1957 def _final_exit(self, task):
1959 Assumes that task is the final task of this composite task.
1960 Calls _default_exit() and sets self.returncode to the task's
1961 returncode and sets self._current_task to None.
1963 self._default_exit(task)
1964 self._current_task = None
1965 self.returncode = task.returncode
1966 return self.returncode
1968 def _default_final_exit(self, task):
1970 This calls _final_exit() and then wait().
1972 Subclasses can use this as a generic final task exit callback.
1975 self._final_exit(task)
1978 def _start_task(self, task, exit_handler):
1980 Register exit handler for the given task, set it
1981 as self._current_task, and call task.start().
1983 Subclasses can use this as a generic way to start
1987 task.addExitListener(exit_handler)
1988 self._current_task = task
class TaskSequence(CompositeTask):
	"""
	A collection of tasks that executes sequentially. Each task
	must have a addExitListener() method that can be used as
	a means to trigger movement from one task to the next.
	"""

	__slots__ = ("_task_queue",)

	def __init__(self, **kwargs):
		AsynchronousTask.__init__(self, **kwargs)
		self._task_queue = deque()

	def add(self, task):
		self._task_queue.append(task)

	def start(self):
		self._start_next_task()

	def cancel(self):
		# Drop queued tasks so nothing runs after the current one.
		self._task_queue.clear()
		CompositeTask.cancel(self)

	def _start_next_task(self):
		self._start_task(self._task_queue.popleft(),
			self._task_exit_handler)

	def _task_exit_handler(self, task):
		# Stop on the first failure; otherwise advance or finish.
		if self._default_exit(task) != os.EX_OK:
			self.wait()
		elif self._task_queue:
			self._start_next_task()
		else:
			self._final_exit(task)
			self.wait()
# ---------------------------------------------------------------------
# SubProcess: poll-driven task wrapping a forked child process; _poll
# uses non-blocking waitpid, _wait blocks (via the scheduler when
# registered), cancel sends SIGTERM, and _set_returncode normalizes
# the waitpid status into a shell-style exit code.
# NOTE(review): recovered from a damaged file -- leading numbers are
# artifacts; def headers (_poll, cancel, isAlive, _wait) and
# try/except scaffolding are missing. Code kept byte-identical;
# comments only added.
# ---------------------------------------------------------------------
2027 class SubProcess(AbstractPollTask):
2029 __slots__ = ("pid",) + \
2030 ("_files", "_reg_id")
2032 # A file descriptor is required for the scheduler to monitor changes from
2033 # inside a poll() loop. When logging is not enabled, create a pipe just to
2034 # serve this purpose alone.
2038 if self.returncode is not None:
2039 return self.returncode
2040 if self.pid is None:
2041 return self.returncode
2042 if self._registered:
2043 return self.returncode
# Non-blocking reap; ECHILD means the child is already gone.
2046 retval = os.waitpid(self.pid, os.WNOHANG)
2048 if e.errno != errno.ECHILD:
2051 retval = (self.pid, 1)
2053 if retval == (0, 0):
2055 self._set_returncode(retval)
2056 return self.returncode
2061 os.kill(self.pid, signal.SIGTERM)
2063 if e.errno != errno.ESRCH:
2067 self.cancelled = True
2068 if self.pid is not None:
2070 return self.returncode
2073 return self.pid is not None and \
2074 self.returncode is None
2078 if self.returncode is not None:
2079 return self.returncode
2081 if self._registered:
2082 self.scheduler.schedule(self._reg_id)
2084 if self.returncode is not None:
2085 return self.returncode
2088 wait_retval = os.waitpid(self.pid, 0)
2090 if e.errno != errno.ECHILD:
2093 self._set_returncode((self.pid, 1))
2095 self._set_returncode(wait_retval)
2097 return self.returncode
2099 def _unregister(self):
2101 Unregister from the scheduler and close open files.
2104 self._registered = False
2106 if self._reg_id is not None:
2107 self.scheduler.unregister(self._reg_id)
2110 if self._files is not None:
2111 for f in self._files.itervalues():
2115 def _set_returncode(self, wait_retval):
# Convert waitpid status: signal deaths become 128+signum style codes.
2117 retval = wait_retval[1]
2119 if retval != os.EX_OK:
2121 retval = (retval & 0xff) << 8
2123 retval = retval >> 8
2125 self.returncode = retval
# ---------------------------------------------------------------------
# SpawnProcess: SubProcess that spawns via portage.process.spawn(),
# wiring the child's stdout/stderr through a pipe (or pty) so the
# scheduler can tee output to a log file and/or the terminal; when no
# logfile is used, a dummy pipe exists solely for EOF detection.
# NOTE(review): recovered from a damaged file -- leading numbers are
# artifacts; the _start def header, flush calls, kwargs construction
# and error-path lines are missing. Code kept byte-identical;
# comments only added.
# ---------------------------------------------------------------------
2127 class SpawnProcess(SubProcess):
2130 Constructor keyword args are passed into portage.process.spawn().
2131 The required "args" keyword argument will be passed as the first
2135 _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2136 "uid", "gid", "groups", "umask", "logfile",
2137 "path_lookup", "pre_exec")
2139 __slots__ = ("args",) + \
2142 _file_names = ("log", "process", "stdout")
2143 _files_dict = slot_dict_class(_file_names, prefix="")
2150 if self.fd_pipes is None:
2152 fd_pipes = self.fd_pipes
# Default the standard streams to the parent's fds.
2153 fd_pipes.setdefault(0, sys.stdin.fileno())
2154 fd_pipes.setdefault(1, sys.stdout.fileno())
2155 fd_pipes.setdefault(2, sys.stderr.fileno())
2157 # flush any pending output
2158 for fd in fd_pipes.itervalues():
2159 if fd == sys.stdout.fileno():
2161 if fd == sys.stderr.fileno():
2164 logfile = self.logfile
2165 self._files = self._files_dict()
2168 master_fd, slave_fd = self._pipe(fd_pipes)
2169 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2170 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2173 fd_pipes_orig = fd_pipes.copy()
2175 # TODO: Use job control functions like tcsetpgrp() to control
2176 # access to stdin. Until then, use /dev/null so that any
2177 # attempts to read from stdin will immediately return EOF
2178 # instead of blocking indefinitely.
2179 null_input = open('/dev/null', 'rb')
2180 fd_pipes[0] = null_input.fileno()
2182 fd_pipes[0] = fd_pipes_orig[0]
2184 files.process = os.fdopen(master_fd, 'r')
2185 if logfile is not None:
# Logging: child's stdout+stderr both go to the slave end; parent
# tees into the log file (and the terminal unless backgrounded).
2187 fd_pipes[1] = slave_fd
2188 fd_pipes[2] = slave_fd
2190 files.log = open(logfile, "a")
2191 portage.util.apply_secpass_permissions(logfile,
2192 uid=portage.portage_uid, gid=portage.portage_gid,
2195 if not self.background:
2196 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'w')
2198 output_handler = self._output_handler
2202 # Create a dummy pipe so the scheduler can monitor
2203 # the process from inside a poll() loop.
2204 fd_pipes[self._dummy_pipe_fd] = slave_fd
2206 fd_pipes[1] = slave_fd
2207 fd_pipes[2] = slave_fd
2208 output_handler = self._dummy_handler
2211 for k in self._spawn_kwarg_names:
2212 v = getattr(self, k)
2216 kwargs["fd_pipes"] = fd_pipes
2217 kwargs["returnpid"] = True
2218 kwargs.pop("logfile", None)
2220 self._reg_id = self.scheduler.register(files.process.fileno(),
2221 self._registered_events, output_handler)
2222 self._registered = True
2224 retval = self._spawn(self.args, **kwargs)
2227 if null_input is not None:
2230 if isinstance(retval, int):
# spawn() failed: int return is the error code.
2233 self.returncode = retval
2237 self.pid = retval[0]
2238 portage.process.spawned_pids.remove(self.pid)
2240 def _pipe(self, fd_pipes):
2242 @type fd_pipes: dict
2243 @param fd_pipes: pipes from which to copy terminal size if desired.
2247 def _spawn(self, args, **kwargs):
2248 return portage.process.spawn(args, **kwargs)
2250 def _output_handler(self, fd, event):
2252 buf = array.array('B')
2253 if event & PollConstants.POLLIN:
2255 buf.fromfile(files.process, self._bufsize)
2259 if not self.background:
2260 buf.tofile(files.stdout)
2261 files.stdout.flush()
2262 buf.tofile(files.log)
2268 self._unregister_if_appropriate(event)
2269 return self._registered
2271 def _dummy_handler(self, fd, event):
2273 This method is mainly interested in detecting EOF, since
2274 the only purpose of the pipe is to allow the scheduler to
2275 monitor the process from inside a poll() loop.
2278 buf = array.array('B')
2279 if event & PollConstants.POLLIN:
2281 buf.fromfile(files.process, self._bufsize)
2290 self._unregister_if_appropriate(event)
2291 return self._registered
# ---------------------------------------------------------------------
# MiscFunctionsProcess: SpawnProcess subclass that runs selected
# functions from misc-functions.sh in an existing ebuild environment,
# then post-processes the exit status via the ebuild exit-status file.
# NOTE(review): recovered from a damaged file -- leading numbers are
# artifacts; the _start def header is among the missing lines. Code
# kept byte-identical; comments only added.
# ---------------------------------------------------------------------
2293 class MiscFunctionsProcess(SpawnProcess):
2295 Spawns misc-functions.sh with an existing ebuild environment.
2298 __slots__ = ("commands", "phase", "pkg", "settings")
2301 settings = self.settings
2302 settings.pop("EBUILD_PHASE", None)
2303 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2304 misc_sh_binary = os.path.join(portage_bin_path,
2305 os.path.basename(portage.const.MISC_SH_BINARY))
2307 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2308 self.logfile = settings.get("PORTAGE_LOG_FILE")
# Remove any stale exit-status file before spawning.
2310 portage._doebuild_exit_status_unlink(
2311 settings.get("EBUILD_EXIT_STATUS_FILE"))
2313 SpawnProcess._start(self)
2315 def _spawn(self, args, **kwargs):
2316 settings = self.settings
2317 debug = settings.get("PORTAGE_DEBUG") == "1"
2318 return portage.spawn(" ".join(args), settings,
2319 debug=debug, **kwargs)
2321 def _set_returncode(self, wait_retval):
2322 SpawnProcess._set_returncode(self, wait_retval)
2323 self.returncode = portage._doebuild_exit_status_check_and_log(
2324 self.settings, self.phase, self.returncode)
# ---------------------------------------------------------------------
# EbuildFetcher: SpawnProcess that runs the ebuild "fetch" phase in a
# child process (via the ebuild binary), using a config instance from
# config_pool and a private build dir; _set_returncode collects elog
# messages and reports fetch failures.
# NOTE(review): recovered from a damaged file -- leading numbers are
# artifacts; the _start def header, phase selection, and nice/debug
# conditionals are among the missing lines. Code kept byte-identical;
# comments only added.
# ---------------------------------------------------------------------
2326 class EbuildFetcher(SpawnProcess):
2328 __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2333 root_config = self.pkg.root_config
2334 portdb = root_config.trees["porttree"].dbapi
2335 ebuild_path = portdb.findname(self.pkg.cpv)
2336 settings = self.config_pool.allocate()
2337 self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2338 self._build_dir.lock()
2339 self._build_dir.clean()
2340 portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2341 if self.logfile is None:
2342 self.logfile = settings.get("PORTAGE_LOG_FILE")
2348 # If any incremental variables have been overridden
2349 # via the environment, those values need to be passed
2350 # along here so that they are correctly considered by
2351 # the config instance in the subproccess.
2352 fetch_env = os.environ.copy()
2354 fetch_env["PORTAGE_NICENESS"] = "0"
2356 fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2358 ebuild_binary = os.path.join(
2359 settings["PORTAGE_BIN_PATH"], "ebuild")
2361 fetch_args = [ebuild_binary, ebuild_path, phase]
2362 debug = settings.get("PORTAGE_DEBUG") == "1"
2364 fetch_args.append("--debug")
2366 self.args = fetch_args
2367 self.env = fetch_env
2368 SpawnProcess._start(self)
2370 def _pipe(self, fd_pipes):
2371 """When appropriate, use a pty so that fetcher progress bars,
2372 like wget has, will work properly."""
2373 if self.background or not sys.stdout.isatty():
2374 # When the output only goes to a log file,
2375 # there's no point in creating a pty.
2377 stdout_pipe = fd_pipes.get(1)
2378 got_pty, master_fd, slave_fd = \
2379 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2380 return (master_fd, slave_fd)
2382 def _set_returncode(self, wait_retval):
2383 SpawnProcess._set_returncode(self, wait_retval)
2384 # Collect elog messages that might have been
2385 # created by the pkg_nofetch phase.
2386 if self._build_dir is not None:
2387 # Skip elog messages for prefetch, in order to avoid duplicates.
2388 if not self.prefetch and self.returncode != os.EX_OK:
2390 if self.logfile is not None:
2392 elog_out = open(self.logfile, 'a')
2393 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2394 if self.logfile is not None:
2395 msg += ", Log file:"
2396 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2397 if self.logfile is not None:
2398 eerror(" '%s'" % (self.logfile,),
2399 phase="unpack", key=self.pkg.cpv, out=elog_out)
2400 if elog_out is not None:
2402 if not self.prefetch:
2403 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2404 features = self._build_dir.settings.features
2405 if self.returncode == os.EX_OK:
2406 self._build_dir.clean()
# Always release the build dir and return the config to the pool.
2407 self._build_dir.unlock()
2408 self.config_pool.deallocate(self._build_dir.settings)
2409 self._build_dir = None
# ---------------------------------------------------------------------
# EbuildBuildDir: manages locking of a package's PORTAGE_BUILDDIR and
# its parent category directory; lock() derives the dir path from the
# ebuild environment when not given, clean() removes the build dir
# unless keepwork/keeptemp is set, and unlock() also tries to remove
# the now-empty category dir.
# NOTE(review): recovered from a damaged file -- leading numbers are
# artifacts; the lock()/clean()/unlock() def headers and try/finally
# scaffolding are among the missing lines. Code kept byte-identical;
# comments only added.
# ---------------------------------------------------------------------
2411 class EbuildBuildDir(SlotObject):
2413 __slots__ = ("dir_path", "pkg", "settings",
2414 "locked", "_catdir", "_lock_obj")
2416 def __init__(self, **kwargs):
2417 SlotObject.__init__(self, **kwargs)
2422 This raises an AlreadyLocked exception if lock() is called
2423 while a lock is already held. In order to avoid this, call
2424 unlock() or check whether the "locked" attribute is True
2425 or False before calling lock().
2427 if self._lock_obj is not None:
2428 raise self.AlreadyLocked((self._lock_obj,))
2430 dir_path = self.dir_path
2431 if dir_path is None:
# Derive PORTAGE_BUILDDIR by running doebuild_environment for "setup".
2432 root_config = self.pkg.root_config
2433 portdb = root_config.trees["porttree"].dbapi
2434 ebuild_path = portdb.findname(self.pkg.cpv)
2435 settings = self.settings
2436 settings.setcpv(self.pkg)
2437 debug = settings.get("PORTAGE_DEBUG") == "1"
2438 use_cache = 1 # always true
2439 portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2440 self.settings, debug, use_cache, portdb)
2441 dir_path = self.settings["PORTAGE_BUILDDIR"]
2443 catdir = os.path.dirname(dir_path)
2444 self._catdir = catdir
2446 portage.util.ensure_dirs(os.path.dirname(catdir),
2447 gid=portage.portage_gid,
# Hold the category-dir lock only while creating the build-dir lock.
2451 catdir_lock = portage.locks.lockdir(catdir)
2452 portage.util.ensure_dirs(catdir,
2453 gid=portage.portage_gid,
2455 self._lock_obj = portage.locks.lockdir(dir_path)
2457 self.locked = self._lock_obj is not None
2458 if catdir_lock is not None:
2459 portage.locks.unlockdir(catdir_lock)
2462 """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
2463 by keepwork or keeptemp in FEATURES."""
2464 settings = self.settings
2465 features = settings.features
2466 if not ("keepwork" in features or "keeptemp" in features):
2468 shutil.rmtree(settings["PORTAGE_BUILDDIR"])
2469 except EnvironmentError, e:
2470 if e.errno != errno.ENOENT:
2475 if self._lock_obj is None:
2478 portage.locks.unlockdir(self._lock_obj)
2479 self._lock_obj = None
2482 catdir = self._catdir
2485 catdir_lock = portage.locks.lockdir(catdir)
# Removing the category dir may fail if it's shared or non-empty.
2491 if e.errno not in (errno.ENOENT,
2492 errno.ENOTEMPTY, errno.EEXIST):
2495 portage.locks.unlockdir(catdir_lock)
2497 class AlreadyLocked(portage.exception.PortageException):
2500 class EbuildBuild(CompositeTask):
# Composite task that drives a full from-source build of one ebuild:
# prefetch -> fetch -> (optional) clean/compile/package -> merge.
# NOTE(review): this dump elides interior lines; comments describe only
# what the visible code shows.
2502 __slots__ = ("args_set", "config_pool", "find_blockers",
2503 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2504 "prefetcher", "settings", "world_atom") + \
2505 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2509 logger = self.logger
2512 settings = self.settings
2513 world_atom = self.world_atom
2514 root_config = pkg.root_config
2517 portdb = root_config.trees[tree].dbapi
# Record how this package is being emerged so the ebuild environment
# can distinguish source builds from binary merges.
2518 settings["EMERGE_FROM"] = pkg.type_name
2519 settings.backup_changes("EMERGE_FROM")
2521 ebuild_path = portdb.findname(self.pkg.cpv)
2522 self._ebuild_path = ebuild_path
# If a background prefetcher exists and is still running, wait for it
# (it holds a lock on the files being fetched); otherwise proceed now.
2524 prefetcher = self.prefetcher
2525 if prefetcher is None:
2527 elif not prefetcher.isAlive():
2529 elif prefetcher.poll() is None:
2531 waiting_msg = "Fetching files " + \
2532 "in the background. " + \
2533 "To view fetch progress, run `tail -f " + \
2534 "/var/log/emerge-fetch.log` in another " + \
2536 msg_prefix = colorize("GOOD", " * ")
2537 from textwrap import wrap
2538 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2539 for line in wrap(waiting_msg, 65))
2540 if not self.background:
2541 writemsg(waiting_msg, noiselevel=-1)
2543 self._current_task = prefetcher
2544 prefetcher.addExitListener(self._prefetch_exit)
2547 self._prefetch_exit(prefetcher)
2549 def _prefetch_exit(self, prefetcher):
# Called when the prefetcher finishes; starts the real fetch step.
2553 settings = self.settings
# Pretend/fetchonly path: synchronous fetch, result becomes our returncode.
2556 fetcher = EbuildFetchonly(
2557 fetch_all=opts.fetch_all_uri,
2558 pkg=pkg, pretend=opts.pretend,
2560 retval = fetcher.execute()
2561 self.returncode = retval
# Normal path: asynchronous fetcher scheduled as a subtask.
2565 fetcher = EbuildFetcher(config_pool=self.config_pool,
2566 fetchall=opts.fetch_all_uri,
2567 fetchonly=opts.fetchonly,
2568 background=self.background,
2569 pkg=pkg, scheduler=self.scheduler)
2571 self._start_task(fetcher, self._fetch_exit)
2573 def _fetch_exit(self, fetcher):
# Evaluates the fetch result, keeps the fetch log only on failure,
# then locks the build dir and kicks off the actual build.
2577 fetch_failed = False
2579 fetch_failed = self._final_exit(fetcher) != os.EX_OK
2581 fetch_failed = self._default_exit(fetcher) != os.EX_OK
2583 if fetch_failed and fetcher.logfile is not None and \
2584 os.path.exists(fetcher.logfile):
2585 self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2587 if not fetch_failed and fetcher.logfile is not None:
2588 # Fetch was successful, so remove the fetch log.
2590 os.unlink(fetcher.logfile)
2594 if fetch_failed or opts.fetchonly:
2598 logger = self.logger
2600 pkg_count = self.pkg_count
2601 scheduler = self.scheduler
2602 settings = self.settings
2603 features = settings.features
2604 ebuild_path = self._ebuild_path
2605 system_set = pkg.root_config.sets["system"]
2607 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2608 self._build_dir.lock()
2610 # Cleaning is triggered before the setup
2611 # phase, in portage.doebuild().
2612 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2613 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2614 short_msg = "emerge: (%s of %s) %s Clean" % \
2615 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2616 logger.log(msg, short_msg=short_msg)
2618 #buildsyspkg: Check if we need to _force_ binary package creation
2619 self._issyspkg = "buildsyspkg" in features and \
2620 system_set.findAtomForPackage(pkg) and \
2623 if opts.buildpkg or self._issyspkg:
2625 self._buildpkg = True
2627 msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2628 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2629 short_msg = "emerge: (%s of %s) %s Compile" % \
2630 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2631 logger.log(msg, short_msg=short_msg)
2634 msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2635 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2636 short_msg = "emerge: (%s of %s) %s Compile" % \
2637 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2638 logger.log(msg, short_msg=short_msg)
2640 build = EbuildExecuter(background=self.background, pkg=pkg,
2641 scheduler=scheduler, settings=settings)
2642 self._start_task(build, self._build_exit)
2644 def _unlock_builddir(self):
# Flush elog messages before releasing the build-dir lock.
2645 portage.elog.elog_process(self.pkg.cpv, self.settings)
2646 self._build_dir.unlock()
2648 def _build_exit(self, build):
2649 if self._default_exit(build) != os.EX_OK:
2650 self._unlock_builddir()
2655 buildpkg = self._buildpkg
2658 self._final_exit(build)
# buildsyspkg: announce that a rescue tarball is being created.
2663 msg = ">>> This is a system package, " + \
2664 "let's pack a rescue tarball.\n"
2666 log_path = self.settings.get("PORTAGE_LOG_FILE")
2667 if log_path is not None:
2668 log_file = open(log_path, 'a')
2674 if not self.background:
2675 portage.writemsg_stdout(msg, noiselevel=-1)
2677 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2678 scheduler=self.scheduler, settings=self.settings)
2680 self._start_task(packager, self._buildpkg_exit)
2682 def _buildpkg_exit(self, packager):
# Releases the build dir lock when there is a failure or
2685 when in buildpkgonly mode. Otherwise, the lock will
2686 be released when merge() is called.
2689 if self._default_exit(packager) == os.EX_OK and \
2690 self.opts.buildpkgonly:
2691 # Need to call "clean" phase for buildpkgonly mode
2692 portage.elog.elog_process(self.pkg.cpv, self.settings)
2694 clean_phase = EbuildPhase(background=self.background,
2695 pkg=self.pkg, phase=phase,
2696 scheduler=self.scheduler, settings=self.settings,
2698 self._start_task(clean_phase, self._clean_exit)
2701 if self._final_exit(packager) != os.EX_OK or \
2702 self.opts.buildpkgonly:
2703 self._unlock_builddir()
2706 def _clean_exit(self, clean_phase):
2707 if self._final_exit(clean_phase) != os.EX_OK or \
2708 self.opts.buildpkgonly:
2709 self._unlock_builddir()
# install():
2714 Install the package and then clean up and release locks.
2715 Only call this after the build has completed successfully
2716 and neither fetchonly nor buildpkgonly mode are enabled.
2719 find_blockers = self.find_blockers
2720 ldpath_mtimes = self.ldpath_mtimes
2721 logger = self.logger
2723 pkg_count = self.pkg_count
2724 settings = self.settings
2725 world_atom = self.world_atom
2726 ebuild_path = self._ebuild_path
2729 merge = EbuildMerge(find_blockers=self.find_blockers,
2730 ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2731 pkg_count=pkg_count, pkg_path=ebuild_path,
2732 scheduler=self.scheduler,
2733 settings=settings, tree=tree, world_atom=world_atom)
2735 msg = " === (%s of %s) Merging (%s::%s)" % \
2736 (pkg_count.curval, pkg_count.maxval,
2737 pkg.cpv, ebuild_path)
2738 short_msg = "emerge: (%s of %s) %s Merge" % \
2739 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2740 logger.log(msg, short_msg=short_msg)
# Synchronous merge; the build-dir lock is always released afterwards.
2743 rval = merge.execute()
2745 self._unlock_builddir()
2749 class EbuildExecuter(CompositeTask):
# Runs the ebuild phase pipeline for a source build:
# clean -> setup -> unpack -> (prepare/configure/)compile/test/install,
# with the later phases chained as a TaskSequence.
2751 __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
# Full phase list; older EAPIs skip prepare/configure (see _unpack_exit).
2753 _phases = ("prepare", "configure", "compile", "test", "install")
2755 _live_eclasses = frozenset([
2765 self._tree = "porttree"
2768 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2769 scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2770 self._start_task(clean_phase, self._clean_phase_exit)
2772 def _clean_phase_exit(self, clean_phase):
2774 if self._default_exit(clean_phase) != os.EX_OK:
2779 scheduler = self.scheduler
2780 settings = self.settings
2783 # This initializes PORTAGE_LOG_FILE.
2784 portage.prepare_build_dirs(pkg.root, settings, cleanup)
# The setup phase goes through the scheduler's setup queue rather
# than _start_task, hence the manual listener registration.
2786 setup_phase = EbuildPhase(background=self.background,
2787 pkg=pkg, phase="setup", scheduler=scheduler,
2788 settings=settings, tree=self._tree)
2790 setup_phase.addExitListener(self._setup_exit)
2791 self._current_task = setup_phase
2792 self.scheduler.scheduleSetup(setup_phase)
2794 def _setup_exit(self, setup_phase):
2796 if self._default_exit(setup_phase) != os.EX_OK:
2800 unpack_phase = EbuildPhase(background=self.background,
2801 pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2802 settings=self.settings, tree=self._tree)
2804 if self._live_eclasses.intersection(self.pkg.inherited):
2805 # Serialize $DISTDIR access for live ebuilds since
2806 # otherwise they can interfere with each other.
2808 unpack_phase.addExitListener(self._unpack_exit)
2809 self._current_task = unpack_phase
2810 self.scheduler.scheduleUnpack(unpack_phase)
2813 self._start_task(unpack_phase, self._unpack_exit)
2815 def _unpack_exit(self, unpack_phase):
2817 if self._default_exit(unpack_phase) != os.EX_OK:
2821 ebuild_phases = TaskSequence(scheduler=self.scheduler)
# Select which phases apply based on the ebuild's EAPI.
2824 phases = self._phases
2825 eapi = pkg.metadata["EAPI"]
2826 if eapi in ("0", "1", "2_pre1"):
2827 # skip src_prepare and src_configure
2829 elif eapi in ("2_pre2",):
2833 for phase in phases:
2834 ebuild_phases.add(EbuildPhase(background=self.background,
2835 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
2836 settings=self.settings, tree=self._tree))
2838 self._start_task(ebuild_phases, self._default_final_exit)
2840 class EbuildMetadataPhase(SubProcess):
2843 Asynchronous interface for the ebuild "depend" phase which is
2844 used to extract metadata from the ebuild.
2847 __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
2848 "ebuild_mtime", "portdb", "repo_path", "settings") + \
2851 _file_names = ("ebuild",)
2852 _files_dict = slot_dict_class(_file_names, prefix="")
2856 settings = self.settings
2858 ebuild_path = self.ebuild_path
2859 debug = settings.get("PORTAGE_DEBUG") == "1"
# Build the fd map for the child: caller-supplied pipes win, with
# stdin/stdout/stderr defaulting to this process's descriptors.
2863 if self.fd_pipes is not None:
2864 fd_pipes = self.fd_pipes.copy()
2868 fd_pipes.setdefault(0, sys.stdin.fileno())
2869 fd_pipes.setdefault(1, sys.stdout.fileno())
2870 fd_pipes.setdefault(2, sys.stderr.fileno())
2872 # flush any pending output
2873 for fd in fd_pipes.itervalues():
2874 if fd == sys.stdout.fileno():
2876 if fd == sys.stderr.fileno():
2879 fd_pipes_orig = fd_pipes.copy()
2880 self._files = self._files_dict()
# Non-blocking pipe: the child writes metadata to slave_fd while we
# read from master_fd via the scheduler's poll loop.
2883 master_fd, slave_fd = os.pipe()
2884 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2885 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2887 fd_pipes[self._metadata_fd] = slave_fd
2889 self._raw_metadata = []
2890 files.ebuild = os.fdopen(master_fd, 'r')
2891 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
2892 self._registered_events, self._output_handler)
2893 self._registered = True
# Spawn the "depend" phase; returnpid=True yields a pid list on
# success, or an int error code if spawning failed.
2895 retval = portage.doebuild(ebuild_path, "depend",
2896 settings["ROOT"], settings, debug,
2897 mydbapi=self.portdb, tree="porttree",
2898 fd_pipes=fd_pipes, returnpid=True)
2902 if isinstance(retval, int):
2903 # doebuild failed before spawning
2905 self.returncode = retval
2909 self.pid = retval[0]
# This class tracks the pid itself, so remove it from the
# global spawned_pids list.
2910 portage.process.spawned_pids.remove(self.pid)
2912 def _output_handler(self, fd, event):
# Poll callback: accumulate metadata output; on EOF/HUP, split into
# lines and hand them to the metadata callback if the child succeeded.
2914 if event & PollConstants.POLLIN:
2915 self._raw_metadata.append(files.ebuild.read())
2916 if not self._raw_metadata[-1] or event & PollConstants.POLLHUP:
2917 # Split lines here so they can be counted inside _set_returncode().
2918 self._raw_metadata = "".join(self._raw_metadata).splitlines()
2922 if self.returncode == os.EX_OK:
2923 metadata = izip(portage.auxdbkeys, self._raw_metadata)
2924 self.metadata_callback(self.cpv, self.ebuild_path,
2925 self.repo_path, metadata, self.ebuild_mtime)
2927 self._unregister_if_appropriate(event)
2928 return self._registered
2930 def _set_returncode(self, wait_retval):
2931 SubProcess._set_returncode(self, wait_retval)
2932 if self.returncode == os.EX_OK and \
2933 len(portage.auxdbkeys) != len(self._raw_metadata):
2934 # Don't trust bash's returncode if the
2935 # number of lines is incorrect.
2938 class EbuildProcess(SpawnProcess):
# Spawns a single ebuild phase via portage.doebuild() as a subprocess.
2940 __slots__ = ("phase", "pkg", "settings", "tree")
2943 # Don't open the log file during the clean phase since the
2944 # open file can result in an nfs lock on $T/build.log which
2945 # prevents the clean phase from removing $T.
2946 if self.phase not in ("clean", "cleanrm"):
2947 self.logfile = self.settings.get("PORTAGE_LOG_FILE")
2948 SpawnProcess._start(self)
2950 def _pipe(self, fd_pipes):
# Prefer a pty (sized to the terminal) over a plain pipe for output.
2951 stdout_pipe = fd_pipes.get(1)
2952 got_pty, master_fd, slave_fd = \
2953 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2954 return (master_fd, slave_fd)
2956 def _spawn(self, args, **kwargs):
# Delegate the actual phase execution to portage.doebuild().
2958 root_config = self.pkg.root_config
2960 mydbapi = root_config.trees[tree].dbapi
2961 settings = self.settings
2962 ebuild_path = settings["EBUILD"]
2963 debug = settings.get("PORTAGE_DEBUG") == "1"
2965 rval = portage.doebuild(ebuild_path, self.phase,
2966 root_config.root, settings, debug,
2967 mydbapi=mydbapi, tree=tree, **kwargs)
2971 def _set_returncode(self, wait_retval):
2972 SpawnProcess._set_returncode(self, wait_retval)
# Cross-check the exit status file (except for clean phases), and
# honor FEATURES=test-fail-continue for failed test phases.
2974 if self.phase not in ("clean", "cleanrm"):
2975 self.returncode = portage._doebuild_exit_status_check_and_log(
2976 self.settings, self.phase, self.returncode)
2978 if self.phase == "test" and self.returncode != os.EX_OK and \
2979 "test-fail-continue" in self.settings.features:
2980 self.returncode = os.EX_OK
2982 portage._post_phase_userpriv_perms(self.settings)
2984 class EbuildPhase(CompositeTask):
# Wraps one EbuildProcess and runs any registered post-phase commands
# (misc functions) after the phase itself succeeds.
2986 __slots__ = ("background", "pkg", "phase",
2987 "scheduler", "settings", "tree")
2989 _post_phase_cmds = portage._post_phase_cmds
2993 ebuild_process = EbuildProcess(background=self.background,
2994 pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
2995 settings=self.settings, tree=self.tree)
2997 self._start_task(ebuild_process, self._ebuild_exit)
2999 def _ebuild_exit(self, ebuild_process):
# After install, scan the build log for QA problems, appending the
# report to the log when running in the background.
3001 if self.phase == "install":
3003 log_path = self.settings.get("PORTAGE_LOG_FILE")
3005 if self.background and log_path is not None:
3006 log_file = open(log_path, 'a')
3009 portage._check_build_log(self.settings, out=out)
3011 if log_file is not None:
3014 if self._default_exit(ebuild_process) != os.EX_OK:
3018 settings = self.settings
3020 if self.phase == "install":
3021 portage._post_src_install_uid_fix(settings)
# Chain the phase's post-commands (if any) as a follow-up subtask;
# otherwise finish with the phase's own returncode.
3023 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3024 if post_phase_cmds is not None:
3025 post_phase = MiscFunctionsProcess(background=self.background,
3026 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3027 scheduler=self.scheduler, settings=settings)
3028 self._start_task(post_phase, self._post_phase_exit)
3031 self.returncode = ebuild_process.returncode
3032 self._current_task = None
3035 def _post_phase_exit(self, post_phase):
3036 if self._final_exit(post_phase) != os.EX_OK:
3037 writemsg("!!! post %s failed; exiting.\n" % self.phase,
3039 self._current_task = None
3043 class EbuildBinpkg(EbuildProcess):
# Runs the "package" phase to build a .tbz2 binary package.
3045 This assumes that src_install() has successfully completed.
3047 __slots__ = ("_binpkg_tmpfile",)
3050 self.phase = "package"
3051 self.tree = "porttree"
3053 root_config = pkg.root_config
3054 portdb = root_config.trees["porttree"].dbapi
3055 bintree = root_config.trees["bintree"]
3056 ebuild_path = portdb.findname(self.pkg.cpv)
3057 settings = self.settings
3058 debug = settings.get("PORTAGE_DEBUG") == "1"
# Write to a pid-suffixed temp file in pkgdir so a partially written
# package never shadows a valid one; inject() moves it into place.
3060 bintree.prevent_collision(pkg.cpv)
3061 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3062 pkg.cpv + ".tbz2." + str(os.getpid()))
3063 self._binpkg_tmpfile = binpkg_tmpfile
3064 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3065 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3068 EbuildProcess._start(self)
# Remove the temp-file setting again so it doesn't leak into later phases.
3070 settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3072 def _set_returncode(self, wait_retval):
3073 EbuildProcess._set_returncode(self, wait_retval)
3076 bintree = pkg.root_config.trees["bintree"]
3077 binpkg_tmpfile = self._binpkg_tmpfile
3078 if self.returncode == os.EX_OK:
# On success, register the finished package with the binary tree.
3079 bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3081 class EbuildMerge(SlotObject):
# Synchronous wrapper around portage.merge() for an already-built
# package image; updates the world file on success.
3083 __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3084 "pkg", "pkg_count", "pkg_path", "pretend",
3085 "scheduler", "settings", "tree", "world_atom")
3088 root_config = self.pkg.root_config
3089 settings = self.settings
3090 retval = portage.merge(settings["CATEGORY"],
3091 settings["PF"], settings["D"],
3092 os.path.join(settings["PORTAGE_BUILDDIR"],
3093 "build-info"), root_config.root, settings,
3094 myebuild=settings["EBUILD"],
3095 mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3096 vartree=root_config.trees["vartree"],
3097 prev_mtimes=self.ldpath_mtimes,
3098 scheduler=self.scheduler,
3099 blockers=self.find_blockers)
3101 if retval == os.EX_OK:
# Record the package in the world set (callback supplied by caller).
3102 self.world_atom(self.pkg)
3107 def _log_success(self):
# Logs the post-build cleaning step (unless FEATURES=noclean) and the
# completion message for this merge.
3109 pkg_count = self.pkg_count
3110 pkg_path = self.pkg_path
3111 logger = self.logger
3112 if "noclean" not in self.settings.features:
3113 short_msg = "emerge: (%s of %s) %s Clean Post" % \
3114 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3115 logger.log((" === (%s of %s) " + \
3116 "Post-Build Cleaning (%s::%s)") % \
3117 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3118 short_msg=short_msg)
3119 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3120 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3122 class PackageUninstall(AsynchronousTask):
# Unmerges one installed package; returncode comes from the unmerge
# result (UninstallFailure.status on failure, EX_OK otherwise).
3124 __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3128 unmerge(self.pkg.root_config, self.opts, "unmerge",
3129 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3130 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3131 writemsg_level=self._writemsg_level)
3132 except UninstallFailure, e:
3133 self.returncode = e.status
3135 self.returncode = os.EX_OK
3138 def _writemsg_level(self, msg, level=0, noiselevel=0):
# Routes unmerge output: to the console (suppressing low-priority
# messages while backgrounded) and, when available, to the log file.
3140 log_path = self.settings.get("PORTAGE_LOG_FILE")
3141 background = self.background
3143 if log_path is None:
3144 if not (background and level < logging.WARNING):
3145 portage.util.writemsg_level(msg,
3146 level=level, noiselevel=noiselevel)
3149 portage.util.writemsg_level(msg,
3150 level=level, noiselevel=noiselevel)
3152 f = open(log_path, 'a')
3158 class Binpkg(CompositeTask):
# Composite task that installs a binary package:
# prefetch -> fetch -> verify -> clean -> unpack info/setup -> extract -> merge.
# NOTE(review): this dump elides interior lines; comments describe only
# what the visible code shows.
3160 __slots__ = ("find_blockers",
3161 "ldpath_mtimes", "logger", "opts",
3162 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3163 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3164 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3166 def _writemsg_level(self, msg, level=0, noiselevel=0):
# Echo to the console when foregrounded, and append to the build log
# when one is configured.
3168 if not self.background:
3169 portage.util.writemsg_level(msg,
3170 level=level, noiselevel=noiselevel)
3172 log_path = self.settings.get("PORTAGE_LOG_FILE")
3173 if log_path is not None:
3174 f = open(log_path, 'a')
3183 settings = self.settings
3184 settings.setcpv(pkg)
3185 self._tree = "bintree"
3186 self._bintree = self.pkg.root_config.trees[self._tree]
3187 self._verify = not self.opts.pretend
# Lay out the temporary build dir (image/, build-info/, extracted ebuild).
3189 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3190 "portage", pkg.category, pkg.pf)
3191 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3192 pkg=pkg, settings=settings)
3193 self._image_dir = os.path.join(dir_path, "image")
3194 self._infloc = os.path.join(dir_path, "build-info")
3195 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3196 settings["EBUILD"] = self._ebuild_path
3197 debug = settings.get("PORTAGE_DEBUG") == "1"
3198 portage.doebuild_environment(self._ebuild_path, "setup",
3199 settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3201 # The prefetcher has already completed or it
# could be running now. If it's running now,
# wait for it to complete since it holds
# a lock on the file being fetched. The
# portage.locks functions are only designed
# to work between separate processes. Since
# the lock is held by the current process,
# use the scheduler and fetcher methods to
# synchronize with the fetcher.
3210 prefetcher = self.prefetcher
3211 if prefetcher is None:
3213 elif not prefetcher.isAlive():
3215 elif prefetcher.poll() is None:
3217 waiting_msg = ("Fetching '%s' " + \
3218 "in the background. " + \
3219 "To view fetch progress, run `tail -f " + \
3220 "/var/log/emerge-fetch.log` in another " + \
3221 "terminal.") % prefetcher.pkg_path
3222 msg_prefix = colorize("GOOD", " * ")
3223 from textwrap import wrap
3224 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3225 for line in wrap(waiting_msg, 65))
3226 if not self.background:
3227 writemsg(waiting_msg, noiselevel=-1)
3229 self._current_task = prefetcher
3230 prefetcher.addExitListener(self._prefetch_exit)
3233 self._prefetch_exit(prefetcher)
3235 def _prefetch_exit(self, prefetcher):
# Lock and recreate the build dir (unless pretend/fetchonly), then
# start the binary package fetch when the package is remote.
3238 pkg_count = self.pkg_count
3239 if not (self.opts.pretend or self.opts.fetchonly):
3240 self._build_dir.lock()
3242 shutil.rmtree(self._build_dir.dir_path)
3243 except EnvironmentError, e:
3244 if e.errno != errno.ENOENT:
3247 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3248 fetcher = BinpkgFetcher(background=self.background,
3249 logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3250 pretend=self.opts.pretend, scheduler=self.scheduler)
3251 pkg_path = fetcher.pkg_path
3252 self._pkg_path = pkg_path
3254 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3256 msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3257 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3258 short_msg = "emerge: (%s of %s) %s Fetch" % \
3259 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3260 self.logger.log(msg, short_msg=short_msg)
3261 self._start_task(fetcher, self._fetcher_exit)
3264 self._fetcher_exit(fetcher)
3266 def _fetcher_exit(self, fetcher):
3268 # The fetcher only has a returncode when
3269 # --getbinpkg is enabled.
3270 if fetcher.returncode is not None:
3271 self._fetched_pkg = True
3272 if self._default_exit(fetcher) != os.EX_OK:
3273 self._unlock_builddir()
3277 if self.opts.pretend:
3278 self._current_task = None
3279 self.returncode = os.EX_OK
# Verify the package digest before using it (skipped when pretend).
3287 logfile = self.settings.get("PORTAGE_LOG_FILE")
3288 verifier = BinpkgVerifier(background=self.background,
3289 logfile=logfile, pkg=self.pkg)
3290 self._start_task(verifier, self._verifier_exit)
3293 self._verifier_exit(verifier)
3295 def _verifier_exit(self, verifier):
3296 if verifier is not None and \
3297 self._default_exit(verifier) != os.EX_OK:
3298 self._unlock_builddir()
3302 logger = self.logger
3304 pkg_count = self.pkg_count
3305 pkg_path = self._pkg_path
# A freshly fetched package gets registered with the binary tree.
3307 if self._fetched_pkg:
3308 self._bintree.inject(pkg.cpv, filename=pkg_path)
3310 if self.opts.fetchonly:
3311 self._current_task = None
3312 self.returncode = os.EX_OK
3316 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3317 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3318 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3319 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3320 logger.log(msg, short_msg=short_msg)
3323 settings = self.settings
3324 ebuild_phase = EbuildPhase(background=self.background,
3325 pkg=pkg, phase=phase, scheduler=self.scheduler,
3326 settings=settings, tree=self._tree)
3328 self._start_task(ebuild_phase, self._clean_exit)
3330 def _clean_exit(self, clean_phase):
# After the clean phase: recreate the work dirs, extract build-info
# metadata from the xpak, then schedule the setup phase.
3331 if self._default_exit(clean_phase) != os.EX_OK:
3332 self._unlock_builddir()
3336 dir_path = self._build_dir.dir_path
3339 shutil.rmtree(dir_path)
3340 except (IOError, OSError), e:
3341 if e.errno != errno.ENOENT:
3345 infloc = self._infloc
3347 pkg_path = self._pkg_path
3350 for mydir in (dir_path, self._image_dir, infloc):
3351 portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3352 gid=portage.data.portage_gid, mode=dir_mode)
3354 # This initializes PORTAGE_LOG_FILE.
3355 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3356 self._writemsg_level(">>> Extracting info\n")
3358 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3359 check_missing_metadata = ("CATEGORY", "PF")
3360 missing_metadata = set()
3361 for k in check_missing_metadata:
3362 v = pkg_xpak.getfile(k)
3364 missing_metadata.add(k)
3366 pkg_xpak.unpackinfo(infloc)
3367 for k in missing_metadata:
3375 f = open(os.path.join(infloc, k), 'wb')
3381 # Store the md5sum in the vdb.
3382 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3384 f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3388 # This gives bashrc users an opportunity to do various things
3389 # such as remove binary packages after they're installed.
3390 settings = self.settings
3391 settings.setcpv(self.pkg)
3392 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3393 settings.backup_changes("PORTAGE_BINPKG_FILE")
3396 setup_phase = EbuildPhase(background=self.background,
3397 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3398 settings=settings, tree=self._tree)
3400 setup_phase.addExitListener(self._setup_exit)
3401 self._current_task = setup_phase
3402 self.scheduler.scheduleSetup(setup_phase)
3404 def _setup_exit(self, setup_phase):
3405 if self._default_exit(setup_phase) != os.EX_OK:
3406 self._unlock_builddir()
# Unpack the package payload into the image dir asynchronously.
3410 extractor = BinpkgExtractorAsync(background=self.background,
3411 image_dir=self._image_dir,
3412 pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3413 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3414 self._start_task(extractor, self._extractor_exit)
3416 def _extractor_exit(self, extractor):
3417 if self._final_exit(extractor) != os.EX_OK:
3418 self._unlock_builddir()
3419 writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3423 def _unlock_builddir(self):
# No lock was taken in pretend/fetchonly mode (see _prefetch_exit).
3424 if self.opts.pretend or self.opts.fetchonly:
3426 portage.elog.elog_process(self.pkg.cpv, self.settings)
3427 self._build_dir.unlock()
3431 # This gives bashrc users an opportunity to do various things
3432 # such as remove binary packages after they're installed.
3433 settings = self.settings
3434 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3435 settings.backup_changes("PORTAGE_BINPKG_FILE")
3437 merge = EbuildMerge(find_blockers=self.find_blockers,
3438 ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3439 pkg=self.pkg, pkg_count=self.pkg_count,
3440 pkg_path=self._pkg_path, scheduler=self.scheduler,
3441 settings=settings, tree=self._tree, world_atom=self.world_atom)
# Synchronous merge; always drop the temp setting and the lock after.
3444 retval = merge.execute()
3446 settings.pop("PORTAGE_BINPKG_FILE", None)
3447 self._unlock_builddir()
3450 class BinpkgFetcher(SpawnProcess):
# Downloads one binary package from PORTAGE_BINHOST using the
# configured FETCHCOMMAND/RESUMECOMMAND, with optional file locking.
3452 __slots__ = ("pkg", "pretend",
3453 "locked", "pkg_path", "_lock_obj")
3455 def __init__(self, **kwargs):
3456 SpawnProcess.__init__(self, **kwargs)
# Destination path inside the local binary tree (PKGDIR).
3458 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3466 pretend = self.pretend
3467 bintree = pkg.root_config.trees["bintree"]
3468 settings = bintree.settings
3469 use_locks = "distlocks" in settings.features
3470 pkg_path = self.pkg_path
3473 portage.util.ensure_dirs(os.path.dirname(pkg_path))
# Resume only when a partial download is already known-invalid.
3476 exists = os.path.exists(pkg_path)
3477 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3478 if not (pretend or resume):
3479 # Remove existing file or broken symlink.
3485 # urljoin doesn't work correctly with
3486 # unrecognized protocols like sftp
3487 if bintree._remote_has_index:
3488 rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3490 rel_uri = pkg.cpv + ".tbz2"
3491 uri = bintree._remote_base_uri.rstrip("/") + \
3492 "/" + rel_uri.lstrip("/")
3494 uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3495 "/" + pkg.pf + ".tbz2"
# Pretend mode just prints the URI and succeeds.
3498 portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3499 self.returncode = os.EX_OK
# Pick the protocol-specific fetch command, falling back to the
# generic FETCHCOMMAND/RESUMECOMMAND.
3503 protocol = urlparse.urlparse(uri)[0]
3504 fcmd_prefix = "FETCHCOMMAND"
3506 fcmd_prefix = "RESUMECOMMAND"
3507 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3509 fcmd = settings.get(fcmd_prefix)
3512 "DISTDIR" : os.path.dirname(pkg_path),
3514 "FILE" : os.path.basename(pkg_path)
3517 fetch_env = dict(settings.iteritems())
3518 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3519 for x in shlex.split(fcmd)]
3521 if self.fd_pipes is None:
3523 fd_pipes = self.fd_pipes
3525 # Redirect all output to stdout since some fetchers like
3526 # wget pollute stderr (if portage detects a problem then it
3527 # can send its own message to stderr).
3528 fd_pipes.setdefault(0, sys.stdin.fileno())
3529 fd_pipes.setdefault(1, sys.stdout.fileno())
3530 fd_pipes.setdefault(2, sys.stdout.fileno())
3532 self.args = fetch_args
3533 self.env = fetch_env
3534 SpawnProcess._start(self)
3536 def _set_returncode(self, wait_retval):
3537 SpawnProcess._set_returncode(self, wait_retval)
3538 if self.returncode == os.EX_OK:
3539 # If possible, update the mtime to match the remote package if
3540 # the fetcher didn't already do it automatically.
3541 bintree = self.pkg.root_config.trees["bintree"]
3542 if bintree._remote_has_index:
3543 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3544 if remote_mtime is not None:
3546 remote_mtime = float(remote_mtime)
3551 local_mtime = os.stat(self.pkg_path).st_mtime
3555 if remote_mtime != local_mtime:
3557 os.utime(self.pkg_path,
3558 (remote_mtime, remote_mtime))
# lock():
3567 This raises an AlreadyLocked exception if lock() is called
3568 while a lock is already held. In order to avoid this, call
3569 unlock() or check whether the "locked" attribute is True
3570 or False before calling lock().
3572 if self._lock_obj is not None:
3573 raise self.AlreadyLocked((self._lock_obj,))
3575 self._lock_obj = portage.locks.lockfile(
3576 self.pkg_path, wantnewlockfile=1)
3579 class AlreadyLocked(portage.exception.PortageException):
3583 if self._lock_obj is None:
3585 portage.locks.unlockfile(self._lock_obj)
3586 self._lock_obj = None
3589 class BinpkgVerifier(AsynchronousTask):
3590 __slots__ = ("logfile", "pkg",)
# start():
3594 Note: Unlike a normal AsynchronousTask.start() method,
# this one does all work synchronously. The returncode
3596 attribute will be set before it returns.
3600 root_config = pkg.root_config
3601 bintree = root_config.trees["bintree"]
# Temporarily redirect stdout/stderr into the log file while
# backgrounded, so digestCheck output lands in the build log.
3603 stdout_orig = sys.stdout
3604 stderr_orig = sys.stderr
3606 if self.background and self.logfile is not None:
3607 log_file = open(self.logfile, 'a')
3609 if log_file is not None:
3610 sys.stdout = log_file
3611 sys.stderr = log_file
3613 bintree.digestCheck(pkg)
3614 except portage.exception.FileNotFound:
3615 writemsg("!!! Fetching Binary failed " + \
3616 "for '%s'\n" % pkg.cpv, noiselevel=-1)
3618 except portage.exception.DigestException, e:
3619 writemsg("\n!!! Digest verification failed:\n",
3621 writemsg("!!! %s\n" % e.value[0],
3623 writemsg("!!! Reason: %s\n" % e.value[1],
3625 writemsg("!!! Got: %s\n" % e.value[2],
3627 writemsg("!!! Expected: %s\n" % e.value[3],
# On failure, move the bad file aside so a later fetch can retry.
3630 if rval != os.EX_OK:
3631 pkg_path = bintree.getname(pkg.cpv)
3632 head, tail = os.path.split(pkg_path)
3633 temp_filename = portage._checksum_failure_temp_file(head, tail)
3634 writemsg("File renamed to '%s'\n" % (temp_filename,),
# Restore the original streams before finishing.
3637 sys.stdout = stdout_orig
3638 sys.stderr = stderr_orig
3639 if log_file is not None:
3642 self.returncode = rval
3645 class BinpkgPrefetcher(CompositeTask):
# Background prefetch of one binary package: fetch, verify, and on
# success inject it into the local binary tree.
3647 __slots__ = ("pkg",) + \
3648 ("pkg_path", "_bintree",)
3651 self._bintree = self.pkg.root_config.trees["bintree"]
3652 fetcher = BinpkgFetcher(background=self.background,
3653 logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3654 scheduler=self.scheduler)
# Expose the destination path so waiters (e.g. Binpkg) can report it.
3655 self.pkg_path = fetcher.pkg_path
3656 self._start_task(fetcher, self._fetcher_exit)
3658 def _fetcher_exit(self, fetcher):
3660 if self._default_exit(fetcher) != os.EX_OK:
3664 verifier = BinpkgVerifier(background=self.background,
3665 logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3666 self._start_task(verifier, self._verifier_exit)
3668 def _verifier_exit(self, verifier):
3669 if self._default_exit(verifier) != os.EX_OK:
# Verified package becomes available to the binary tree.
3673 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3675 self._current_task = None
3676 self.returncode = os.EX_OK
3679 class BinpkgExtractorAsync(SpawnProcess):
# Extracts a .tbz2 payload into image_dir by spawning
# `bzip2 -dqc | tar -xp` through bash.
3681 __slots__ = ("image_dir", "pkg", "pkg_path")
3683 _shell_binary = portage.const.BASH_BINARY
# Paths are shell-quoted since they are interpolated into a shell command.
3686 self.args = [self._shell_binary, "-c",
3687 "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3688 (portage._shell_quote(self.pkg_path),
3689 portage._shell_quote(self.image_dir))]
3691 self.env = self.pkg.root_config.settings.environ()
3692 SpawnProcess._start(self)
3694 class MergeListItem(CompositeTask):
# One entry of the merge list: dispatches to EbuildBuild for source
# packages, Binpkg for binary packages, or PackageUninstall for
# uninstalls (executed later by merge()).
3697 TODO: For parallel scheduling, everything here needs asynchronous
3698 execution support (start, poll, and wait methods).
3701 __slots__ = ("args_set",
3702 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3703 "find_blockers", "logger", "mtimedb", "pkg",
3704 "pkg_count", "pkg_to_replace", "prefetcher",
3705 "settings", "statusMessage", "world_atom") + \
3711 build_opts = self.build_opts
3714 # uninstall, executed by self.merge()
3715 self.returncode = os.EX_OK
3719 args_set = self.args_set
3720 find_blockers = self.find_blockers
3721 logger = self.logger
3722 mtimedb = self.mtimedb
3723 pkg_count = self.pkg_count
3724 scheduler = self.scheduler
3725 settings = self.settings
3726 world_atom = self.world_atom
3727 ldpath_mtimes = mtimedb["ldpath"]
# Build the "(N of M) Emerging/Fetching ..." status line.
3729 action_desc = "Emerging"
3731 if pkg.type_name == "binary":
3732 action_desc += " binary"
3734 if build_opts.fetchonly:
3735 action_desc = "Fetching"
3737 msg = "%s (%s of %s) %s" % \
3739 colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3740 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3741 colorize("GOOD", pkg.cpv))
3744 msg += " %s %s" % (preposition, pkg.root)
3746 if not build_opts.pretend:
3747 self.statusMessage(msg)
3748 logger.log(" >>> emerge (%s of %s) %s to %s" % \
3749 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# Dispatch by package type; the subtask is kept in _install_task so
# merge() can complete the installation later.
3751 if pkg.type_name == "ebuild":
3753 build = EbuildBuild(args_set=args_set,
3754 background=self.background,
3755 config_pool=self.config_pool,
3756 find_blockers=find_blockers,
3757 ldpath_mtimes=ldpath_mtimes, logger=logger,
3758 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3759 prefetcher=self.prefetcher, scheduler=scheduler,
3760 settings=settings, world_atom=world_atom)
3762 self._install_task = build
3763 self._start_task(build, self._default_final_exit)
3766 elif pkg.type_name == "binary":
3768 binpkg = Binpkg(background=self.background,
3769 find_blockers=find_blockers,
3770 ldpath_mtimes=ldpath_mtimes, logger=logger,
3771 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3772 prefetcher=self.prefetcher, settings=settings,
3773 scheduler=scheduler, world_atom=world_atom)
3775 self._install_task = binpkg
3776 self._start_task(binpkg, self._default_final_exit)
# poll()/wait() delegate to the underlying install task.
3780 self._install_task.poll()
3781 return self.returncode
3784 self._install_task.wait()
3785 return self.returncode
# merge(): completes the operation — uninstall for "uninstall"
# entries, otherwise the install step of the dispatched task.
3790 build_opts = self.build_opts
3791 find_blockers = self.find_blockers
3792 logger = self.logger
3793 mtimedb = self.mtimedb
3794 pkg_count = self.pkg_count
3795 prefetcher = self.prefetcher
3796 scheduler = self.scheduler
3797 settings = self.settings
3798 world_atom = self.world_atom
3799 ldpath_mtimes = mtimedb["ldpath"]
3802 if not (build_opts.buildpkgonly or \
3803 build_opts.fetchonly or build_opts.pretend):
3805 uninstall = PackageUninstall(background=self.background,
3806 ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3807 pkg=pkg, scheduler=scheduler, settings=settings)
3810 retval = uninstall.wait()
3811 if retval != os.EX_OK:
3815 if build_opts.fetchonly or \
3816 build_opts.buildpkgonly:
3817 return self.returncode
3819 retval = self._install_task.install()
3822 class PackageMerge(AsynchronousTask):
3824 TODO: Implement asynchronous merge so that the scheduler can
3825 run while a merge is executing.
3828 __slots__ = ("merge",)
3832 pkg = self.merge.pkg
3833 pkg_count = self.merge.pkg_count
3836 action_desc = "Uninstalling"
3837 preposition = "from"
3839 action_desc = "Installing"
3842 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
3845 msg += " %s %s" % (preposition, pkg.root)
3847 if not self.merge.build_opts.fetchonly and \
3848 not self.merge.build_opts.pretend and \
3849 not self.merge.build_opts.buildpkgonly:
3850 self.merge.statusMessage(msg)
3852 self.returncode = self.merge.merge()
3855 class DependencyArg(object):
3856 def __init__(self, arg=None, root_config=None):
3858 self.root_config = root_config
3861 return str(self.arg)
3863 class AtomArg(DependencyArg):
3864 def __init__(self, atom=None, **kwargs):
3865 DependencyArg.__init__(self, **kwargs)
3867 if not isinstance(self.atom, portage.dep.Atom):
3868 self.atom = portage.dep.Atom(self.atom)
3869 self.set = (self.atom, )
class PackageArg(DependencyArg):
	"""Dependency argument built from an already-resolved Package.

	The matching atom is the exact =cpv of the given package; self.set
	holds it as a one-element tuple, mirroring the shape AtomArg uses.
	"""
	def __init__(self, package=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		exact_atom = portage.dep.Atom("=" + package.cpv)
		self.package = package
		self.atom = exact_atom
		self.set = (exact_atom, )
3878 class SetArg(DependencyArg):
3879 def __init__(self, set=None, **kwargs):
3880 DependencyArg.__init__(self, **kwargs)
3882 self.name = self.arg[len(SETPREFIX):]
3884 class Dependency(SlotObject):
3885 __slots__ = ("atom", "blocker", "depth",
3886 "parent", "onlydeps", "priority", "root")
3887 def __init__(self, **kwargs):
3888 SlotObject.__init__(self, **kwargs)
3889 if self.priority is None:
3890 self.priority = DepPriority()
3891 if self.depth is None:
3894 class BlockerCache(DictMixin):
3895 """This caches blockers of installed packages so that dep_check does not
3896 have to be done for every single installed package on every invocation of
3897 emerge. The cache is invalidated whenever it is detected that something
3898 has changed that might alter the results of dep_check() calls:
3899 1) the set of installed packages (including COUNTER) has changed
3900 2) the old-style virtuals have changed
3903 # Number of uncached packages to trigger cache update, since
3904 # it's wasteful to update it for every vdb change.
3905 _cache_threshold = 5
3907 class BlockerData(object):
3909 __slots__ = ("__weakref__", "atoms", "counter")
3911 def __init__(self, counter, atoms):
3912 self.counter = counter
3915 def __init__(self, myroot, vardb):
3917 self._virtuals = vardb.settings.getvirtuals()
3918 self._cache_filename = os.path.join(myroot,
3919 portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
3920 self._cache_version = "1"
3921 self._cache_data = None
3922 self._modified = set()
3927 f = open(self._cache_filename)
3928 mypickle = pickle.Unpickler(f)
3929 mypickle.find_global = None
3930 self._cache_data = mypickle.load()
3933 except (IOError, OSError, EOFError, pickle.UnpicklingError), e:
3934 if isinstance(e, pickle.UnpicklingError):
3935 writemsg("!!! Error loading '%s': %s\n" % \
3936 (self._cache_filename, str(e)), noiselevel=-1)
3939 cache_valid = self._cache_data and \
3940 isinstance(self._cache_data, dict) and \
3941 self._cache_data.get("version") == self._cache_version and \
3942 isinstance(self._cache_data.get("blockers"), dict)
3944 # Validate all the atoms and counters so that
3945 # corruption is detected as soon as possible.
3946 invalid_items = set()
3947 for k, v in self._cache_data["blockers"].iteritems():
3948 if not isinstance(k, basestring):
3949 invalid_items.add(k)
3952 if portage.catpkgsplit(k) is None:
3953 invalid_items.add(k)
3955 except portage.exception.InvalidData:
3956 invalid_items.add(k)
3958 if not isinstance(v, tuple) or \
3960 invalid_items.add(k)
3963 if not isinstance(counter, (int, long)):
3964 invalid_items.add(k)
3966 if not isinstance(atoms, (list, tuple)):
3967 invalid_items.add(k)
3969 invalid_atom = False
3971 if not isinstance(atom, basestring):
3974 if atom[:1] != "!" or \
3975 not portage.isvalidatom(
3976 atom, allow_blockers=True):
3980 invalid_items.add(k)
3983 for k in invalid_items:
3984 del self._cache_data["blockers"][k]
3985 if not self._cache_data["blockers"]:
3989 self._cache_data = {"version":self._cache_version}
3990 self._cache_data["blockers"] = {}
3991 self._cache_data["virtuals"] = self._virtuals
3992 self._modified.clear()
3995 """If the current user has permission and the internal blocker cache
3996 been updated, save it to disk and mark it unmodified. This is called
3997 by emerge after it has proccessed blockers for all installed packages.
3998 Currently, the cache is only written if the user has superuser
3999 privileges (since that's required to obtain a lock), but all users
4000 have read access and benefit from faster blocker lookups (as long as
4001 the entire cache is still valid). The cache is stored as a pickled
4002 dict object with the following format:
4006 "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4007 "virtuals" : vardb.settings.getvirtuals()
4010 if len(self._modified) >= self._cache_threshold and \
4013 f = portage.util.atomic_ofstream(self._cache_filename)
4014 pickle.dump(self._cache_data, f, -1)
4016 portage.util.apply_secpass_permissions(
4017 self._cache_filename, gid=portage.portage_gid, mode=0644)
4018 except (IOError, OSError), e:
4020 self._modified.clear()
4022 def __setitem__(self, cpv, blocker_data):
4024 Update the cache and mark it as modified for a future call to
4027 @param cpv: Package for which to cache blockers.
4029 @param blocker_data: An object with counter and atoms attributes.
4030 @type blocker_data: BlockerData
4032 self._cache_data["blockers"][cpv] = \
4033 (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4034 self._modified.add(cpv)
4037 if self._cache_data is None:
4038 # triggered by python-trace
4040 return iter(self._cache_data["blockers"])
4042 def __delitem__(self, cpv):
4043 del self._cache_data["blockers"][cpv]
4045 def __getitem__(self, cpv):
4048 @returns: An object with counter and atoms attributes.
4050 return self.BlockerData(*self._cache_data["blockers"][cpv])
4053 """This needs to be implemented so that self.__repr__() doesn't raise
4054 an AttributeError."""
4057 class BlockerDB(object):
4059 def __init__(self, root_config):
4060 self._root_config = root_config
4061 self._vartree = root_config.trees["vartree"]
4062 self._portdb = root_config.trees["porttree"].dbapi
4064 self._dep_check_trees = None
4065 self._fake_vartree = None
4067 def _get_fake_vartree(self, acquire_lock=0):
4068 fake_vartree = self._fake_vartree
4069 if fake_vartree is None:
4070 fake_vartree = FakeVartree(self._root_config,
4071 acquire_lock=acquire_lock)
4072 self._fake_vartree = fake_vartree
4073 self._dep_check_trees = { self._vartree.root : {
4074 "porttree" : fake_vartree,
4075 "vartree" : fake_vartree,
4078 fake_vartree.sync(acquire_lock=acquire_lock)
4081 def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4082 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4083 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4084 settings = self._vartree.settings
4085 stale_cache = set(blocker_cache)
4086 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4087 dep_check_trees = self._dep_check_trees
4088 vardb = fake_vartree.dbapi
4089 installed_pkgs = list(vardb)
4091 for inst_pkg in installed_pkgs:
4092 stale_cache.discard(inst_pkg.cpv)
4093 cached_blockers = blocker_cache.get(inst_pkg.cpv)
4094 if cached_blockers is not None and \
4095 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4096 cached_blockers = None
4097 if cached_blockers is not None:
4098 blocker_atoms = cached_blockers.atoms
4100 # Use aux_get() to trigger FakeVartree global
4101 # updates on *DEPEND when appropriate.
4102 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4104 portage.dep._dep_check_strict = False
4105 success, atoms = portage.dep_check(depstr,
4106 vardb, settings, myuse=inst_pkg.use.enabled,
4107 trees=dep_check_trees, myroot=inst_pkg.root)
4109 portage.dep._dep_check_strict = True
4111 pkg_location = os.path.join(inst_pkg.root,
4112 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4113 portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4114 (pkg_location, atoms), noiselevel=-1)
4117 blocker_atoms = [atom for atom in atoms \
4118 if atom.startswith("!")]
4119 blocker_atoms.sort()
4120 counter = long(inst_pkg.metadata["COUNTER"])
4121 blocker_cache[inst_pkg.cpv] = \
4122 blocker_cache.BlockerData(counter, blocker_atoms)
4123 for cpv in stale_cache:
4124 del blocker_cache[cpv]
4125 blocker_cache.flush()
4127 blocker_parents = digraph()
4129 for pkg in installed_pkgs:
4130 for blocker_atom in blocker_cache[pkg.cpv].atoms:
4131 blocker_atom = blocker_atom.lstrip("!")
4132 blocker_atoms.append(blocker_atom)
4133 blocker_parents.add(blocker_atom, pkg)
4135 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4136 blocking_pkgs = set()
4137 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4138 blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4140 # Check for blockers in the other direction.
4141 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4143 portage.dep._dep_check_strict = False
4144 success, atoms = portage.dep_check(depstr,
4145 vardb, settings, myuse=new_pkg.use.enabled,
4146 trees=dep_check_trees, myroot=new_pkg.root)
4148 portage.dep._dep_check_strict = True
4150 # We should never get this far with invalid deps.
4151 show_invalid_depstring_notice(new_pkg, depstr, atoms)
4154 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4157 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4158 for inst_pkg in installed_pkgs:
4160 blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4161 except (portage.exception.InvalidDependString, StopIteration):
4163 blocking_pkgs.add(inst_pkg)
4165 return blocking_pkgs
4167 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4169 msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4170 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4171 p_type, p_root, p_key, p_status = parent_node
4173 if p_status == "nomerge":
4174 category, pf = portage.catsplit(p_key)
4175 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4176 msg.append("Portage is unable to process the dependencies of the ")
4177 msg.append("'%s' package. " % p_key)
4178 msg.append("In order to correct this problem, the package ")
4179 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4180 msg.append("As a temporary workaround, the --nodeps option can ")
4181 msg.append("be used to ignore all dependencies. For reference, ")
4182 msg.append("the problematic dependencies can be found in the ")
4183 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4185 msg.append("This package can not be installed. ")
4186 msg.append("Please notify the '%s' package maintainer " % p_key)
4187 msg.append("about this problem.")
4189 msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4190 writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4192 class PackageVirtualDbapi(portage.dbapi):
4194 A dbapi-like interface class that represents the state of the installed
4195 package database as new packages are installed, replacing any packages
4196 that previously existed in the same slot. The main difference between
4197 this class and fakedbapi is that this one uses Package instances
4198 internally (passed in via cpv_inject() and cpv_remove() calls).
4200 def __init__(self, settings):
4201 portage.dbapi.__init__(self)
4202 self.settings = settings
4203 self._match_cache = {}
4209 Remove all packages.
4213 self._cp_map.clear()
4214 self._cpv_map.clear()
4217 obj = PackageVirtualDbapi(self.settings)
4218 obj._match_cache = self._match_cache.copy()
4219 obj._cp_map = self._cp_map.copy()
4220 for k, v in obj._cp_map.iteritems():
4221 obj._cp_map[k] = v[:]
4222 obj._cpv_map = self._cpv_map.copy()
4226 return self._cpv_map.itervalues()
4228 def __contains__(self, item):
4229 existing = self._cpv_map.get(item.cpv)
4230 if existing is not None and \
4235 def get(self, item, default=None):
4236 cpv = getattr(item, "cpv", None)
4240 type_name, root, cpv, operation = item
4242 existing = self._cpv_map.get(cpv)
4243 if existing is not None and \
4248 def match_pkgs(self, atom):
4249 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4251 def _clear_cache(self):
4252 if self._categories is not None:
4253 self._categories = None
4254 if self._match_cache:
4255 self._match_cache = {}
4257 def match(self, origdep, use_cache=1):
4258 result = self._match_cache.get(origdep)
4259 if result is not None:
4261 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4262 self._match_cache[origdep] = result
4265 def cpv_exists(self, cpv):
4266 return cpv in self._cpv_map
4268 def cp_list(self, mycp, use_cache=1):
4269 cachelist = self._match_cache.get(mycp)
4270 # cp_list() doesn't expand old-style virtuals
4271 if cachelist and cachelist[0].startswith(mycp):
4273 cpv_list = self._cp_map.get(mycp)
4274 if cpv_list is None:
4277 cpv_list = [pkg.cpv for pkg in cpv_list]
4278 self._cpv_sort_ascending(cpv_list)
4279 if not (not cpv_list and mycp.startswith("virtual/")):
4280 self._match_cache[mycp] = cpv_list
4284 return list(self._cp_map)
4287 return list(self._cpv_map)
4289 def cpv_inject(self, pkg):
4290 cp_list = self._cp_map.get(pkg.cp)
4293 self._cp_map[pkg.cp] = cp_list
4294 e_pkg = self._cpv_map.get(pkg.cpv)
4295 if e_pkg is not None:
4298 self.cpv_remove(e_pkg)
4299 for e_pkg in cp_list:
4300 if e_pkg.slot_atom == pkg.slot_atom:
4303 self.cpv_remove(e_pkg)
4306 self._cpv_map[pkg.cpv] = pkg
4309 def cpv_remove(self, pkg):
4310 old_pkg = self._cpv_map.get(pkg.cpv)
4313 self._cp_map[pkg.cp].remove(pkg)
4314 del self._cpv_map[pkg.cpv]
4317 def aux_get(self, cpv, wants):
4318 metadata = self._cpv_map[cpv].metadata
4319 return [metadata.get(x, "") for x in wants]
4321 def aux_update(self, cpv, values):
4322 self._cpv_map[cpv].metadata.update(values)
4325 class depgraph(object):
4327 pkg_tree_map = RootConfig.pkg_tree_map
4329 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4331 def __init__(self, settings, trees, myopts, myparams, spinner):
4332 self.settings = settings
4333 self.target_root = settings["ROOT"]
4334 self.myopts = myopts
4335 self.myparams = myparams
4337 if settings.get("PORTAGE_DEBUG", "") == "1":
4339 self.spinner = spinner
4340 self._running_root = trees["/"]["root_config"]
4341 self._opts_no_restart = Scheduler._opts_no_restart
4342 self.pkgsettings = {}
4343 # Maps slot atom to package for each Package added to the graph.
4344 self._slot_pkg_map = {}
4345 # Maps nodes to the reasons they were selected for reinstallation.
4346 self._reinstall_nodes = {}
4349 self._trees_orig = trees
4351 # Contains a filtered view of preferred packages that are selected
4352 # from available repositories.
4353 self._filtered_trees = {}
4354 # Contains installed packages and new packages that have been added
4356 self._graph_trees = {}
4357 # All Package instances
4358 self._pkg_cache = {}
4359 for myroot in trees:
4360 self.trees[myroot] = {}
4361 # Create a RootConfig instance that references
4362 # the FakeVartree instead of the real one.
4363 self.roots[myroot] = RootConfig(
4364 trees[myroot]["vartree"].settings,
4366 trees[myroot]["root_config"].setconfig)
4367 for tree in ("porttree", "bintree"):
4368 self.trees[myroot][tree] = trees[myroot][tree]
4369 self.trees[myroot]["vartree"] = \
4370 FakeVartree(trees[myroot]["root_config"],
4371 pkg_cache=self._pkg_cache)
4372 self.pkgsettings[myroot] = portage.config(
4373 clone=self.trees[myroot]["vartree"].settings)
4374 self._slot_pkg_map[myroot] = {}
4375 vardb = self.trees[myroot]["vartree"].dbapi
4376 preload_installed_pkgs = "--nodeps" not in self.myopts and \
4377 "--buildpkgonly" not in self.myopts
4378 # This fakedbapi instance will model the state that the vdb will
4379 # have after new packages have been installed.
4380 fakedb = PackageVirtualDbapi(vardb.settings)
4381 if preload_installed_pkgs:
4383 self.spinner.update()
4384 # This triggers metadata updates via FakeVartree.
4385 vardb.aux_get(pkg.cpv, [])
4386 fakedb.cpv_inject(pkg)
4388 # Now that the vardb state is cached in our FakeVartree,
4389 # we won't be needing the real vartree cache for awhile.
4390 # To make some room on the heap, clear the vardbapi
4392 trees[myroot]["vartree"].dbapi._clear_cache()
4395 self.mydbapi[myroot] = fakedb
4398 graph_tree.dbapi = fakedb
4399 self._graph_trees[myroot] = {}
4400 self._filtered_trees[myroot] = {}
4401 # Substitute the graph tree for the vartree in dep_check() since we
4402 # want atom selections to be consistent with package selections
4403 # have already been made.
4404 self._graph_trees[myroot]["porttree"] = graph_tree
4405 self._graph_trees[myroot]["vartree"] = graph_tree
4406 def filtered_tree():
4408 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4409 self._filtered_trees[myroot]["porttree"] = filtered_tree
4411 # Passing in graph_tree as the vartree here could lead to better
4412 # atom selections in some cases by causing atoms for packages that
4413 # have been added to the graph to be preferred over other choices.
4414 # However, it can trigger atom selections that result in
4415 # unresolvable direct circular dependencies. For example, this
4416 # happens with gwydion-dylan which depends on either itself or
4417 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4418 # gwydion-dylan-bin needs to be selected in order to avoid a
4419 # an unresolvable direct circular dependency.
4421 # To solve the problem described above, pass in "graph_db" so that
4422 # packages that have been added to the graph are distinguishable
4423 # from other available packages and installed packages. Also, pass
4424 # the parent package into self._select_atoms() calls so that
4425 # unresolvable direct circular dependencies can be detected and
4426 # avoided when possible.
4427 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4428 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4431 portdb = self.trees[myroot]["porttree"].dbapi
4432 bindb = self.trees[myroot]["bintree"].dbapi
4433 vardb = self.trees[myroot]["vartree"].dbapi
4434 # (db, pkg_type, built, installed, db_keys)
4435 if "--usepkgonly" not in self.myopts:
4436 db_keys = list(portdb._aux_cache_keys)
4437 dbs.append((portdb, "ebuild", False, False, db_keys))
4438 if "--usepkg" in self.myopts:
4439 db_keys = list(bindb._aux_cache_keys)
4440 dbs.append((bindb, "binary", True, False, db_keys))
4441 db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4442 dbs.append((vardb, "installed", True, True, db_keys))
4443 self._filtered_trees[myroot]["dbs"] = dbs
4444 if "--usepkg" in self.myopts:
4445 self.trees[myroot]["bintree"].populate(
4446 "--getbinpkg" in self.myopts,
4447 "--getbinpkgonly" in self.myopts)
4450 self.digraph=portage.digraph()
4451 # contains all sets added to the graph
4453 # contains atoms given as arguments
4454 self._sets["args"] = InternalPackageSet()
4455 # contains all atoms from all sets added to the graph, including
4456 # atoms given as arguments
4457 self._set_atoms = InternalPackageSet()
4458 self._atom_arg_map = {}
4459 # contains all nodes pulled in by self._set_atoms
4460 self._set_nodes = set()
4461 # Contains only Blocker -> Uninstall edges
4462 self._blocker_uninstalls = digraph()
4463 # Contains only Package -> Blocker edges
4464 self._blocker_parents = digraph()
4465 # Contains only irrelevant Package -> Blocker edges
4466 self._irrelevant_blockers = digraph()
4467 # Contains only unsolvable Package -> Blocker edges
4468 self._unsolvable_blockers = digraph()
4469 self._slot_collision_info = {}
4470 # Slot collision nodes are not allowed to block other packages since
4471 # blocker validation is only able to account for one package per slot.
4472 self._slot_collision_nodes = set()
4473 self._parent_atoms = {}
4474 self._slot_conflict_parent_atoms = set()
4475 self._serialized_tasks_cache = None
4476 self._scheduler_graph = None
4477 self._displayed_list = None
4478 self._pprovided_args = []
4479 self._missing_args = []
4480 self._masked_installed = set()
4481 self._unsatisfied_deps_for_display = []
4482 self._unsatisfied_blockers_for_display = None
4483 self._circular_deps_for_display = None
4484 self._dep_stack = []
4485 self._unsatisfied_deps = []
4486 self._initially_unsatisfied_deps = []
4487 self._ignored_deps = []
4488 self._required_set_names = set(["system", "world"])
4489 self._select_atoms = self._select_atoms_highest_available
4490 self._select_package = self._select_pkg_highest_available
4491 self._highest_pkg_cache = {}
4493 def _show_slot_collision_notice(self):
4494 """Show an informational message advising the user to mask one of the
4495 the packages. In some cases it may be possible to resolve this
4496 automatically, but support for backtracking (removal nodes that have
4497 already been selected) will be required in order to handle all possible
4501 if not self._slot_collision_info:
4504 self._show_merge_list()
4507 msg.append("\n!!! Multiple package instances within a single " + \
4508 "package slot have been pulled\n")
4509 msg.append("!!! into the dependency graph, resulting" + \
4510 " in a slot conflict:\n\n")
4512 # Max number of parents shown, to avoid flooding the display.
4514 explanation_columns = 70
4516 for (slot_atom, root), slot_nodes \
4517 in self._slot_collision_info.iteritems():
4518 msg.append(str(slot_atom))
4521 for node in slot_nodes:
4523 msg.append(str(node))
4524 parent_atoms = self._parent_atoms.get(node)
4527 # Prefer conflict atoms over others.
4528 for parent_atom in parent_atoms:
4529 if len(pruned_list) >= max_parents:
4531 if parent_atom in self._slot_conflict_parent_atoms:
4532 pruned_list.add(parent_atom)
4534 # If this package was pulled in by conflict atoms then
4535 # show those alone since those are the most interesting.
4537 # When generating the pruned list, prefer instances
4538 # of DependencyArg over instances of Package.
4539 for parent_atom in parent_atoms:
4540 if len(pruned_list) >= max_parents:
4542 parent, atom = parent_atom
4543 if isinstance(parent, DependencyArg):
4544 pruned_list.add(parent_atom)
4545 # Prefer Packages instances that themselves have been
4546 # pulled into collision slots.
4547 for parent_atom in parent_atoms:
4548 if len(pruned_list) >= max_parents:
4550 parent, atom = parent_atom
4551 if isinstance(parent, Package) and \
4552 (parent.slot_atom, parent.root) \
4553 in self._slot_collision_info:
4554 pruned_list.add(parent_atom)
4555 for parent_atom in parent_atoms:
4556 if len(pruned_list) >= max_parents:
4558 pruned_list.add(parent_atom)
4559 omitted_parents = len(parent_atoms) - len(pruned_list)
4560 parent_atoms = pruned_list
4561 msg.append(" pulled in by\n")
4562 for parent_atom in parent_atoms:
4563 parent, atom = parent_atom
4564 msg.append(2*indent)
4565 if isinstance(parent,
4566 (PackageArg, AtomArg)):
4567 # For PackageArg and AtomArg types, it's
4568 # redundant to display the atom attribute.
4569 msg.append(str(parent))
4571 # Display the specific atom from SetArg or
4573 msg.append("%s required by %s" % (atom, parent))
4576 msg.append(2*indent)
4577 msg.append("(and %d more)\n" % omitted_parents)
4579 msg.append(" (no parents)\n")
4581 explanation = self._slot_conflict_explanation(slot_nodes)
4584 msg.append(indent + "Explanation:\n\n")
4585 for line in textwrap.wrap(explanation, explanation_columns):
4586 msg.append(2*indent + line + "\n")
4589 sys.stderr.write("".join(msg))
4592 explanations_for_all = explanations == len(self._slot_collision_info)
4594 if explanations_for_all or "--quiet" in self.myopts:
4598 msg.append("It may be possible to solve this problem ")
4599 msg.append("by using package.mask to prevent one of ")
4600 msg.append("those packages from being selected. ")
4601 msg.append("However, it is also possible that conflicting ")
4602 msg.append("dependencies exist such that they are impossible to ")
4603 msg.append("satisfy simultaneously. If such a conflict exists in ")
4604 msg.append("the dependencies of two different packages, then those ")
4605 msg.append("packages can not be installed simultaneously.")
4607 from formatter import AbstractFormatter, DumbWriter
4608 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4610 f.add_flowing_data(x)
4614 msg.append("For more information, see MASKED PACKAGES ")
4615 msg.append("section in the emerge man page or refer ")
4616 msg.append("to the Gentoo Handbook.")
4618 f.add_flowing_data(x)
4622 def _slot_conflict_explanation(self, slot_nodes):
4624 When a slot conflict occurs due to USE deps, there are a few
4625 different cases to consider:
4627 1) New USE are correctly set but --newuse wasn't requested so an
4628 installed package with incorrect USE happened to get pulled
4629 into graph before the new one.
4631 2) New USE are incorrectly set but an installed package has correct
4632 USE so it got pulled into the graph, and a new instance also got
4633 pulled in due to --newuse or an upgrade.
4635 3) Multiple USE deps exist that can't be satisfied simultaneously,
4636 and multiple package instances got pulled into the same slot to
4637 satisfy the conflicting deps.
4639 Currently, explanations and suggested courses of action are generated
4640 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4643 if len(slot_nodes) != 2:
4644 # Suggestions are only implemented for
4645 # conflicts between two packages.
4648 all_conflict_atoms = self._slot_conflict_parent_atoms
4650 matched_atoms = None
4651 unmatched_node = None
4652 for node in slot_nodes:
4653 parent_atoms = self._parent_atoms.get(node)
4654 if not parent_atoms:
4655 # Normally, there are always parent atoms. If there are
4656 # none then something unexpected is happening and there's
4657 # currently no suggestion for this case.
4659 conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4660 for parent_atom in conflict_atoms:
4661 parent, atom = parent_atom
4663 # Suggestions are currently only implemented for cases
4664 # in which all conflict atoms have USE deps.
4667 if matched_node is not None:
4668 # If conflict atoms match multiple nodes
4669 # then there's no suggestion.
4672 matched_atoms = conflict_atoms
4674 if unmatched_node is not None:
4675 # Neither node is matched by conflict atoms, and
4676 # there is no suggestion for this case.
4678 unmatched_node = node
4680 if matched_node is None or unmatched_node is None:
4681 # This shouldn't happen.
4684 if unmatched_node.installed and not matched_node.installed:
4685 return "New USE are correctly set, but --newuse wasn't" + \
4686 " requested, so an installed package with incorrect USE " + \
4687 "happened to get pulled into the dependency graph. " + \
4688 "In order to solve " + \
4689 "this, either specify the --newuse option or explicitly " + \
4690 " reinstall '%s'." % matched_node.slot_atom
4692 if matched_node.installed and not unmatched_node.installed:
4693 atoms = sorted(set(atom for parent, atom in matched_atoms))
4694 explanation = ("New USE for '%s' are incorrectly set. " + \
4695 "In order to solve this, adjust USE to satisfy '%s'") % \
4696 (matched_node.slot_atom, atoms[0])
4698 for atom in atoms[1:-1]:
4699 explanation += ", '%s'" % (atom,)
4702 explanation += " and '%s'" % (atoms[-1],)
4708 def _process_slot_conflicts(self):
4710 Process slot conflict data to identify specific atoms which
4711 lead to conflict. These atoms only match a subset of the
4712 packages that have been pulled into a given slot.
4714 for (slot_atom, root), slot_nodes \
4715 in self._slot_collision_info.iteritems():
4717 all_parent_atoms = set()
4718 for pkg in slot_nodes:
4719 parent_atoms = self._parent_atoms.get(pkg)
4720 if not parent_atoms:
4722 all_parent_atoms.update(parent_atoms)
4724 for pkg in slot_nodes:
4725 parent_atoms = self._parent_atoms.get(pkg)
4726 if parent_atoms is None:
4727 parent_atoms = set()
4728 self._parent_atoms[pkg] = parent_atoms
4729 for parent_atom in all_parent_atoms:
4730 if parent_atom in parent_atoms:
4732 # Use package set for matching since it will match via
4733 # PROVIDE when necessary, while match_from_list does not.
4734 parent, atom = parent_atom
4735 atom_set = InternalPackageSet(
4736 initial_atoms=(atom,))
4737 if atom_set.findAtomForPackage(pkg):
4738 parent_atoms.add(parent_atom)
4740 self._slot_conflict_parent_atoms.add(parent_atom)
4742 def _reinstall_for_flags(self, forced_flags,
4743 orig_use, orig_iuse, cur_use, cur_iuse):
4744 """Return a set of flags that trigger reinstallation, or None if there
4745 are no such flags."""
4746 if "--newuse" in self.myopts:
4747 flags = set(orig_iuse.symmetric_difference(
4748 cur_iuse).difference(forced_flags))
4749 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4750 cur_iuse.intersection(cur_use)))
4753 elif "changed-use" == self.myopts.get("--reinstall"):
4754 flags = orig_iuse.intersection(orig_use).symmetric_difference(
4755 cur_iuse.intersection(cur_use))
4760 def _create_graph(self, allow_unsatisfied=False):
4761 dep_stack = self._dep_stack
4763 self.spinner.update()
4764 dep = dep_stack.pop()
4765 if isinstance(dep, Package):
4766 if not self._add_pkg_deps(dep,
4767 allow_unsatisfied=allow_unsatisfied):
4770 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
# NOTE(review): "# [...]" marks lines elided from this excerpt (extraction
# gaps); the visible code is kept verbatim, re-indented.
def _add_dep(self, dep, allow_unsatisfied=False):
    """Resolve a single Dependency: record blockers, select a matching
    package, and feed it back into the graph via _add_pkg."""
    debug = "--debug" in self.myopts
    buildpkgonly = "--buildpkgonly" in self.myopts
    nodeps = "--nodeps" in self.myopts
    empty = "empty" in self.myparams
    deep = "deep" in self.myparams
    update = "--update" in self.myopts and dep.depth <= 1
    # [...] blocker-branch condition partially elided
    if not buildpkgonly and \
        dep.parent not in self._slot_collision_nodes:
        if dep.parent.onlydeps:
            # It's safe to ignore blockers if the
            # parent is an --onlydeps node.
            # [...]
        # The blocker applies to the root where
        # the parent is or will be installed.
        blocker = Blocker(atom=dep.atom,
            eapi=dep.parent.metadata["EAPI"],
            root=dep.parent.root)
        self._blocker_parents.add(blocker, dep.parent)
    # [...]
    dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
        onlydeps=dep.onlydeps)
    # [...] presumably a "no package found" guard elided here
    if allow_unsatisfied:
        self._unsatisfied_deps.append(dep)
        # [...]
    self._unsatisfied_deps_for_display.append(
        ((dep.root, dep.atom), {"myparent":dep.parent}))
    # [...]
    # In some cases, dep_check will return deps that shouldn't
    # be processed any further, so they are identified and
    # discarded here. Try to discard as few as possible since
    # discarded dependencies reduce the amount of information
    # available for optimization of merge order.
    if dep.priority.satisfied and \
        not (existing_node or empty or deep or update):
        if dep.root == self.target_root:
            # [...] try: elided
            myarg = self._iter_atoms_for_pkg(dep_pkg).next()
            except StopIteration:
                # [...]
            except portage.exception.InvalidDependString:
                if not dep_pkg.installed:
                    # This shouldn't happen since the package
                    # should have been masked.
                    # [...]
        self._ignored_deps.append(dep)
        # [...]
    if not self._add_pkg(dep_pkg, dep):
        # [...] failure return elided
# NOTE(review): "# [...]" marks lines elided from this excerpt (extraction
# gaps); the visible code is kept verbatim, re-indented. Docstring
# delimiters lost in extraction have been restored.
def _add_pkg(self, pkg, dep):
    """
    Fills the digraph with nodes comprised of packages to merge.
    mybigkey is the package spec of the package to merge.
    myparent is the package depending on mybigkey ( or None )
    addme = Should we add this package to the digraph or are we just looking at it's deps?
    Think --onlydeps, we need to ignore packages in that case.
    """
    #IUSE-aware emerge -> USE DEP aware depgraph
    #"no downgrade" emerge
    # [...]
    myparent = dep.parent
    priority = dep.priority
    # [...]
    if priority is None:
        priority = DepPriority()
    # Ensure that the dependencies of the same package
    # are never processed more than once.
    previously_added = pkg in self.digraph
    # select the correct /var database that we'll be checking against
    vardbapi = self.trees[pkg.root]["vartree"].dbapi
    pkgsettings = self.pkgsettings[pkg.root]
    # [...] try: elided
    arg_atoms = list(self._iter_atoms_for_pkg(pkg))
    except portage.exception.InvalidDependString, e:
        if not pkg.installed:
            show_invalid_depstring_notice(
                pkg, pkg.metadata["PROVIDE"], str(e))
            # [...]
    if not pkg.onlydeps:
        if not pkg.installed and \
            "empty" not in self.myparams and \
            vardbapi.match(pkg.slot_atom):
            # Increase the priority of dependencies on packages that
            # are being rebuilt. This optimizes merge order so that
            # dependencies are rebuilt/updated as soon as possible,
            # which is needed especially when emerge is called by
            # revdep-rebuild since dependencies may be affected by ABI
            # breakage that has rendered them useless. Don't adjust
            # priority here when in "empty" mode since all packages
            # are being merged in that case.
            priority.rebuild = True
        existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
        slot_collision = False
        # [...] guard for existing_node elided
        existing_node_matches = pkg.cpv == existing_node.cpv
        if existing_node_matches and \
            pkg != existing_node and \
            dep.atom is not None:
            # Use package set for matching since it will match via
            # PROVIDE when necessary, while match_from_list does not.
            atom_set = InternalPackageSet(initial_atoms=[dep.atom])
            if not atom_set.findAtomForPackage(existing_node):
                existing_node_matches = False
        if existing_node_matches:
            # The existing node can be reused.
            # [...]
            for parent_atom in arg_atoms:
                parent, atom = parent_atom
                self.digraph.add(existing_node, parent,
                    # [...] priority argument elided
                self._add_parent_atom(existing_node, parent_atom)
            # If a direct circular dependency is not an unsatisfied
            # buildtime dependency then drop it here since otherwise
            # it can skew the merge order calculation in an unwanted
            # way.  [continuation of comment elided in excerpt]
            if existing_node != myparent or \
                (priority.buildtime and not priority.satisfied):
                self.digraph.addnode(existing_node, myparent,
                    # [...] priority argument elided
                if dep.atom is not None and dep.parent is not None:
                    self._add_parent_atom(existing_node,
                        (dep.parent, dep.atom))
            # [...] success return / else-branch elided
        # A slot collision has occurred.  Sometimes this coincides
        # with unresolvable blockers, so the slot collision will be
        # shown later if there are no unresolvable blockers.
        self._add_slot_conflict(pkg)
        slot_collision = True
        # [...]
        # Now add this node to the graph so that self.display()
        # can show use flags and --tree portage.output.  This node is
        # only being partially added to the graph.  It must not be
        # allowed to interfere with the other nodes that have been
        # added.  Do not overwrite data for existing nodes in
        # self.mydbapi since that data will be used for blocker
        # [...] remainder of comment elided
        # Even though the graph is now invalid, continue to process
        # dependencies so that things like --fetchonly can still
        # function despite collisions.
        # [...]
        self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
        self.mydbapi[pkg.root].cpv_inject(pkg)
        # [...]
        if not pkg.installed:
            # Allow this package to satisfy old-style virtuals in case it
            # doesn't already. Any pre-existing providers will be preferred
            # [...] try: elided
            pkgsettings.setinst(pkg.cpv, pkg.metadata)
            # For consistency, also update the global virtuals.
            settings = self.roots[pkg.root].settings
            # [...] presumably settings.unlock() — TODO confirm
            settings.setinst(pkg.cpv, pkg.metadata)
            # [...] presumably settings.lock() — TODO confirm
            except portage.exception.InvalidDependString, e:
                show_invalid_depstring_notice(
                    pkg, pkg.metadata["PROVIDE"], str(e))
                # [...]
    self._set_nodes.add(pkg)
    # [...]
    # Do this even when addme is False (--onlydeps) so that the
    # parent/child relationship is always known in case
    # self._show_slot_collision_notice() needs to be called later.
    self.digraph.add(pkg, myparent, priority=priority)
    if dep.atom is not None and dep.parent is not None:
        self._add_parent_atom(pkg, (dep.parent, dep.atom))
    # [...]
    for parent_atom in arg_atoms:
        parent, atom = parent_atom
        self.digraph.add(pkg, parent, priority=priority)
        self._add_parent_atom(pkg, parent_atom)
    # [...]
    """ This section determines whether we go deeper into dependencies or not.
    We want to go deeper on a few occasions:
    Installing package A, we need to make sure package A's deps are met.
    emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
    If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
    """
    dep_stack = self._dep_stack
    if "recurse" not in self.myparams:
        # [...] early return elided
    elif pkg.installed and \
        "deep" not in self.myparams:
        # Installed packages' deps are only re-examined under --deep;
        # otherwise they are parked on the ignored-deps list.
        dep_stack = self._ignored_deps
    # [...]
    self.spinner.update()
    # [...]
    if not previously_added:
        dep_stack.append(pkg)
    # [...] final return elided
4998 def _add_parent_atom(self, pkg, parent_atom):
4999 parent_atoms = self._parent_atoms.get(pkg)
5000 if parent_atoms is None:
5001 parent_atoms = set()
5002 self._parent_atoms[pkg] = parent_atoms
5003 parent_atoms.add(parent_atom)
# NOTE(review): "# [...]" marks lines elided from this excerpt (extraction
# gaps); the visible code is kept verbatim, re-indented.
def _add_slot_conflict(self, pkg):
    """Record pkg as a slot-collision node and group it with the other
    package(s) occupying the same (slot_atom, root)."""
    self._slot_collision_nodes.add(pkg)
    slot_key = (pkg.slot_atom, pkg.root)
    slot_nodes = self._slot_collision_info.get(slot_key)
    if slot_nodes is None:
        # [...] presumably: slot_nodes = set() — TODO confirm
    # Also record the package currently holding the slot in the graph.
    slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
    self._slot_collision_info[slot_key] = slot_nodes
    # [...]
# NOTE(review): "# [...]" marks lines elided from this excerpt (extraction
# gaps); the visible code is kept verbatim, re-indented.
def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
    """Expand pkg's DEPEND/RDEPEND/PDEPEND strings into Dependency
    objects and push each through _add_dep."""
    # [...]
    mytype = pkg.type_name
    # [...] local aliases (e.g. myroot, mykey, jbigkey) elided
    metadata = pkg.metadata
    myuse = pkg.use.enabled
    # [...]
    depth = pkg.depth + 1
    removal_action = "remove" in self.myparams
    # [...] edepend dict initialization elided
    depkeys = ["DEPEND","RDEPEND","PDEPEND"]
    # [...] loop header over depkeys elided
    edepend[k] = metadata[k]
    # [...]
    if not pkg.built and \
        "--buildpkgonly" in self.myopts and \
        "deep" not in self.myparams and \
        "empty" not in self.myparams:
        # --buildpkgonly without --deep/--emptytree only needs
        # build-time deps, so drop the runtime dep strings.
        edepend["RDEPEND"] = ""
        edepend["PDEPEND"] = ""
    bdeps_satisfied = False
    # [...]
    if pkg.built and not removal_action:
        if self.myopts.get("--with-bdeps", "n") == "y":
            # Pull in build time deps as requested, but marked them as
            # "satisfied" since they are not strictly required. This allows
            # more freedom in the merge order calculation for solving
            # circular dependencies. Don't convert to PDEPEND since that
            # could make --with-bdeps=y less effective if it is used to
            # adjust merge order to prevent built_with_use() calls from
            # [...] remainder of comment elided
            bdeps_satisfied = True
        # [...] else-branch header elided
            # built packages do not have build time dependencies.
            edepend["DEPEND"] = ""
    # [...]
    if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
        edepend["DEPEND"] = ""
    # [...] "deps" tuple construction partially elided; entries below
    ("/", edepend["DEPEND"],
        self._priority(buildtime=True, satisfied=bdeps_satisfied)),
    (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
    (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
    # [...]
    debug = "--debug" in self.myopts
    strict = mytype != "installed"
    # [...] try: elided
    for dep_root, dep_string, dep_priority in deps:
        # [...]
        # Decrease priority so that --buildpkgonly
        # hasallzeros() works correctly.
        dep_priority = DepPriority()
        # [...] debug guard elided
        print "Parent:    ", jbigkey
        print "Depstring:", dep_string
        print "Priority:", dep_priority
        vardb = self.roots[dep_root].trees["vartree"].dbapi
        # [...] try: elided
        selected_atoms = self._select_atoms(dep_root,
            dep_string, myuse=myuse, parent=pkg, strict=strict)
        except portage.exception.InvalidDependString, e:
            show_invalid_depstring_notice(jbigkey, dep_string, str(e))
            # [...]
        print "Candidates:", selected_atoms
        # [...]
        for atom in selected_atoms:
            # [...] try: elided
            atom = portage.dep.Atom(atom)
            # [...]
            mypriority = dep_priority.copy()
            if not atom.blocker and vardb.match(atom):
                # Already installed in this root, so the dep is
                # satisfied for merge-order purposes.
                mypriority.satisfied = True
            # [...]
            if not self._add_dep(Dependency(atom=atom,
                blocker=atom.blocker, depth=depth, parent=pkg,
                priority=mypriority, root=dep_root),
                allow_unsatisfied=allow_unsatisfied):
                # [...]
            except portage.exception.InvalidAtom, e:
                show_invalid_depstring_notice(
                    pkg, dep_string, str(e))
                # [...]
                if not pkg.installed:
                    # [...]
        print "Exiting...", jbigkey
    # [...]
    except portage.exception.AmbiguousPackageName, e:
        portage.writemsg("\n\n!!! An atom in the dependencies " + \
            "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
        # [...] loop over matched cpvs elided
        portage.writemsg(" %s\n" % cpv, noiselevel=-1)
        portage.writemsg("\n", noiselevel=-1)
        if mytype == "binary":
            # [...] writemsg call opening elided
            "!!! This binary package cannot be installed: '%s'\n" % \
                mykey, noiselevel=-1)
        elif mytype == "ebuild":
            portdb = self.roots[myroot].trees["porttree"].dbapi
            myebuild, mylocation = portdb.findname2(mykey)
            portage.writemsg("!!! This ebuild cannot be installed: " + \
                "'%s'\n" % myebuild, noiselevel=-1)
        portage.writemsg("!!! Please notify the package maintainer " + \
            "that atoms must be fully-qualified.\n", noiselevel=-1)
        # [...] final returns elided
def _priority(self, **kwargs):
    """Construct a dependency priority appropriate for the current mode.

    Uses UnmergeDepPriority when running a removal ("remove" in
    self.myparams), otherwise DepPriority.  Keyword arguments are
    forwarded to the chosen constructor.
    """
    if "remove" in self.myparams:
        priority_constructor = UnmergeDepPriority
    else:
        # Restored `else:` — the excerpt had lost this line, which left
        # the first assignment dead / the control flow broken.
        priority_constructor = DepPriority
    return priority_constructor(**kwargs)
# NOTE(review): "# [...]" marks lines elided from this excerpt (extraction
# gaps); the visible code is kept verbatim, re-indented. Docstring
# delimiters lost in extraction have been restored.
def _dep_expand(self, root_config, atom_without_category):
    """
    @param root_config: a root config instance
    @type root_config: RootConfig
    @param atom_without_category: an atom without a category component
    @type atom_without_category: String
    @returns: a list of atoms containing categories (possibly empty)
    """
    null_cp = portage.dep_getkey(insert_category_into_atom(
        atom_without_category, "null"))
    cat, atom_pn = portage.catsplit(null_cp)
    # [...] cp_set initialization elided
    # Collect every category/package name known to any configured db.
    for db, pkg_type, built, installed, db_keys in \
        self._filtered_trees[root_config.root]["dbs"]:
        cp_set.update(db.cp_all())
    for cp in list(cp_set):
        cat, pn = portage.catsplit(cp)
        # [...] filtering of non-matching package names elided
        cat, pn = portage.catsplit(cp)
        # Re-attach each surviving category to the category-less atom.
        deps.append(insert_category_into_atom(
            atom_without_category, cat))
        # [...] return elided
# NOTE(review): "# [...]" marks lines elided from this excerpt (extraction
# gaps); the visible code is kept verbatim, re-indented.
def _have_new_virt(self, root, atom_cp):
    """Check whether any configured db for root carries a package under
    category/package atom_cp (i.e. a new-style virtual exists)."""
    for db, pkg_type, built, installed, db_keys in \
        self._filtered_trees[root]["dbs"]:
        if db.cp_list(atom_cp):
            # [...] truthy result / return lines elided
# NOTE(review): "# [...]" marks lines elided from this excerpt (extraction
# gaps); the visible code is kept verbatim, re-indented.
def _iter_atoms_for_pkg(self, pkg):
    """Yield (arg, atom) pairs for each command-line argument atom that
    matches pkg, skipping atoms better served by another package."""
    # TODO: add multiple $ROOT support
    if pkg.root != self.target_root:
        # [...] early return elided
    atom_arg_map = self._atom_arg_map
    root_config = self.roots[pkg.root]
    for atom in self._set_atoms.iterAtomsForPackage(pkg):
        atom_cp = portage.dep_getkey(atom)
        if atom_cp != pkg.cp and \
            self._have_new_virt(pkg.root, atom_cp):
            # Old-style virtual match superseded by a new-style virtual.
            # [...]
        visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
        visible_pkgs.reverse() # descending order
        # [...] higher_slot initialization elided
        for visible_pkg in visible_pkgs:
            if visible_pkg.cp != atom_cp:
                # [...]
            if pkg >= visible_pkg:
                # This is descending order, and we're not
                # interested in any versions <= pkg given.
                # [...]
            if pkg.slot_atom != visible_pkg.slot_atom:
                # A higher version in a different slot exists.
                higher_slot = visible_pkg
                # [...]
        if higher_slot is not None:
            # [...] skip this atom, elided
        for arg in atom_arg_map[(atom, pkg.root)]:
            if isinstance(arg, PackageArg) and \
                # [...] arg/pkg mismatch guard and yield elided
# NOTE(review): "# [...]" marks lines elided from this excerpt (extraction
# gaps); the visible code is kept verbatim, re-indented.
def select_files(self, myfiles):
    """Given a list of .tbz2s, .ebuilds sets, and deps, create the
    appropriate depgraph and return a favorite list."""
    debug = "--debug" in self.myopts
    root_config = self.roots[self.target_root]
    sets = root_config.sets
    getSetAtoms = root_config.setconfig.getSetAtoms
    # [...] myfavorites initialization elided
    myroot = self.target_root
    dbs = self._filtered_trees[myroot]["dbs"]
    vardb = self.trees[myroot]["vartree"].dbapi
    real_vardb = self._trees_orig[myroot]["vartree"].dbapi
    portdb = self.trees[myroot]["porttree"].dbapi
    bindb = self.trees[myroot]["bintree"].dbapi
    pkgsettings = self.pkgsettings[myroot]
    # [...] args list initialization elided
    onlydeps = "--onlydeps" in self.myopts
    # [...] lookup_owners init and per-file loop header elided
    ext = os.path.splitext(x)[1]
    # [...] .tbz2 branch header elided
    if not os.path.exists(x):
        # Try the usual PKGDIR locations before giving up.
        # [...]
        os.path.join(pkgsettings["PKGDIR"], "All", x)):
        x = os.path.join(pkgsettings["PKGDIR"], "All", x)
    elif os.path.exists(
        os.path.join(pkgsettings["PKGDIR"], x)):
        x = os.path.join(pkgsettings["PKGDIR"], x)
    # [...] else-branch header elided
    print "\n\n!!! Binary package '"+str(x)+"' does not exist."
    print "!!! Please ensure the tbz2 exists as specified.\n"
    return 0, myfavorites
    mytbz2=portage.xpak.tbz2(x)
    mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
    if os.path.realpath(x) != \
        os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
        print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
        return 0, myfavorites
    db_keys = list(bindb._aux_cache_keys)
    metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
    pkg = Package(type_name="binary", root_config=root_config,
        cpv=mykey, built=True, metadata=metadata,
        # [...] remaining keyword arguments elided
    self._pkg_cache[pkg] = pkg
    args.append(PackageArg(arg=x, package=pkg,
        root_config=root_config))
    elif ext==".ebuild":
        ebuild_path = portage.util.normalize_path(os.path.abspath(x))
        pkgdir = os.path.dirname(ebuild_path)
        tree_root = os.path.dirname(os.path.dirname(pkgdir))
        cp = pkgdir[len(tree_root)+1:]
        e = portage.exception.PackageNotFound(
            ("%s is not in a valid portage tree " + \
            "hierarchy or does not exist") % x)
        if not portage.isvalidatom(cp):
            # [...] presumably raises e — TODO confirm
        cat = portage.catsplit(cp)[0]
        mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
        if not portage.isvalidatom("="+mykey):
            # [...] presumably raises e — TODO confirm
        ebuild_path = portdb.findname(mykey)
        # [...]
        if ebuild_path != os.path.join(os.path.realpath(tree_root),
            cp, os.path.basename(ebuild_path)):
            print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
            return 0, myfavorites
        if mykey not in portdb.xmatch(
            "match-visible", portage.dep_getkey(mykey)):
            print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
            print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
            print colorize("BAD", "*** page for details.")
            countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
                # [...] countdown message argument and else-branch elided
        raise portage.exception.PackageNotFound(
            "%s is not in a valid portage tree hierarchy or does not exist" % x)
        db_keys = list(portdb._aux_cache_keys)
        metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
        pkg = Package(type_name="ebuild", root_config=root_config,
            cpv=mykey, metadata=metadata, onlydeps=onlydeps)
        pkgsettings.setcpv(pkg)
        pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
        self._pkg_cache[pkg] = pkg
        args.append(PackageArg(arg=x, package=pkg,
            root_config=root_config))
    elif x.startswith(os.path.sep):
        if not x.startswith(myroot):
            portage.writemsg(("\n\n!!! '%s' does not start with" + \
                " $ROOT.\n") % x, noiselevel=-1)
            # [...]
        # Queue these up since it's most efficient to handle
        # multiple files in a single iter_owners() call.
        lookup_owners.append(x)
    # [...] else-branch header elided
    if x in ("system", "world"):
        # [...] presumably rewrites x with SETPREFIX — TODO confirm
    if x.startswith(SETPREFIX):
        s = x[len(SETPREFIX):]
        # [...] unknown-set guard elided
        raise portage.exception.PackageSetNotFound(s)
        # [...]
        # Recursively expand sets so that containment tests in
        # self._get_parent_sets() properly match atoms in nested
        # sets (like if world contains system).
        expanded_set = InternalPackageSet(
            initial_atoms=getSetAtoms(s))
        self._sets[s] = expanded_set
        args.append(SetArg(arg=x, set=expanded_set,
            root_config=root_config))
        myfavorites.append(x)
        # [...]
    if not is_valid_package_atom(x):
        portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
            # [...] noiselevel argument elided
        portage.writemsg("!!! Please check ebuild(5) for full details.\n")
        portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
        # [...] error return elided
    # Don't expand categories or old-style virtuals here unless
    # necessary. Expansion of old-style virtuals here causes at
    # least the following problems:
    #   1) It's more difficult to determine which set(s) an atom
    #      came from, if any.
    #   2) It takes away freedom from the resolver to choose other
    #      possible expansions when necessary.
    # [...] category-present guard elided
    args.append(AtomArg(arg=x, atom=x,
        root_config=root_config))
    # [...]
    expanded_atoms = self._dep_expand(root_config, x)
    installed_cp_set = set()
    for atom in expanded_atoms:
        atom_cp = portage.dep_getkey(atom)
        if vardb.cp_list(atom_cp):
            installed_cp_set.add(atom_cp)
    if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
        # Prefer the category that is already installed.
        installed_cp = iter(installed_cp_set).next()
        expanded_atoms = [atom for atom in expanded_atoms \
            if portage.dep_getkey(atom) == installed_cp]
    # [...]
    if len(expanded_atoms) > 1:
        # [...]
        ambiguous_package_name(x, expanded_atoms, root_config,
            self.spinner, self.myopts)
        return False, myfavorites
    # [...]
    atom = expanded_atoms[0]
    # [...] else-branch header elided
    null_atom = insert_category_into_atom(x, "null")
    null_cp = portage.dep_getkey(null_atom)
    cat, atom_pn = portage.catsplit(null_cp)
    virts_p = root_config.settings.get_virts_p().get(atom_pn)
    # [...] virts_p guard elided
    # Allow the depgraph to choose which virtual.
    atom = insert_category_into_atom(x, "virtual")
    # [...] else-branch header elided
    atom = insert_category_into_atom(x, "null")
    # [...]
    args.append(AtomArg(arg=x, atom=atom,
        root_config=root_config))
    # [...] lookup_owners guard and relative_paths init elided
    search_for_multiple = False
    if len(lookup_owners) > 1:
        search_for_multiple = True
    # [...]
    for x in lookup_owners:
        if not search_for_multiple and os.path.isdir(x):
            search_for_multiple = True
        relative_paths.append(x[len(myroot):])
    # [...] owners set initialization elided
    for pkg, relative_path in \
        real_vardb._owners.iter_owners(relative_paths):
        owners.add(pkg.mycpv)
        if not search_for_multiple:
            # [...] break / empty-owners guard elided
        portage.writemsg(("\n\n!!! '%s' is not claimed " + \
            "by any package.\n") % lookup_owners[0], noiselevel=-1)
        # [...] error return and per-owner loop header elided
        slot = vardb.aux_get(cpv, ["SLOT"])[0]
        # [...] missing-slot guard elided
        # portage now masks packages with missing slot, but it's
        # possible that one was installed by an older version
        atom = portage.cpv_getkey(cpv)
        # [...] else-branch header elided
        atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
        args.append(AtomArg(arg=atom, atom=atom,
            root_config=root_config))
    # [...]
    if "--update" in self.myopts:
        # Enable greedy SLOT atoms for atoms given as arguments.
        # This is currently disabled for sets since greedy SLOT
        # atoms could be a property of the set itself.
        # [...] greedy_atoms init and loop header elided
        # In addition to any installed slots, also try to pull
        # in the latest new slot that may be available.
        greedy_atoms.append(arg)
        if not isinstance(arg, (AtomArg, PackageArg)):
            # [...]
        atom_cp = portage.dep_getkey(arg.atom)
        # [...] slots set initialization elided
        for cpv in vardb.match(arg.atom):
            slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
        # [...] per-slot loop header elided
        greedy_atoms.append(
            AtomArg(arg=arg.arg, atom="%s:%s" % (atom_cp, slot),
                root_config=root_config))
        # [...]
    # Create the "args" package set from atoms and
    # packages given as arguments.
    args_set = self._sets["args"]
    # [...] per-arg loop header elided
    if not isinstance(arg, (AtomArg, PackageArg)):
        # [...]
    if myatom in args_set:
        # [...]
    args_set.add(myatom)
    myfavorites.append(myatom)
    self._set_atoms.update(chain(*self._sets.itervalues()))
    atom_arg_map = self._atom_arg_map
    # [...] per-arg loop header elided
    for atom in arg.set:
        atom_key = (atom, myroot)
        refs = atom_arg_map.get(atom_key)
        # [...] refs initialization elided
        atom_arg_map[atom_key] = refs
        # [...]
    pprovideddict = pkgsettings.pprovideddict
    # [...]
    portage.writemsg("\n", noiselevel=-1)
    # Order needs to be preserved since a feature of --nodeps
    # is to allow the user to force a specific merge order.
    # [...] args processing loop header elided
    for atom in arg.set:
        self.spinner.update()
        dep = Dependency(atom=atom, onlydeps=onlydeps,
            root=myroot, parent=arg)
        atom_cp = portage.dep_getkey(atom)
        # [...] try: elided
        pprovided = pprovideddict.get(portage.dep_getkey(atom))
        if pprovided and portage.match_from_list(atom, pprovided):
            # A provided package has been specified on the command line.
            self._pprovided_args.append((arg, atom))
            # [...]
        if isinstance(arg, PackageArg):
            if not self._add_pkg(arg.package, dep) or \
                not self._create_graph():
                sys.stderr.write(("\n\n!!! Problem resolving " + \
                    "dependencies for %s\n") % arg.arg)
                return 0, myfavorites
            # [...]
        portage.writemsg(" Arg: %s\n Atom: %s\n" % \
            (arg, atom), noiselevel=-1)
        pkg, existing_node = self._select_package(
            myroot, atom, onlydeps=onlydeps)
        # [...] no-match guard elided
        if not (isinstance(arg, SetArg) and \
            arg.name in ("system", "world")):
            self._unsatisfied_deps_for_display.append(
                ((myroot, atom), {}))
            return 0, myfavorites
        self._missing_args.append((arg, atom))
        # [...]
        if atom_cp != pkg.cp:
            # For old-style virtuals, we need to repeat the
            # package.provided check against the selected package.
            expanded_atom = atom.replace(atom_cp, pkg.cp)
            pprovided = pprovideddict.get(pkg.cp)
            # [...] conditional header partially elided
            portage.match_from_list(expanded_atom, pprovided):
            # A provided package has been
            # specified on the command line.
            self._pprovided_args.append((arg, atom))
            # [...]
        if pkg.installed and "selective" not in self.myparams:
            self._unsatisfied_deps_for_display.append(
                ((myroot, atom), {}))
            # Previous behavior was to bail out in this case, but
            # since the dep is satisfied by the installed package,
            # it's more friendly to continue building the graph
            # and just show a warning message. Therefore, only bail
            # out here if the atom is not from either the system or
            # [...] remainder of comment elided (world set)
            if not (isinstance(arg, SetArg) and \
                arg.name in ("system", "world")):
                return 0, myfavorites
        # [...]
        # Add the selected package to the graph as soon as possible
        # so that later dep_check() calls can use it as feedback
        # for making more consistent atom selections.
        if not self._add_pkg(pkg, dep):
            if isinstance(arg, SetArg):
                sys.stderr.write(("\n\n!!! Problem resolving " + \
                    "dependencies for %s from %s\n") % \
                    # [...] format args and else-branch elided
                sys.stderr.write(("\n\n!!! Problem resolving " + \
                    "dependencies for %s\n") % atom)
                return 0, myfavorites
    # [...]
    except portage.exception.MissingSignature, e:
        portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
        portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
        portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
        portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
        portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
        return 0, myfavorites
    except portage.exception.InvalidSignature, e:
        portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
        portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
        portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
        portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
        portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
        return 0, myfavorites
    except SystemExit, e:
        raise # Needed else can't exit
    except Exception, e:
        print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
        print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
        # [...]
    # Now that the root packages have been added to the graph,
    # process the dependencies.
    if not self._create_graph():
        return 0, myfavorites
    # [...] "missing" counter initialization elided
    if "--usepkgonly" in self.myopts:
        for xs in self.digraph.all_nodes():
            if not isinstance(xs, Package):
                # [...]
            if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
                # [...] missing-count increment elided
                print "Missing binary for:",xs[2]
    # [...] try: around validation elided
    except self._unknown_internal_error:
        return False, myfavorites
    # We're true here unless we are missing binaries.
    return (not missing,myfavorites)
def _select_atoms_from_graph(self, *pargs, **kwargs):
    """
    Prefer atoms matching packages that have already been
    added to the graph or those that are installed and have
    not been scheduled for replacement.
    """
    # Swap in the graph-backed trees so dep_check sees graph state.
    kwargs["trees"] = self._graph_trees
    return self._select_atoms_highest_available(*pargs, **kwargs)
# NOTE(review): "# [...]" marks lines elided from this excerpt (extraction
# gaps); the visible code is kept verbatim, re-indented.
def _select_atoms_highest_available(self, root, depstring,
    myuse=None, parent=None, strict=True, trees=None):
    """This will raise InvalidDependString if necessary. If trees is
    None then self._filtered_trees is used."""
    pkgsettings = self.pkgsettings[root]
    # [...] trees-is-None guard elided
    trees = self._filtered_trees
    # [...] try: elided — the pops/reset below look like its cleanup
    if parent is not None:
        # Stash the requesting package where dep_check can see it.
        trees[root]["parent"] = parent
    # [...] non-strict guard elided
    portage.dep._dep_check_strict = False
    mycheck = portage.dep_check(depstring, None,
        pkgsettings, myuse=myuse,
        myroot=root, trees=trees)
    # [...] finally: elided — restore module-global state
    if parent is not None:
        trees[root].pop("parent")
    portage.dep._dep_check_strict = True
    # [...] failure guard elided (mycheck[0] falsy)
    raise portage.exception.InvalidDependString(mycheck[1])
    selected_atoms = mycheck[1]
    return selected_atoms
# NOTE(review): "# [...]" marks lines elided from this excerpt (extraction
# gaps); the visible code is kept verbatim, re-indented.
def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
    """Print a diagnostic explaining why atom could not be satisfied:
    missing USE/IUSE flags, masked candidates, or nothing at all —
    followed by the parent chain that requested the dependency."""
    atom = portage.dep.Atom(atom)
    atom_set = InternalPackageSet(initial_atoms=(atom,))
    atom_without_use = atom
    # [...] guard elided (applies when atom carries USE deps)
    atom_without_use = portage.dep.remove_slot(atom)
    # [...] slot guard elided
    atom_without_use += ":" + atom.slot
    atom_without_use = portage.dep.Atom(atom_without_use)
    xinfo = '"%s"' % atom
    # [...] arg-override of xinfo elided
    # Discard null/ from failed cpv_expand category expansion.
    xinfo = xinfo.replace("null/", "")
    masked_packages = []
    # [...] missing_use list initialization elided
    missing_licenses = []
    have_eapi_mask = False
    pkgsettings = self.pkgsettings[root]
    implicit_iuse = pkgsettings._get_implicit_iuse()
    root_config = self.roots[root]
    portdb = self.roots[root].trees["porttree"].dbapi
    dbs = self._filtered_trees[root]["dbs"]
    for db, pkg_type, built, installed, db_keys in dbs:
        # [...]
        if hasattr(db, "xmatch"):
            cpv_list = db.xmatch("match-all", atom_without_use)
        # [...] else-branch header elided
        cpv_list = db.match(atom_without_use)
        # [...]
        for cpv in cpv_list:
            metadata, mreasons = get_mask_info(root_config, cpv,
                pkgsettings, db, pkg_type, built, installed, db_keys)
            if metadata is not None:
                pkg = Package(built=built, cpv=cpv,
                    installed=installed, metadata=metadata,
                    root_config=root_config)
                if pkg.cp != atom.cp:
                    # A cpv can be returned from dbapi.match() as an
                    # old-style virtual match even in cases when the
                    # package does not actually PROVIDE the virtual.
                    # Filter out any such false matches here.
                    if not atom_set.findAtomForPackage(pkg):
                        # [...]
                if atom.use and not mreasons:
                    # Candidate only fails because of USE deps.
                    missing_use.append(pkg)
                    # [...]
            masked_packages.append(
                (root_config, pkgsettings, cpv, metadata, mreasons))
    missing_use_reasons = []
    missing_iuse_reasons = []
    for pkg in missing_use:
        use = pkg.use.enabled
        iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
        iuse_re = re.compile("^(%s)$" % "|".join(iuse))
        # [...] missing_iuse list initialization elided
        for x in atom.use.required:
            if iuse_re.match(x) is None:
                missing_iuse.append(x)
        # [...] mreasons initialization / branch header elided
        mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
        missing_iuse_reasons.append((pkg, mreasons))
        # [...] else-branch header elided
        need_enable = sorted(atom.use.enabled.difference(use))
        need_disable = sorted(atom.use.disabled.intersection(use))
        if need_enable or need_disable:
            # [...] changes list initialization elided
            changes.extend(colorize("red", "+" + x) \
                for x in need_enable)
            changes.extend(colorize("blue", "-" + x) \
                for x in need_disable)
            mreasons.append("Change USE: %s" % " ".join(changes))
            missing_use_reasons.append((pkg, mreasons))
    # [...]
    if missing_iuse_reasons and not missing_use_reasons:
        missing_use_reasons = missing_iuse_reasons
    elif missing_use_reasons:
        # Only show the latest version.
        del missing_use_reasons[1:]
    # [...]
    if missing_use_reasons:
        print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
        print "!!! One of the following packages is required to complete your request:"
        for pkg, mreasons in missing_use_reasons:
            print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
    # [...]
    elif masked_packages:
        # [...] print-call opening elided; the next three lines are its
        # concatenated argument
        colorize("BAD", "All ebuilds that could satisfy ") + \
        colorize("INFORM", xinfo) + \
        colorize("BAD", " have been masked.")
        print "!!! One of the following masked packages is required to complete your request:"
        have_eapi_mask = show_masked_packages(masked_packages)
        # [...] have_eapi_mask guard elided
        msg = ("The current version of portage supports " + \
            "EAPI '%s'. You must upgrade to a newer version" + \
            " of portage before EAPI masked packages can" + \
            " be installed.") % portage.const.EAPI
        from textwrap import wrap
        for line in wrap(msg, 75):
            # [...] print of wrapped line elided
    # [...] else-branch header elided
    print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
    # [...]
    # Show parent nodes and the argument that pulled them in.
    traversed_nodes = set()
    # [...] node / msg initialization elided
    while node is not None:
        traversed_nodes.add(node)
        msg.append('(dependency required by "%s" [%s])' % \
            (colorize('INFORM', str(node.cpv)), node.type_name))
        # When traversing to parents, prefer arguments over packages
        # since arguments are root nodes. Never traverse the same
        # package twice, in order to prevent an infinite loop.
        selected_parent = None
        for parent in self.digraph.parent_nodes(node):
            if isinstance(parent, DependencyArg):
                msg.append('(dependency required by "%s" [argument])' % \
                    (colorize('INFORM', str(parent))))
                selected_parent = None
                # [...]
            if parent not in traversed_nodes:
                selected_parent = parent
        node = selected_parent
    # [...] final output of accumulated msg lines elided
# NOTE(review): "# [...]" marks lines elided from this excerpt (extraction
# gaps); the visible code is kept verbatim, re-indented.
def _select_pkg_highest_available(self, root, atom, onlydeps=False):
    """Memoizing wrapper around _select_pkg_highest_available_imp,
    keyed on (root, atom, onlydeps)."""
    cache_key = (root, atom, onlydeps)
    ret = self._highest_pkg_cache.get(cache_key)
    # [...] cache-hit branch header / unpacking elided
    if pkg and not existing:
        existing = self._slot_pkg_map[root].get(pkg.slot_atom)
        if existing and existing == pkg:
            # Update the cache to reflect that the
            # package has been added to the graph.
            # [...]
            self._highest_pkg_cache[cache_key] = ret
    # [...] cache-hit return elided
    ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
    self._highest_pkg_cache[cache_key] = ret
    # [...] final return elided
5759 settings = pkg.root_config.settings
5760 if visible(settings, pkg) and not (pkg.installed and \
5761 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
5762 pkg.root_config.visible_pkgs.cpv_inject(pkg)
5765 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
# Core package-selection routine: scan the configured dbs (ebuild/binary/
# installed, ordered by type preference) for the highest acceptable match
# of `atom` on `root`, returning (pkg, existing_node).
# NOTE(review): many interior lines are elided in this capture (the embedded
# source numbering has gaps throughout), so several branches below are
# missing their bodies; do not treat this block as contiguous code.
5766 root_config = self.roots[root]
5767 pkgsettings = self.pkgsettings[root]
5768 dbs = self._filtered_trees[root]["dbs"]
5769 vardb = self.roots[root].trees["vartree"].dbapi
5770 portdb = self.roots[root].trees["porttree"].dbapi
5771 # List of acceptable packages, ordered by type preference.
5772 matched_packages = []
5773 highest_version = None
5774 if not isinstance(atom, portage.dep.Atom):
5775 atom = portage.dep.Atom(atom)
5777 atom_set = InternalPackageSet(initial_atoms=(atom,))
5778 existing_node = None
5780 usepkgonly = "--usepkgonly" in self.myopts
5781 empty = "empty" in self.myparams
5782 selective = "selective" in self.myparams
5784 noreplace = "--noreplace" in self.myopts
5785 # Behavior of the "selective" parameter depends on
5786 # whether or not a package matches an argument atom.
5787 # If an installed package provides an old-style
5788 # virtual that is no longer provided by an available
5789 # package, the installed package may match an argument
5790 # atom even though none of the available packages do.
5791 # Therefore, "selective" logic does not consider
5792 # whether or not an installed package matches an
5793 # argument atom. It only considers whether or not
5794 # available packages match argument atoms, which is
5795 # represented by the found_available_arg flag.
5796 found_available_arg = False
# Two passes: first prefer packages already in the graph (existing nodes),
# then fall back to fresh selections.
5797 for find_existing_node in True, False:
5800 for db, pkg_type, built, installed, db_keys in dbs:
5803 if installed and not find_existing_node:
5804 want_reinstall = reinstall or empty or \
5805 (found_available_arg and not selective)
5806 if want_reinstall and matched_packages:
# xmatch("match-all", ...) is the porttree fast path; other dbapis
# only provide match().
5808 if hasattr(db, "xmatch"):
5809 cpv_list = db.xmatch("match-all", atom)
5811 cpv_list = db.match(atom)
5813 # USE=multislot can make an installed package appear as if
5814 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
5815 # won't do any good as long as USE=multislot is enabled since
5816 # the newly built package still won't have the expected slot.
5817 # Therefore, assume that such SLOT dependencies are already
5818 # satisfied rather than forcing a rebuild.
5819 if installed and not cpv_list and atom.slot:
5820 for cpv in db.match(atom.cp):
5821 slot_available = False
5822 for other_db, other_type, other_built, \
5823 other_installed, other_keys in dbs:
5826 other_db.aux_get(cpv, ["SLOT"])[0]:
5827 slot_available = True
5831 if not slot_available:
5833 inst_pkg = self._pkg(cpv, "installed",
5834 root_config, installed=installed)
5835 # Remove the slot from the atom and verify that
5836 # the package matches the resulting atom.
5837 atom_without_slot = portage.dep.remove_slot(atom)
5839 atom_without_slot += str(atom.use)
5840 atom_without_slot = portage.dep.Atom(atom_without_slot)
5841 if portage.match_from_list(
5842 atom_without_slot, [inst_pkg]):
5843 cpv_list = [inst_pkg.cpv]
5848 pkg_status = "merge"
5849 if installed or onlydeps:
5850 pkg_status = "nomerge"
5853 for cpv in cpv_list:
5854 # Make --noreplace take precedence over --newuse.
5855 if not installed and noreplace and \
5856 cpv in vardb.match(atom):
5857 # If the installed version is masked, it may
5858 # be necessary to look at lower versions,
5859 # in case there is a visible downgrade.
5861 reinstall_for_flags = None
5862 cache_key = (pkg_type, root, cpv, pkg_status)
5863 calculated_use = True
5864 pkg = self._pkg_cache.get(cache_key)
5866 calculated_use = False
5868 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
5871 pkg = Package(built=built, cpv=cpv,
5872 installed=installed, metadata=metadata,
5873 onlydeps=onlydeps, root_config=root_config,
5875 metadata = pkg.metadata
5876 if not built and ("?" in metadata["LICENSE"] or \
5877 "?" in metadata["PROVIDE"]):
5878 # This is avoided whenever possible because
5879 # it's expensive. It only needs to be done here
5880 # if it has an effect on visibility.
5881 pkgsettings.setcpv(pkg)
5882 metadata["USE"] = pkgsettings["PORTAGE_USE"]
5883 calculated_use = True
5884 self._pkg_cache[pkg] = pkg
5886 if not installed or (built and matched_packages):
5887 # Only enforce visibility on installed packages
5888 # if there is at least one other visible package
5889 # available. By filtering installed masked packages
5890 # here, packages that have been masked since they
5891 # were installed can be automatically downgraded
5892 # to an unmasked version.
5894 if not visible(pkgsettings, pkg):
5896 except portage.exception.InvalidDependString:
5900 # Enable upgrade or downgrade to a version
5901 # with visible KEYWORDS when the installed
5902 # version is masked by KEYWORDS, but never
5903 # reinstall the same exact version only due
5904 # to a KEYWORDS mask.
5905 if built and matched_packages:
5907 different_version = None
5908 for avail_pkg in matched_packages:
5909 if not portage.dep.cpvequal(
5910 pkg.cpv, avail_pkg.cpv):
5911 different_version = avail_pkg
5913 if different_version is not None:
5916 pkgsettings._getMissingKeywords(
5917 pkg.cpv, pkg.metadata):
5920 # If the ebuild no longer exists or it's
5921 # keywords have been dropped, reject built
5922 # instances (installed or binary).
5923 # If --usepkgonly is enabled, assume that
5924 # the ebuild status should be ignored.
5928 pkg.cpv, "ebuild", root_config)
5929 except portage.exception.PackageNotFound:
5932 if not visible(pkgsettings, pkg_eb):
5935 if not pkg.built and not calculated_use:
5936 # This is avoided whenever possible because
5938 pkgsettings.setcpv(pkg)
5939 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5941 if pkg.cp != atom.cp:
5942 # A cpv can be returned from dbapi.match() as an
5943 # old-style virtual match even in cases when the
5944 # package does not actually PROVIDE the virtual.
5945 # Filter out any such false matches here.
5946 if not atom_set.findAtomForPackage(pkg):
5950 if root == self.target_root:
5952 # Ebuild USE must have been calculated prior
5953 # to this point, in case atoms have USE deps.
5954 myarg = self._iter_atoms_for_pkg(pkg).next()
5955 except StopIteration:
5957 except portage.exception.InvalidDependString:
5959 # masked by corruption
5961 if not installed and myarg:
5962 found_available_arg = True
# Reject the candidate if the atom's USE deps are not satisfied
# by an unbuilt package's effective USE.
5964 if atom.use and not pkg.built:
5965 use = pkg.use.enabled
5966 if atom.use.enabled.difference(use):
5968 if atom.use.disabled.intersection(use):
5970 if pkg.cp == atom_cp:
5971 if highest_version is None:
5972 highest_version = pkg
5973 elif pkg > highest_version:
5974 highest_version = pkg
5975 # At this point, we've found the highest visible
5976 # match from the current repo. Any lower versions
5977 # from this repo are ignored, so the loop
5978 # will always end with a break statement below
5980 if find_existing_node:
5981 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
5984 if portage.dep.match_from_list(atom, [e_pkg]):
5985 if highest_version and \
5986 e_pkg.cp == atom_cp and \
5987 e_pkg < highest_version and \
5988 e_pkg.slot_atom != highest_version.slot_atom:
5989 # There is a higher version available in a
5990 # different slot, so this existing node is
5994 matched_packages.append(e_pkg)
5995 existing_node = e_pkg
5997 # Compare built package to current config and
5998 # reject the built package if necessary.
5999 if built and not installed and \
6000 ("--newuse" in self.myopts or \
6001 "--reinstall" in self.myopts):
6002 iuses = pkg.iuse.all
6003 old_use = pkg.use.enabled
6005 pkgsettings.setcpv(myeb)
6007 pkgsettings.setcpv(pkg)
6008 now_use = pkgsettings["PORTAGE_USE"].split()
6009 forced_flags = set()
6010 forced_flags.update(pkgsettings.useforce)
6011 forced_flags.update(pkgsettings.usemask)
6013 if myeb and not usepkgonly:
6014 cur_iuse = myeb.iuse.all
6015 if self._reinstall_for_flags(forced_flags,
6019 # Compare current config to installed package
6020 # and do not reinstall if possible.
6021 if not installed and \
6022 ("--newuse" in self.myopts or \
6023 "--reinstall" in self.myopts) and \
6024 cpv in vardb.match(atom):
6025 pkgsettings.setcpv(pkg)
6026 forced_flags = set()
6027 forced_flags.update(pkgsettings.useforce)
6028 forced_flags.update(pkgsettings.usemask)
6029 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6030 old_iuse = set(filter_iuse_defaults(
6031 vardb.aux_get(cpv, ["IUSE"])[0].split()))
6032 cur_use = pkgsettings["PORTAGE_USE"].split()
6033 cur_iuse = pkg.iuse.all
6034 reinstall_for_flags = \
6035 self._reinstall_for_flags(
6036 forced_flags, old_use, old_iuse,
6038 if reinstall_for_flags:
6042 matched_packages.append(pkg)
6043 if reinstall_for_flags:
6044 self._reinstall_nodes[pkg] = \
6048 if not matched_packages:
6051 if "--debug" in self.myopts:
6052 for pkg in matched_packages:
6053 portage.writemsg("%s %s\n" % \
6054 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6056 # Filter out any old-style virtual matches if they are
6057 # mixed with new-style virtual matches.
6058 cp = portage.dep_getkey(atom)
6059 if len(matched_packages) > 1 and \
6060 "virtual" == portage.catsplit(cp)[0]:
6061 for pkg in matched_packages:
6064 # Got a new-style virtual, so filter
6065 # out any old-style virtuals.
6066 matched_packages = [pkg for pkg in matched_packages \
6070 if len(matched_packages) > 1:
6071 bestmatch = portage.best(
6072 [pkg.cpv for pkg in matched_packages])
6073 matched_packages = [pkg for pkg in matched_packages \
6074 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6076 # ordered by type preference ("ebuild" type is the last resort)
6077 return matched_packages[-1], existing_node
6079 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6081 Select packages that have already been added to the graph or
6082 those that are installed and have not been scheduled for
# NOTE(review): docstring delimiters and several interior lines are elided
# in this capture (numbering gaps 6082->6085, 6086->6089, 6092->6095).
6085 graph_db = self._graph_trees[root]["porttree"].dbapi
6086 matches = graph_db.match(atom)
6089 cpv = matches[-1] # highest match
# Build the "cp:SLOT" key used by _slot_pkg_map.
6090 slot_atom = "%s:%s" % (portage.cpv_getkey(cpv),
6091 graph_db.aux_get(cpv, ["SLOT"])[0])
6092 e_pkg = self._slot_pkg_map[root].get(slot_atom)
6095 # Since this cpv exists in the graph_db,
6096 # we must have a cached Package instance.
6097 cache_key = ("installed", root, cpv, "nomerge")
6098 return (self._pkg_cache[cache_key], None)
6100 def _complete_graph(self):
6102 Add any deep dependencies of required sets (args, system, world) that
6103 have not been pulled into the graph yet. This ensures that the graph
6104 is consistent such that initially satisfied deep dependencies are not
6105 broken in the new graph. Initially unsatisfied dependencies are
6106 irrelevant since we only want to avoid breaking dependencies that are
6109 Since this method can consume enough time to disturb users, it is
6110 currently only enabled by the --complete-graph option.
# NOTE(review): docstring delimiters and some interior lines are elided in
# this capture (numbering gaps, e.g. 6113->6116, 6157->6158); early-return
# branches below are missing their bodies.
6112 if "--buildpkgonly" in self.myopts or \
6113 "recurse" not in self.myparams:
6116 if "complete" not in self.myparams:
6117 # Skip this to avoid consuming enough time to disturb users.
6120 # Put the depgraph into a mode that causes it to only
6121 # select packages that have already been added to the
6122 # graph or those that are installed and have not been
6123 # scheduled for replacement. Also, toggle the "deep"
6124 # parameter so that all dependencies are traversed and
6126 self._select_atoms = self._select_atoms_from_graph
6127 self._select_package = self._select_pkg_from_graph
6128 already_deep = "deep" in self.myparams
6129 if not already_deep:
6130 self.myparams.add("deep")
6132 for root in self.roots:
6133 required_set_names = self._required_set_names.copy()
6134 if root == self.target_root and \
6135 (already_deep or "empty" in self.myparams):
6136 required_set_names.difference_update(self._sets)
6137 if not required_set_names and not self._ignored_deps:
6139 root_config = self.roots[root]
6140 setconfig = root_config.setconfig
6142 # Reuse existing SetArg instances when available.
6143 for arg in self.digraph.root_nodes():
6144 if not isinstance(arg, SetArg):
6146 if arg.root_config != root_config:
6148 if arg.name in required_set_names:
6150 required_set_names.remove(arg.name)
6151 # Create new SetArg instances only when necessary.
6152 for s in required_set_names:
6153 expanded_set = InternalPackageSet(
6154 initial_atoms=setconfig.getSetAtoms(s))
6155 atom = SETPREFIX + s
6156 args.append(SetArg(arg=atom, set=expanded_set,
6157 root_config=root_config))
6158 vardb = root_config.trees["vartree"].dbapi
# Push every atom of each set arg onto the dep stack for traversal.
6160 for atom in arg.set:
6161 self._dep_stack.append(
6162 Dependency(atom=atom, root=root, parent=arg))
6163 if self._ignored_deps:
6164 self._dep_stack.extend(self._ignored_deps)
6165 self._ignored_deps = []
6166 if not self._create_graph(allow_unsatisfied=True):
6168 # Check the unsatisfied deps to see if any initially satisfied deps
6169 # will become unsatisfied due to an upgrade. Initially unsatisfied
6170 # deps are irrelevant since we only want to avoid breaking deps
6171 # that are initially satisfied.
6172 while self._unsatisfied_deps:
6173 dep = self._unsatisfied_deps.pop()
6174 matches = vardb.match_pkgs(dep.atom)
6176 self._initially_unsatisfied_deps.append(dep)
6178 # A scheduled installation broke a deep dependency.
6179 # Add the installed package to the graph so that it
6180 # will be appropriately reported as a slot collision
6181 # (possibly solvable via backtracking).
6182 pkg = matches[-1] # highest match
6183 if not self._add_pkg(pkg, dep):
6185 if not self._create_graph(allow_unsatisfied=True):
6189 def _pkg(self, cpv, type_name, root_config, installed=False):
6191 Get a package instance from the cache, or create a new
6192 one if necessary. Raises KeyError from aux_get if it
6193 fails for some reason (package does not exist or is
# NOTE(review): docstring delimiters and a few interior lines are elided in
# this capture (numbering gaps 6193->6198, 6200->6202, 6207->6209).
6198 operation = "nomerge"
6199 pkg = self._pkg_cache.get(
6200 (type_name, root_config.root, cpv, operation))
6202 tree_type = self.pkg_tree_map[type_name]
6203 db = root_config.trees[tree_type].dbapi
# Restrict metadata to the keys that the aux cache tracks for this tree.
6204 db_keys = list(self._trees_orig[root_config.root][
6205 tree_type].dbapi._aux_cache_keys)
6207 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6209 raise portage.exception.PackageNotFound(cpv)
6210 pkg = Package(cpv=cpv, metadata=metadata,
6211 root_config=root_config, installed=installed)
# Ebuilds get their effective USE computed from current settings.
6212 if type_name == "ebuild":
6213 settings = self.pkgsettings[root_config.root]
6214 settings.setcpv(pkg)
6215 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6216 self._pkg_cache[pkg] = pkg
6219 def validate_blockers(self):
6220 """Remove any blockers from the digraph that do not match any of the
6221 packages within the graph. If necessary, create hard deps to ensure
6222 correct merge order such that mutually blocking packages are never
6223 installed simultaneously."""
# NOTE(review): many interior lines are elided in this capture (the embedded
# source numbering has gaps throughout); several branches below are missing
# their bodies. Do not treat this block as contiguous code.
6225 if "--buildpkgonly" in self.myopts or \
6226 "--nodeps" in self.myopts:
6229 #if "deep" in self.myparams:
6231 # Pull in blockers from all installed packages that haven't already
6232 # been pulled into the depgraph. This is not enabled by default
6233 # due to the performance penalty that is incurred by all the
6234 # additional dep_check calls that are required.
6236 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6237 for myroot in self.trees:
6238 vardb = self.trees[myroot]["vartree"].dbapi
6239 portdb = self.trees[myroot]["porttree"].dbapi
6240 pkgsettings = self.pkgsettings[myroot]
6241 final_db = self.mydbapi[myroot]
6243 blocker_cache = BlockerCache(myroot, vardb)
# Anything left in stale_cache after the scan is purged below.
6244 stale_cache = set(blocker_cache)
6247 stale_cache.discard(cpv)
6248 pkg_in_graph = self.digraph.contains(pkg)
6250 # Check for masked installed packages. Only warn about
6251 # packages that are in the graph in order to avoid warning
6252 # about those that will be automatically uninstalled during
6253 # the merge process or by --depclean.
6255 if pkg_in_graph and not visible(pkgsettings, pkg):
6256 self._masked_installed.add(pkg)
6258 blocker_atoms = None
6264 self._blocker_parents.child_nodes(pkg))
6269 self._irrelevant_blockers.child_nodes(pkg))
6272 if blockers is not None:
6273 blockers = set(str(blocker.atom) \
6274 for blocker in blockers)
6276 # If this node has any blockers, create a "nomerge"
6277 # node for it so that they can be enforced.
6278 self.spinner.update()
6279 blocker_data = blocker_cache.get(cpv)
# A COUNTER mismatch means the cache entry is for a different
# build of this cpv, so it must be discarded.
6280 if blocker_data is not None and \
6281 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6284 # If blocker data from the graph is available, use
6285 # it to validate the cache and update the cache if
6287 if blocker_data is not None and \
6288 blockers is not None:
6289 if not blockers.symmetric_difference(
6290 blocker_data.atoms):
6294 if blocker_data is None and \
6295 blockers is not None:
6296 # Re-use the blockers from the graph.
6297 blocker_atoms = sorted(blockers)
6298 counter = long(pkg.metadata["COUNTER"])
6300 blocker_cache.BlockerData(counter, blocker_atoms)
6301 blocker_cache[pkg.cpv] = blocker_data
6305 blocker_atoms = blocker_data.atoms
6307 # Use aux_get() to trigger FakeVartree global
6308 # updates on *DEPEND when appropriate.
6309 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6310 # It is crucial to pass in final_db here in order to
6311 # optimize dep_check calls by eliminating atoms via
6312 # dep_wordreduce and dep_eval calls.
6314 portage.dep._dep_check_strict = False
6316 success, atoms = portage.dep_check(depstr,
6317 final_db, pkgsettings, myuse=pkg.use.enabled,
6318 trees=self._graph_trees, myroot=myroot)
6319 except Exception, e:
6320 if isinstance(e, SystemExit):
6322 # This is helpful, for example, if a ValueError
6323 # is thrown from cpv_expand due to multiple
6324 # matches (this can happen if an atom lacks a
6326 show_invalid_depstring_notice(
6327 pkg, depstr, str(e))
# Restore strict dep checking regardless of the outcome above.
6331 portage.dep._dep_check_strict = True
6333 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6334 if replacement_pkg and \
6335 replacement_pkg[0].operation == "merge":
6336 # This package is being replaced anyway, so
6337 # ignore invalid dependencies so as not to
6338 # annoy the user too much (otherwise they'd be
6339 # forced to manually unmerge it first).
6341 show_invalid_depstring_notice(pkg, depstr, atoms)
6343 blocker_atoms = [myatom for myatom in atoms \
6344 if myatom.startswith("!")]
6345 blocker_atoms.sort()
6346 counter = long(pkg.metadata["COUNTER"])
6347 blocker_cache[cpv] = \
6348 blocker_cache.BlockerData(counter, blocker_atoms)
6351 for atom in blocker_atoms:
6352 blocker = Blocker(atom=portage.dep.Atom(atom),
6353 eapi=pkg.metadata["EAPI"], root=myroot)
6354 self._blocker_parents.add(blocker, pkg)
6355 except portage.exception.InvalidAtom, e:
6356 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6357 show_invalid_depstring_notice(
6358 pkg, depstr, "Invalid Atom: %s" % (e,))
# Purge cache entries for packages no longer installed.
6360 for cpv in stale_cache:
6361 del blocker_cache[cpv]
6362 blocker_cache.flush()
6365 # Discard any "uninstall" tasks scheduled by previous calls
6366 # to this method, since those tasks may not make sense given
6367 # the current graph state.
6368 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6369 if previous_uninstall_tasks:
6370 self._blocker_uninstalls = digraph()
6371 self.digraph.difference_update(previous_uninstall_tasks)
6373 for blocker in self._blocker_parents.leaf_nodes():
6374 self.spinner.update()
6375 root_config = self.roots[blocker.root]
6376 virtuals = root_config.settings.getvirtuals()
6377 myroot = blocker.root
6378 initial_db = self.trees[myroot]["vartree"].dbapi
6379 final_db = self.mydbapi[myroot]
6381 provider_virtual = False
6382 if blocker.cp in virtuals and \
6383 not self._have_new_virt(blocker.root, blocker.cp):
6384 provider_virtual = True
# Expand an old-style virtual blocker into one atom per provider.
6386 if provider_virtual:
6388 for provider_entry in virtuals[blocker.cp]:
6390 portage.dep_getkey(provider_entry)
6391 atoms.append(blocker.atom.replace(
6392 blocker.cp, provider_cp))
6394 atoms = [blocker.atom]
6396 blocked_initial = []
6398 blocked_initial.extend(initial_db.match_pkgs(atom))
6402 blocked_final.extend(final_db.match_pkgs(atom))
6404 if not blocked_initial and not blocked_final:
6405 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6406 self._blocker_parents.remove(blocker)
6407 # Discard any parents that don't have any more blockers.
6408 for pkg in parent_pkgs:
6409 self._irrelevant_blockers.add(blocker, pkg)
6410 if not self._blocker_parents.child_nodes(pkg):
6411 self._blocker_parents.remove(pkg)
6413 for parent in self._blocker_parents.parent_nodes(blocker):
6414 unresolved_blocks = False
6415 depends_on_order = set()
6416 for pkg in blocked_initial:
6417 if pkg.slot_atom == parent.slot_atom:
6418 # TODO: Support blocks within slots in cases where it
6419 # might make sense. For example, a new version might
6420 # require that the old version be uninstalled at build
6423 if parent.installed:
6424 # Two currently installed packages conflict with
6425 # eachother. Ignore this case since the damage
6426 # is already done and this would be likely to
6427 # confuse users if displayed like a normal blocker.
6429 if parent.operation == "merge":
6430 # Maybe the blocked package can be replaced or simply
6431 # unmerged to resolve this block.
6432 depends_on_order.add((pkg, parent))
6434 # None of the above blocker resolutions techniques apply,
6435 # so apparently this one is unresolvable.
6436 unresolved_blocks = True
6437 for pkg in blocked_final:
6438 if pkg.slot_atom == parent.slot_atom:
6439 # TODO: Support blocks within slots.
6441 if parent.operation == "nomerge" and \
6442 pkg.operation == "nomerge":
6443 # This blocker will be handled the next time that a
6444 # merge of either package is triggered.
6447 # Maybe the blocking package can be
6448 # unmerged to resolve this block.
6449 if parent.operation == "merge" and pkg.installed:
6450 depends_on_order.add((pkg, parent))
6452 elif parent.operation == "nomerge":
6453 depends_on_order.add((parent, pkg))
6455 # None of the above blocker resolutions techniques apply,
6456 # so apparently this one is unresolvable.
6457 unresolved_blocks = True
6459 # Make sure we don't unmerge any package that has been pulled
6461 if not unresolved_blocks and depends_on_order:
6462 for inst_pkg, inst_task in depends_on_order:
6463 if self.digraph.contains(inst_pkg) and \
6464 self.digraph.parent_nodes(inst_pkg):
6465 unresolved_blocks = True
6468 if not unresolved_blocks and depends_on_order:
6469 for inst_pkg, inst_task in depends_on_order:
# Clone the installed package as an explicit uninstall task.
6470 uninst_task = Package(built=inst_pkg.built,
6471 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6472 metadata=inst_pkg.metadata,
6473 operation="uninstall",
6474 root_config=inst_pkg.root_config,
6475 type_name=inst_pkg.type_name)
6476 self._pkg_cache[uninst_task] = uninst_task
6477 # Enforce correct merge order with a hard dep.
6478 self.digraph.addnode(uninst_task, inst_task,
6479 priority=BlockerDepPriority.instance)
6480 # Count references to this blocker so that it can be
6481 # invalidated after nodes referencing it have been
6483 self._blocker_uninstalls.addnode(uninst_task, blocker)
6484 if not unresolved_blocks and not depends_on_order:
6485 self._irrelevant_blockers.add(blocker, parent)
6486 self._blocker_parents.remove_edge(blocker, parent)
6487 if not self._blocker_parents.parent_nodes(blocker):
6488 self._blocker_parents.remove(blocker)
6489 if not self._blocker_parents.child_nodes(parent):
6490 self._blocker_parents.remove(parent)
6491 if unresolved_blocks:
6492 self._unsolvable_blockers.add(blocker, parent)
6496 def _accept_blocker_conflicts(self):
# True when the selected options make blocker conflicts tolerable
# (nothing will actually be merged/unmerged, or deps are ignored).
# NOTE(review): the accumulator setup and return are elided in this
# capture (numbering gaps 6496->6498, 6500->6505).
6498 for x in ("--buildpkgonly", "--fetchonly",
6499 "--fetch-all-uri", "--nodeps", "--pretend"):
6500 if x in self.myopts:
6505 def _merge_order_bias(self, mygraph):
6506 """Order nodes from highest to lowest overall reference count for
6507 optimal leaf node selection."""
# NOTE(review): the node_info dict initialization is elided in this
# capture (numbering gap 6507->6509).
6509 for node in mygraph.order:
6510 node_info[node] = len(mygraph.parent_nodes(node))
# Python 2 cmp-style comparator: descending parent count.
6511 def cmp_merge_preference(node1, node2):
6512 return node_info[node2] - node_info[node1]
6513 mygraph.order.sort(cmp_merge_preference)
# NOTE(review): parameter `reversed` shadows the builtin; kept for
# backward compatibility with existing callers.
6515 def altlist(self, reversed=False):
# Serialize tasks lazily, retrying while conflict resolution invalidates
# the cached result.
6517 while self._serialized_tasks_cache is None:
6518 self._resolve_conflicts()
6520 self._serialized_tasks_cache, self._scheduler_graph = \
6521 self._serialize_tasks()
6522 except self._serialize_tasks_retry:
# Return a copy so callers cannot mutate the cache.
6525 retlist = self._serialized_tasks_cache[:]
6530 def schedulerGraph(self):
6532 The scheduler graph is identical to the normal one except that
6533 uninstall edges are reversed in specific cases that require
6534 conflicting packages to be temporarily installed simultaneously.
6535 This is intended for use by the Scheduler in its parallelization
6536 logic. It ensures that temporary simultaneous installation of
6537 conflicting packages is avoided when appropriate (especially for
6538 !!atom blockers), but allowed in specific cases that require it.
6540 Note that this method calls break_refs() which alters the state of
6541 internal Package instances such that this depgraph instance should
6542 not be used to perform any more calculations.
# NOTE(review): docstring delimiters and the lazy-build branch body are
# elided in this capture (numbering gaps 6542->6544, 6544->6546).
6544 if self._scheduler_graph is None:
6546 self.break_refs(self._scheduler_graph.order)
6547 return self._scheduler_graph
6549 def break_refs(self, nodes):
6551 Take a mergelist like that returned from self.altlist() and
6552 break any references that lead back to the depgraph. This is
6553 useful if you want to hold references to packages without
6554 also holding the depgraph on the heap.
# NOTE(review): docstring delimiters and the `for node in nodes:` loop
# header are elided in this capture (numbering gaps 6554->6557).
6557 if hasattr(node, "root_config"):
6558 # The FakeVartree references the _package_cache which
6559 # references the depgraph. So that Package instances don't
6560 # hold the depgraph and FakeVartree on the heap, replace
6561 # the RootConfig that references the FakeVartree with the
6562 # original RootConfig instance which references the actual
6564 node.root_config = \
6565 self._trees_orig[node.root_config.root]["root_config"]
6567 def _resolve_conflicts(self):
# Sanity-check the graph before serialization: a failure in either step
# indicates an internal inconsistency, not a user error.
6568 if not self._complete_graph():
6569 raise self._unknown_internal_error()
6571 if not self.validate_blockers():
6572 raise self._unknown_internal_error()
# Only run the (expensive) slot-conflict handler when conflicts exist.
6574 if self._slot_collision_info:
6575 self._process_slot_conflicts()
6577 def _serialize_tasks(self):
6579 if "--debug" in self.myopts:
6580 writemsg("\ndigraph:\n\n", noiselevel=-1)
6581 self.digraph.debug_print()
6582 writemsg("\n", noiselevel=-1)
6584 scheduler_graph = self.digraph.copy()
6585 mygraph=self.digraph.copy()
6586 # Prune "nomerge" root nodes if nothing depends on them, since
6587 # otherwise they slow down merge order calculation. Don't remove
6588 # non-root nodes since they help optimize merge order in some cases
6589 # such as revdep-rebuild.
6590 removed_nodes = set()
6592 for node in mygraph.root_nodes():
6593 if not isinstance(node, Package) or \
6594 node.installed or node.onlydeps:
6595 removed_nodes.add(node)
6597 self.spinner.update()
6598 mygraph.difference_update(removed_nodes)
6599 if not removed_nodes:
6601 removed_nodes.clear()
6602 self._merge_order_bias(mygraph)
6603 def cmp_circular_bias(n1, n2):
6605 RDEPEND is stronger than PDEPEND and this function
6606 measures such a strength bias within a circular
6607 dependency relationship.
6609 n1_n2_medium = n2 in mygraph.child_nodes(n1,
6610 ignore_priority=DepPriority.MEDIUM_SOFT)
6611 n2_n1_medium = n1 in mygraph.child_nodes(n2,
6612 ignore_priority=DepPriority.MEDIUM_SOFT)
6613 if n1_n2_medium == n2_n1_medium:
6618 myblocker_uninstalls = self._blocker_uninstalls.copy()
6620 # Contains uninstall tasks that have been scheduled to
6621 # occur after overlapping blockers have been installed.
6622 scheduled_uninstalls = set()
6623 # Contains any Uninstall tasks that have been ignored
6624 # in order to avoid the circular deps code path. These
6625 # correspond to blocker conflicts that could not be
6627 ignored_uninstall_tasks = set()
6628 have_uninstall_task = False
6629 complete = "complete" in self.myparams
6630 myblocker_parents = self._blocker_parents.copy()
6633 def get_nodes(**kwargs):
6635 Returns leaf nodes excluding Uninstall instances
6636 since those should be executed as late as possible.
6638 return [node for node in mygraph.leaf_nodes(**kwargs) \
6639 if isinstance(node, Package) and \
6640 (node.operation != "uninstall" or \
6641 node in scheduled_uninstalls)]
6643 # sys-apps/portage needs special treatment if ROOT="/"
6644 running_root = self._running_root.root
6645 from portage.const import PORTAGE_PACKAGE_ATOM
6646 runtime_deps = InternalPackageSet(
6647 initial_atoms=[PORTAGE_PACKAGE_ATOM])
6648 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
6649 PORTAGE_PACKAGE_ATOM)
6650 replacement_portage = self.mydbapi[running_root].match_pkgs(
6651 PORTAGE_PACKAGE_ATOM)
6654 running_portage = running_portage[0]
6656 running_portage = None
6658 if replacement_portage:
6659 replacement_portage = replacement_portage[0]
6661 replacement_portage = None
6663 if replacement_portage == running_portage:
6664 replacement_portage = None
6666 if replacement_portage is not None:
6667 # update from running_portage to replacement_portage asap
6668 asap_nodes.append(replacement_portage)
6670 if running_portage is not None:
6672 portage_rdepend = self._select_atoms_highest_available(
6673 running_root, running_portage.metadata["RDEPEND"],
6674 myuse=running_portage.use.enabled,
6675 parent=running_portage, strict=False)
6676 except portage.exception.InvalidDependString, e:
6677 portage.writemsg("!!! Invalid RDEPEND in " + \
6678 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
6679 (running_root, running_portage.cpv, e), noiselevel=-1)
6681 portage_rdepend = []
6682 runtime_deps.update(atom for atom in portage_rdepend \
6683 if not atom.startswith("!"))
6685 ignore_priority_soft_range = [None]
6686 ignore_priority_soft_range.extend(
6687 xrange(DepPriority.MIN, DepPriority.MEDIUM_SOFT + 1))
6688 tree_mode = "--tree" in self.myopts
6689 # Tracks whether or not the current iteration should prefer asap_nodes
6690 # if available. This is set to False when the previous iteration
6691 # failed to select any nodes. It is reset whenever nodes are
6692 # successfully selected.
6695 # By default, try to avoid selecting root nodes whenever possible. This
6696 # helps ensure that the maximimum possible number of soft dependencies
6697 # have been removed from the graph before their parent nodes have
6698 # selected. This is especially important when those dependencies are
6699 # going to be rebuilt by revdep-rebuild or `emerge -e system` after the
6700 # CHOST has been changed (like when building a stage3 from a stage2).
6701 accept_root_node = False
6703 # State of prefer_asap and accept_root_node flags for successive
6704 # iterations that loosen the criteria for node selection.
6706 # iteration prefer_asap accept_root_node
6711 # If no nodes are selected on the 3rd iteration, it is due to
6712 # unresolved blockers or circular dependencies.
6714 while not mygraph.empty():
6715 self.spinner.update()
6716 selected_nodes = None
6717 ignore_priority = None
6718 if prefer_asap and asap_nodes:
6719 """ASAP nodes are merged before their soft deps."""
6720 asap_nodes = [node for node in asap_nodes \
6721 if mygraph.contains(node)]
6722 for node in asap_nodes:
6723 if not mygraph.child_nodes(node,
6724 ignore_priority=DepPriority.SOFT):
6725 selected_nodes = [node]
6726 asap_nodes.remove(node)
6728 if not selected_nodes and \
6729 not (prefer_asap and asap_nodes):
6730 for ignore_priority in ignore_priority_soft_range:
6731 nodes = get_nodes(ignore_priority=ignore_priority)
6735 if ignore_priority is None and not tree_mode:
6736 # Greedily pop all of these nodes since no relationship
6737 # has been ignored. This optimization destroys --tree
6738 # output, so it's disabled in reversed mode. If there
6739 # is a mix of merge and uninstall nodes, save the
6740 # uninstall nodes from later since sometimes a merge
6741 # node will render an install node unnecessary, and
6742 # we want to avoid doing a separate uninstall task in
6744 merge_nodes = [node for node in nodes \
6745 if node.operation == "merge"]
6747 selected_nodes = merge_nodes
6749 selected_nodes = nodes
6751 # For optimal merge order:
6752 # * Only pop one node.
6753 # * Removing a root node (node without a parent)
6754 # will not produce a leaf node, so avoid it.
6756 if mygraph.parent_nodes(node):
6757 # found a non-root node
6758 selected_nodes = [node]
6760 if not selected_nodes and \
6761 (accept_root_node or ignore_priority is None):
6762 # settle for a root node
6763 selected_nodes = [nodes[0]]
6765 if not selected_nodes:
6766 nodes = get_nodes(ignore_priority=DepPriority.MEDIUM)
6768 """Recursively gather a group of nodes that RDEPEND on
6769 eachother. This ensures that they are merged as a group
6770 and get their RDEPENDs satisfied as soon as possible."""
6771 def gather_deps(ignore_priority,
6772 mergeable_nodes, selected_nodes, node):
6773 if node in selected_nodes:
6775 if node not in mergeable_nodes:
6777 if node == replacement_portage and \
6778 mygraph.child_nodes(node,
6779 ignore_priority=DepPriority.MEDIUM_SOFT):
6780 # Make sure that portage always has all of it's
6781 # RDEPENDs installed first.
6783 selected_nodes.add(node)
6784 for child in mygraph.child_nodes(node,
6785 ignore_priority=ignore_priority):
6786 if not gather_deps(ignore_priority,
6787 mergeable_nodes, selected_nodes, child):
6790 mergeable_nodes = set(nodes)
6791 if prefer_asap and asap_nodes:
6793 for ignore_priority in xrange(DepPriority.SOFT,
6794 DepPriority.MEDIUM_SOFT + 1):
6796 if nodes is not asap_nodes and \
6797 not accept_root_node and \
6798 not mygraph.parent_nodes(node):
6800 selected_nodes = set()
6801 if gather_deps(ignore_priority,
6802 mergeable_nodes, selected_nodes, node):
6805 selected_nodes = None
6809 # If any nodes have been selected here, it's always
6810 # possible that anything up to a MEDIUM_SOFT priority
6811 # relationship has been ignored. This state is recorded
6812 # in ignore_priority so that relevant nodes will be
6813 # added to asap_nodes when appropriate.
6815 ignore_priority = DepPriority.MEDIUM_SOFT
6817 if prefer_asap and asap_nodes and not selected_nodes:
6818 # We failed to find any asap nodes to merge, so ignore
6819 # them for the next iteration.
6823 if not selected_nodes and not accept_root_node:
6824 # Maybe there are only root nodes left, so accept them
6825 # for the next iteration.
6826 accept_root_node = True
6829 if selected_nodes and ignore_priority > DepPriority.SOFT:
6830 # Try to merge ignored medium deps as soon as possible.
6831 for node in selected_nodes:
6832 children = set(mygraph.child_nodes(node))
6833 soft = children.difference(
6834 mygraph.child_nodes(node,
6835 ignore_priority=DepPriority.SOFT))
6836 medium_soft = children.difference(
6837 mygraph.child_nodes(node,
6838 ignore_priority=DepPriority.MEDIUM_SOFT))
6839 medium_soft.difference_update(soft)
6840 for child in medium_soft:
6841 if child in selected_nodes:
6843 if child in asap_nodes:
6845 asap_nodes.append(child)
6847 if selected_nodes and len(selected_nodes) > 1:
6848 if not isinstance(selected_nodes, list):
6849 selected_nodes = list(selected_nodes)
6850 selected_nodes.sort(cmp_circular_bias)
6852 if not selected_nodes and not myblocker_uninstalls.is_empty():
6853 # An Uninstall task needs to be executed in order to
6854 # avoid conflict if possible.
6855 min_parent_deps = None
6857 for task in myblocker_uninstalls.leaf_nodes():
6858 # Do some sanity checks so that system or world packages
6859 # don't get uninstalled inappropriately here (only really
6860 # necessary when --complete-graph has not been enabled).
6862 if task in ignored_uninstall_tasks:
6865 if task in scheduled_uninstalls:
6866 # It's been scheduled but it hasn't
6867 # been executed yet due to dependence
6868 # on installation of blocking packages.
6871 root_config = self.roots[task.root]
6872 inst_pkg = self._pkg_cache[
6873 ("installed", task.root, task.cpv, "nomerge")]
6875 if self.digraph.contains(inst_pkg):
6878 forbid_overlap = False
6879 heuristic_overlap = False
6880 for blocker in myblocker_uninstalls.parent_nodes(task):
6881 if blocker.eapi in ("0", "1"):
6882 heuristic_overlap = True
6883 elif blocker.atom.blocker.overlap.forbid:
6884 forbid_overlap = True
6886 if forbid_overlap and running_root == task.root:
6889 if heuristic_overlap and running_root == task.root:
6890 # Never uninstall sys-apps/portage or it's essential
6891 # dependencies, except through replacement.
6893 runtime_dep_atoms = \
6894 list(runtime_deps.iterAtomsForPackage(task))
6895 except portage.exception.InvalidDependString, e:
6896 portage.writemsg("!!! Invalid PROVIDE in " + \
6897 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6898 (task.root, task.cpv, e), noiselevel=-1)
6902 # Don't uninstall a runtime dep if it appears
6903 # to be the only suitable one installed.
6905 vardb = root_config.trees["vartree"].dbapi
6906 for atom in runtime_dep_atoms:
6907 other_version = None
6908 for pkg in vardb.match_pkgs(atom):
6909 if pkg.cpv == task.cpv and \
6910 pkg.metadata["COUNTER"] == \
6911 task.metadata["COUNTER"]:
6915 if other_version is None:
6921 # For packages in the system set, don't take
6922 # any chances. If the conflict can't be resolved
6923 # by a normal replacement operation then abort.
6926 for atom in root_config.sets[
6927 "system"].iterAtomsForPackage(task):
6930 except portage.exception.InvalidDependString, e:
6931 portage.writemsg("!!! Invalid PROVIDE in " + \
6932 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6933 (task.root, task.cpv, e), noiselevel=-1)
6939 # Note that the world check isn't always
6940 # necessary since self._complete_graph() will
6941 # add all packages from the system and world sets to the
6942 # graph. This just allows unresolved conflicts to be
6943 # detected as early as possible, which makes it possible
6944 # to avoid calling self._complete_graph() when it is
6945 # unnecessary due to blockers triggering an abortion.
6947 # For packages in the world set, go ahead an uninstall
6948 # when necessary, as long as the atom will be satisfied
6949 # in the final state.
6950 graph_db = self.mydbapi[task.root]
6953 for atom in root_config.sets[
6954 "world"].iterAtomsForPackage(task):
6956 for pkg in graph_db.match_pkgs(atom):
6964 except portage.exception.InvalidDependString, e:
6965 portage.writemsg("!!! Invalid PROVIDE in " + \
6966 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6967 (task.root, task.cpv, e), noiselevel=-1)
6973 # Check the deps of parent nodes to ensure that
6974 # the chosen task produces a leaf node. Maybe
6975 # this can be optimized some more to make the
6976 # best possible choice, but the current algorithm
6977 # is simple and should be near optimal for most
6980 for parent in mygraph.parent_nodes(task):
6981 parent_deps.update(mygraph.child_nodes(parent,
6982 ignore_priority=DepPriority.MEDIUM_SOFT))
6983 parent_deps.remove(task)
6984 if min_parent_deps is None or \
6985 len(parent_deps) < min_parent_deps:
6986 min_parent_deps = len(parent_deps)
6989 if uninst_task is not None:
6990 # The uninstall is performed only after blocking
6991 # packages have been merged on top of it. File
6992 # collisions between blocking packages are detected
6993 # and removed from the list of files to be uninstalled.
6994 scheduled_uninstalls.add(uninst_task)
6995 parent_nodes = mygraph.parent_nodes(uninst_task)
6997 # Reverse the parent -> uninstall edges since we want
6998 # to do the uninstall after blocking packages have
6999 # been merged on top of it.
7000 mygraph.remove(uninst_task)
7001 for blocked_pkg in parent_nodes:
7002 mygraph.add(blocked_pkg, uninst_task,
7003 priority=BlockerDepPriority.instance)
7004 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7005 scheduler_graph.add(blocked_pkg, uninst_task,
7006 priority=BlockerDepPriority.instance)
7009 # None of the Uninstall tasks are acceptable, so
7010 # the corresponding blockers are unresolvable.
7011 # We need to drop an Uninstall task here in order
7012 # to avoid the circular deps code path, but the
7013 # blocker will still be counted as an unresolved
7015 for node in myblocker_uninstalls.leaf_nodes():
7017 mygraph.remove(node)
7021 ignored_uninstall_tasks.add(node)
7024 # After dropping an Uninstall task, reset
7025 # the state variables for leaf node selection and
7026 # continue trying to select leaf nodes.
7028 accept_root_node = False
7031 if not selected_nodes:
7032 self._circular_deps_for_display = mygraph
7033 raise self._unknown_internal_error()
7035 # At this point, we've succeeded in selecting one or more nodes, so
7036 # it's now safe to reset the prefer_asap and accept_root_node flags
7037 # to their default states.
7039 accept_root_node = False
7041 mygraph.difference_update(selected_nodes)
7043 for node in selected_nodes:
7044 if isinstance(node, Package) and \
7045 node.operation == "nomerge":
7048 # Handle interactions between blockers
7049 # and uninstallation tasks.
7050 solved_blockers = set()
7052 if isinstance(node, Package) and \
7053 "uninstall" == node.operation:
7054 have_uninstall_task = True
7057 vardb = self.trees[node.root]["vartree"].dbapi
7058 previous_cpv = vardb.match(node.slot_atom)
7060 # The package will be replaced by this one, so remove
7061 # the corresponding Uninstall task if necessary.
7062 previous_cpv = previous_cpv[0]
7064 ("installed", node.root, previous_cpv, "uninstall")
7066 mygraph.remove(uninst_task)
7070 if uninst_task is not None and \
7071 uninst_task not in ignored_uninstall_tasks and \
7072 myblocker_uninstalls.contains(uninst_task):
7073 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7074 myblocker_uninstalls.remove(uninst_task)
7075 # Discard any blockers that this Uninstall solves.
7076 for blocker in blocker_nodes:
7077 if not myblocker_uninstalls.child_nodes(blocker):
7078 myblocker_uninstalls.remove(blocker)
7079 solved_blockers.add(blocker)
7081 retlist.append(node)
7083 if (isinstance(node, Package) and \
7084 "uninstall" == node.operation) or \
7085 (uninst_task is not None and \
7086 uninst_task in scheduled_uninstalls):
7087 # Include satisfied blockers in the merge list
7088 # since the user might be interested and also
7089 # it serves as an indicator that blocking packages
7090 # will be temporarily installed simultaneously.
7091 for blocker in solved_blockers:
7092 retlist.append(Blocker(atom=blocker.atom,
7093 root=blocker.root, eapi=blocker.eapi,
7096 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7097 for node in myblocker_uninstalls.root_nodes():
7098 unsolvable_blockers.add(node)
7100 for blocker in unsolvable_blockers:
7101 retlist.append(blocker)
7103 # If any Uninstall tasks need to be executed in order
7104 # to avoid a conflict, complete the graph with any
7105 # dependencies that may have been initially
7106 # neglected (to ensure that unsafe Uninstall tasks
7107 # are properly identified and blocked from execution).
7108 if have_uninstall_task and \
7110 not unsolvable_blockers:
7111 self.myparams.add("complete")
7112 raise self._serialize_tasks_retry("")
7114 if unsolvable_blockers and \
7115 not self._accept_blocker_conflicts():
7116 self._unsatisfied_blockers_for_display = unsolvable_blockers
7117 self._serialized_tasks_cache = retlist[:]
7118 self._scheduler_graph = scheduler_graph
7119 raise self._unknown_internal_error()
7121 if self._slot_collision_info and \
7122 not self._accept_blocker_conflicts():
7123 self._serialized_tasks_cache = retlist[:]
7124 self._scheduler_graph = scheduler_graph
7125 raise self._unknown_internal_error()
7127 return retlist, scheduler_graph
7129 def _show_circular_deps(self, mygraph):
# Report a circular-dependency panic: no leaf node could be selected
# from ``mygraph``.  Root nodes (which cannot be part of a cycle) are
# stripped to reduce noise, the remaining nodes are re-displayed in
# --tree mode, and the cycle itself is dumped via
# mygraph.debug_print() together with a hint about USE flags.
#
# NOTE(review): this chunk is a subsampled numbered dump — original
# lines 7134, 7137-7138, 7143, 7147, 7149-7150 and 7162 are absent,
# and those include control-flow lines of this method.  Compare
# against the complete file before changing any logic here.
7130 # No leaf nodes are available, so we have a circular
7131 # dependency panic situation. Reduce the noise level to a
7132 # minimum via repeated elimination of root nodes since they
7133 # have no parents and thus can not be part of a cycle.
7135 root_nodes = mygraph.root_nodes(
7136 ignore_priority=DepPriority.MEDIUM_SOFT)
7139 mygraph.difference_update(root_nodes)
7140 # Display the USE flags that are enabled on nodes that are part
7141 # of dependency cycles in case that helps the user decide to
7142 # disable some of them.
7144 tempgraph = mygraph.copy()
# Drain a scratch copy of the graph to compute a display ordering;
# tempgraph.order[0] is apparently the fallback pick when no leaf
# node exists (the guarding line is among those missing from this
# dump — confirm upstream).
7145 while not tempgraph.empty():
7146 nodes = tempgraph.leaf_nodes()
7148 node = tempgraph.order[0]
7151 display_order.append(node)
7152 tempgraph.remove(node)
7153 display_order.reverse()
# Force quiet/verbose off and --tree on so the cycle structure is
# visible in the re-display below.
7154 self.myopts.pop("--quiet", None)
7155 self.myopts.pop("--verbose", None)
7156 self.myopts["--tree"] = True
7157 portage.writemsg("\n\n", noiselevel=-1)
7158 self.display(display_order)
7159 prefix = colorize("BAD", " * ")
7160 portage.writemsg("\n", noiselevel=-1)
7161 portage.writemsg(prefix + "Error: circular dependencies:\n",
7163 portage.writemsg("\n", noiselevel=-1)
7164 mygraph.debug_print()
7165 portage.writemsg("\n", noiselevel=-1)
7166 portage.writemsg(prefix + "Note that circular dependencies " + \
7167 "can often be avoided by temporarily\n", noiselevel=-1)
7168 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7169 "optional dependencies.\n", noiselevel=-1)
def _show_merge_list(self):
	"""Display the cached merge list, unless that exact list (or its
	--tree-reversed form) has already been shown."""
	tasks = self._serialized_tasks_cache
	if tasks is None:
		return
	shown = self._displayed_list
	if shown and \
		(shown == tasks or shown == list(reversed(tasks))):
		# Already displayed, in either forward or reversed order.
		return
	display_list = tasks[:]
	if "--tree" in self.myopts:
		# --tree output is rendered bottom-up.
		display_list.reverse()
	self.display(display_list)
def _show_unsatisfied_blockers(self, blockers):
	"""Show the merge list and then explain that it contains mutually
	blocking packages that can not coexist on the same system.  A
	link to the blocker documentation is shown unless --quiet is set."""
	from textwrap import wrap
	self._show_merge_list()
	msg = ("Error: The above package list contains "
		"packages which cannot be installed "
		"at the same time on the same system.")
	prefix = colorize("BAD", " * ")
	portage.writemsg("\n", noiselevel=-1)
	# Wrap the explanation at 70 columns, prefixing each line.
	for line in wrap(msg, 70):
		portage.writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
	if "--quiet" not in self.myopts:
		show_blocker_docs_link()
7195 def display(self, mylist, favorites=[], verbosity=None):
7197 # This is used to prevent display_problems() from
7198 # redundantly displaying this exact same merge list
7199 # again via _show_merge_list().
7200 self._displayed_list = mylist
7202 if verbosity is None:
7203 verbosity = ("--quiet" in self.myopts and 1 or \
7204 "--verbose" in self.myopts and 3 or 2)
7205 favorites_set = InternalPackageSet(favorites)
7206 oneshot = "--oneshot" in self.myopts or \
7207 "--onlydeps" in self.myopts
7208 columns = "--columns" in self.myopts
7213 counters = PackageCounters()
7215 if verbosity == 1 and "--verbose" not in self.myopts:
7216 def create_use_string(*args):
7219 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7221 is_new, reinst_flags,
7222 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7223 alphabetical=("--alphabetical" in self.myopts)):
7231 cur_iuse = set(cur_iuse)
7232 enabled_flags = cur_iuse.intersection(cur_use)
7233 removed_iuse = set(old_iuse).difference(cur_iuse)
7234 any_iuse = cur_iuse.union(old_iuse)
7235 any_iuse = list(any_iuse)
7237 for flag in any_iuse:
7240 reinst_flag = reinst_flags and flag in reinst_flags
7241 if flag in enabled_flags:
7243 if is_new or flag in old_use and \
7244 (all_flags or reinst_flag):
7245 flag_str = red(flag)
7246 elif flag not in old_iuse:
7247 flag_str = yellow(flag) + "%*"
7248 elif flag not in old_use:
7249 flag_str = green(flag) + "*"
7250 elif flag in removed_iuse:
7251 if all_flags or reinst_flag:
7252 flag_str = yellow("-" + flag) + "%"
7255 flag_str = "(" + flag_str + ")"
7256 removed.append(flag_str)
7259 if is_new or flag in old_iuse and \
7260 flag not in old_use and \
7261 (all_flags or reinst_flag):
7262 flag_str = blue("-" + flag)
7263 elif flag not in old_iuse:
7264 flag_str = yellow("-" + flag)
7265 if flag not in iuse_forced:
7267 elif flag in old_use:
7268 flag_str = green("-" + flag) + "*"
7270 if flag in iuse_forced:
7271 flag_str = "(" + flag_str + ")"
7273 enabled.append(flag_str)
7275 disabled.append(flag_str)
7278 ret = " ".join(enabled)
7280 ret = " ".join(enabled + disabled + removed)
7282 ret = '%s="%s" ' % (name, ret)
7285 repo_display = RepoDisplay(self.roots)
7289 mygraph = self.digraph.copy()
7291 # If there are any Uninstall instances, add the corresponding
7292 # blockers to the digraph (useful for --tree display).
7294 executed_uninstalls = set(node for node in mylist \
7295 if isinstance(node, Package) and node.operation == "unmerge")
7297 for uninstall in self._blocker_uninstalls.leaf_nodes():
7298 uninstall_parents = \
7299 self._blocker_uninstalls.parent_nodes(uninstall)
7300 if not uninstall_parents:
7303 # Remove the corresponding "nomerge" node and substitute
7304 # the Uninstall node.
7305 inst_pkg = self._pkg_cache[
7306 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7308 mygraph.remove(inst_pkg)
7313 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7315 inst_pkg_blockers = []
7317 # Break the Package -> Uninstall edges.
7318 mygraph.remove(uninstall)
7320 # Resolution of a package's blockers
7321 # depend on it's own uninstallation.
7322 for blocker in inst_pkg_blockers:
7323 mygraph.add(uninstall, blocker)
7325 # Expand Package -> Uninstall edges into
7326 # Package -> Blocker -> Uninstall edges.
7327 for blocker in uninstall_parents:
7328 mygraph.add(uninstall, blocker)
7329 for parent in self._blocker_parents.parent_nodes(blocker):
7330 if parent != inst_pkg:
7331 mygraph.add(blocker, parent)
7333 # If the uninstall task did not need to be executed because
7334 # of an upgrade, display Blocker -> Upgrade edges since the
7335 # corresponding Blocker -> Uninstall edges will not be shown.
7337 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7338 if upgrade_node is not None and \
7339 uninstall not in executed_uninstalls:
7340 for blocker in uninstall_parents:
7341 mygraph.add(upgrade_node, blocker)
7343 unsatisfied_blockers = []
7348 if isinstance(x, Blocker) and not x.satisfied:
7349 unsatisfied_blockers.append(x)
7352 if "--tree" in self.myopts:
7353 depth = len(tree_nodes)
7354 while depth and graph_key not in \
7355 mygraph.child_nodes(tree_nodes[depth-1]):
7358 tree_nodes = tree_nodes[:depth]
7359 tree_nodes.append(graph_key)
7360 display_list.append((x, depth, True))
7361 shown_edges.add((graph_key, tree_nodes[depth-1]))
7363 traversed_nodes = set() # prevent endless circles
7364 traversed_nodes.add(graph_key)
7365 def add_parents(current_node, ordered):
7367 # Do not traverse to parents if this node is an
7368 # an argument or a direct member of a set that has
7369 # been specified as an argument (system or world).
7370 if current_node not in self._set_nodes:
7371 parent_nodes = mygraph.parent_nodes(current_node)
7373 child_nodes = set(mygraph.child_nodes(current_node))
7374 selected_parent = None
7375 # First, try to avoid a direct cycle.
7376 for node in parent_nodes:
7377 if not isinstance(node, (Blocker, Package)):
7379 if node not in traversed_nodes and \
7380 node not in child_nodes:
7381 edge = (current_node, node)
7382 if edge in shown_edges:
7384 selected_parent = node
7386 if not selected_parent:
7387 # A direct cycle is unavoidable.
7388 for node in parent_nodes:
7389 if not isinstance(node, (Blocker, Package)):
7391 if node not in traversed_nodes:
7392 edge = (current_node, node)
7393 if edge in shown_edges:
7395 selected_parent = node
7398 shown_edges.add((current_node, selected_parent))
7399 traversed_nodes.add(selected_parent)
7400 add_parents(selected_parent, False)
7401 display_list.append((current_node,
7402 len(tree_nodes), ordered))
7403 tree_nodes.append(current_node)
7405 add_parents(graph_key, True)
7407 display_list.append((x, depth, True))
7408 mylist = display_list
7409 for x in unsatisfied_blockers:
7410 mylist.append((x, 0, True))
7412 last_merge_depth = 0
7413 for i in xrange(len(mylist)-1,-1,-1):
7414 graph_key, depth, ordered = mylist[i]
7415 if not ordered and depth == 0 and i > 0 \
7416 and graph_key == mylist[i-1][0] and \
7417 mylist[i-1][1] == 0:
7418 # An ordered node got a consecutive duplicate when the tree was
7422 if ordered and graph_key[-1] != "nomerge":
7423 last_merge_depth = depth
7425 if depth >= last_merge_depth or \
7426 i < len(mylist) - 1 and \
7427 depth >= mylist[i+1][1]:
7430 from portage import flatten
7431 from portage.dep import use_reduce, paren_reduce
7432 # files to fetch list - avoids counting a same file twice
7433 # in size display (verbose mode)
7436 # Use this set to detect when all the "repoadd" strings are "[0]"
7437 # and disable the entire repo display in this case.
7440 for mylist_index in xrange(len(mylist)):
7441 x, depth, ordered = mylist[mylist_index]
7445 portdb = self.trees[myroot]["porttree"].dbapi
7446 bindb = self.trees[myroot]["bintree"].dbapi
7447 vardb = self.trees[myroot]["vartree"].dbapi
7448 vartree = self.trees[myroot]["vartree"]
7449 pkgsettings = self.pkgsettings[myroot]
7452 indent = " " * depth
7454 if isinstance(x, Blocker):
7456 blocker_style = "PKG_BLOCKER_SATISFIED"
7457 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
7459 blocker_style = "PKG_BLOCKER"
7460 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
7462 counters.blocks += 1
7464 counters.blocks_satisfied += 1
7465 resolved = portage.key_expand(
7466 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7467 if "--columns" in self.myopts and "--quiet" in self.myopts:
7468 addl += " " + colorize(blocker_style, resolved)
7470 addl = "[%s %s] %s%s" % \
7471 (colorize(blocker_style, "blocks"),
7472 addl, indent, colorize(blocker_style, resolved))
7473 block_parents = self._blocker_parents.parent_nodes(x)
7474 block_parents = set([pnode[2] for pnode in block_parents])
7475 block_parents = ", ".join(block_parents)
7477 addl += colorize(blocker_style,
7478 " (\"%s\" is blocking %s)") % \
7479 (str(x.atom).lstrip("!"), block_parents)
7481 addl += colorize(blocker_style,
7482 " (is blocking %s)") % block_parents
7483 if isinstance(x, Blocker) and x.satisfied:
7488 blockers.append(addl)
7491 pkg_merge = ordered and pkg_status == "merge"
7492 if not pkg_merge and pkg_status == "merge":
7493 pkg_status = "nomerge"
7494 built = pkg_type != "ebuild"
7495 installed = pkg_type == "installed"
7497 metadata = pkg.metadata
7499 repo_name = metadata["repository"]
7500 if pkg_type == "ebuild":
7501 ebuild_path = portdb.findname(pkg_key)
7502 if not ebuild_path: # shouldn't happen
7503 raise portage.exception.PackageNotFound(pkg_key)
7504 repo_path_real = os.path.dirname(os.path.dirname(
7505 os.path.dirname(ebuild_path)))
7507 repo_path_real = portdb.getRepositoryPath(repo_name)
7508 pkg_use = list(pkg.use.enabled)
7510 restrict = flatten(use_reduce(paren_reduce(
7511 pkg.metadata["RESTRICT"]), uselist=pkg_use))
7512 except portage.exception.InvalidDependString, e:
7513 if not pkg.installed:
7514 show_invalid_depstring_notice(x,
7515 pkg.metadata["RESTRICT"], str(e))
7519 if "ebuild" == pkg_type and x[3] != "nomerge" and \
7520 "fetch" in restrict:
7523 counters.restrict_fetch += 1
7524 if portdb.fetch_check(pkg_key, pkg_use):
7527 counters.restrict_fetch_satisfied += 1
7529 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
7530 #param is used for -u, where you still *do* want to see when something is being upgraded.
7533 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
7534 if vardb.cpv_exists(pkg_key):
7535 addl=" "+yellow("R")+fetch+" "
7538 counters.reinst += 1
7539 elif pkg_status == "uninstall":
7540 counters.uninst += 1
7541 # filter out old-style virtual matches
7542 elif installed_versions and \
7543 portage.cpv_getkey(installed_versions[0]) == \
7544 portage.cpv_getkey(pkg_key):
7545 myinslotlist = vardb.match(pkg.slot_atom)
7546 # If this is the first install of a new-style virtual, we
7547 # need to filter out old-style virtual matches.
7548 if myinslotlist and \
7549 portage.cpv_getkey(myinslotlist[0]) != \
7550 portage.cpv_getkey(pkg_key):
7553 myoldbest = myinslotlist[:]
7555 if not portage.dep.cpvequal(pkg_key,
7556 portage.best([pkg_key] + myoldbest)):
7558 addl += turquoise("U")+blue("D")
7560 counters.downgrades += 1
7563 addl += turquoise("U") + " "
7565 counters.upgrades += 1
7567 # New slot, mark it new.
7568 addl = " " + green("NS") + fetch + " "
7569 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
7571 counters.newslot += 1
7573 if "--changelog" in self.myopts:
7574 inst_matches = vardb.match(pkg.slot_atom)
7576 changelogs.extend(self.calc_changelog(
7577 portdb.findname(pkg_key),
7578 inst_matches[0], pkg_key))
7580 addl = " " + green("N") + " " + fetch + " "
7589 forced_flags = set()
7590 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
7591 forced_flags.update(pkgsettings.useforce)
7592 forced_flags.update(pkgsettings.usemask)
7594 cur_use = [flag for flag in pkg.use.enabled \
7595 if flag in pkg.iuse.all]
7596 cur_iuse = sorted(pkg.iuse.all)
7598 if myoldbest and myinslotlist:
7599 previous_cpv = myoldbest[0]
7601 previous_cpv = pkg.cpv
7602 if vardb.cpv_exists(previous_cpv):
7603 old_iuse, old_use = vardb.aux_get(
7604 previous_cpv, ["IUSE", "USE"])
7605 old_iuse = list(set(
7606 filter_iuse_defaults(old_iuse.split())))
7608 old_use = old_use.split()
7615 old_use = [flag for flag in old_use if flag in old_iuse]
7617 use_expand = pkgsettings["USE_EXPAND"].lower().split()
7619 use_expand.reverse()
7620 use_expand_hidden = \
7621 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
7623 def map_to_use_expand(myvals, forcedFlags=False,
7627 for exp in use_expand:
7630 for val in myvals[:]:
7631 if val.startswith(exp.lower()+"_"):
7632 if val in forced_flags:
7633 forced[exp].add(val[len(exp)+1:])
7634 ret[exp].append(val[len(exp)+1:])
7637 forced["USE"] = [val for val in myvals \
7638 if val in forced_flags]
7640 for exp in use_expand_hidden:
7646 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
7647 # are the only thing that triggered reinstallation.
7648 reinst_flags_map = {}
7649 reinstall_for_flags = self._reinstall_nodes.get(pkg)
7650 reinst_expand_map = None
7651 if reinstall_for_flags:
7652 reinst_flags_map = map_to_use_expand(
7653 list(reinstall_for_flags), removeHidden=False)
7654 for k in list(reinst_flags_map):
7655 if not reinst_flags_map[k]:
7656 del reinst_flags_map[k]
7657 if not reinst_flags_map.get("USE"):
7658 reinst_expand_map = reinst_flags_map.copy()
7659 reinst_expand_map.pop("USE", None)
7660 if reinst_expand_map and \
7661 not set(reinst_expand_map).difference(
7663 use_expand_hidden = \
7664 set(use_expand_hidden).difference(
7667 cur_iuse_map, iuse_forced = \
7668 map_to_use_expand(cur_iuse, forcedFlags=True)
7669 cur_use_map = map_to_use_expand(cur_use)
7670 old_iuse_map = map_to_use_expand(old_iuse)
7671 old_use_map = map_to_use_expand(old_use)
7674 use_expand.insert(0, "USE")
7676 for key in use_expand:
7677 if key in use_expand_hidden:
7679 verboseadd += create_use_string(key.upper(),
7680 cur_iuse_map[key], iuse_forced[key],
7681 cur_use_map[key], old_iuse_map[key],
7682 old_use_map[key], is_new,
7683 reinst_flags_map.get(key))
7688 if pkg_type == "ebuild" and pkg_merge:
7690 myfilesdict = portdb.getfetchsizes(pkg_key,
7691 useflags=pkg_use, debug=self.edebug)
7692 except portage.exception.InvalidDependString, e:
7693 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
7694 show_invalid_depstring_notice(x, src_uri, str(e))
7697 if myfilesdict is None:
7698 myfilesdict="[empty/missing/bad digest]"
7700 for myfetchfile in myfilesdict:
7701 if myfetchfile not in myfetchlist:
7702 mysize+=myfilesdict[myfetchfile]
7703 myfetchlist.append(myfetchfile)
7705 counters.totalsize += mysize
7706 verboseadd += format_size(mysize)
7709 # assign index for a previous version in the same slot
7710 has_previous = False
7711 repo_name_prev = None
7712 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
7714 slot_matches = vardb.match(slot_atom)
7717 repo_name_prev = vardb.aux_get(slot_matches[0],
7720 # now use the data to generate output
7721 if pkg.installed or not has_previous:
7722 repoadd = repo_display.repoStr(repo_path_real)
7724 repo_path_prev = None
7726 repo_path_prev = portdb.getRepositoryPath(
7728 if repo_path_prev == repo_path_real:
7729 repoadd = repo_display.repoStr(repo_path_real)
7731 repoadd = "%s=>%s" % (
7732 repo_display.repoStr(repo_path_prev),
7733 repo_display.repoStr(repo_path_real))
7735 repoadd_set.add(repoadd)
7737 xs = [portage.cpv_getkey(pkg_key)] + \
7738 list(portage.catpkgsplit(pkg_key)[2:])
7745 if "COLUMNWIDTH" in self.settings:
7747 mywidth = int(self.settings["COLUMNWIDTH"])
7748 except ValueError, e:
7749 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
7751 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
7752 self.settings["COLUMNWIDTH"], noiselevel=-1)
7754 oldlp = mywidth - 30
7757 # Convert myoldbest from a list to a string.
7761 for pos, key in enumerate(myoldbest):
7762 key = portage.catpkgsplit(key)[2] + \
7763 "-" + portage.catpkgsplit(key)[3]
7764 if key[-3:] == "-r0":
7766 myoldbest[pos] = key
7767 myoldbest = blue("["+", ".join(myoldbest)+"]")
7770 root_config = self.roots[myroot]
7771 system_set = root_config.sets["system"]
7772 world_set = root_config.sets["world"]
7777 pkg_system = system_set.findAtomForPackage(pkg)
7778 pkg_world = world_set.findAtomForPackage(pkg)
7779 if not (oneshot or pkg_world) and \
7780 myroot == self.target_root and \
7781 favorites_set.findAtomForPackage(pkg):
7782 # Maybe it will be added to world now.
7783 if create_world_atom(pkg, favorites_set, root_config):
7785 except portage.exception.InvalidDependString:
7786 # This is reported elsewhere if relevant.
7789 def pkgprint(pkg_str):
7792 return colorize("PKG_MERGE_SYSTEM", pkg_str)
7794 return colorize("PKG_MERGE_WORLD", pkg_str)
7796 return colorize("PKG_MERGE", pkg_str)
7797 elif pkg_status == "uninstall":
7798 return colorize("PKG_UNINSTALL", pkg_str)
7801 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
7803 return colorize("PKG_NOMERGE_WORLD", pkg_str)
7805 return colorize("PKG_NOMERGE", pkg_str)
7808 properties = flatten(use_reduce(paren_reduce(
7809 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
7810 except portage.exception.InvalidDependString, e:
7811 if not pkg.installed:
7812 show_invalid_depstring_notice(pkg,
7813 pkg.metadata["PROPERTIES"], str(e))
7817 interactive = "interactive" in properties
7818 if interactive and pkg.operation == "merge":
7819 addl = colorize("WARN", "I") + addl[1:]
7821 counters.interactive += 1
7826 if "--columns" in self.myopts:
7827 if "--quiet" in self.myopts:
7828 myprint=addl+" "+indent+pkgprint(pkg_cp)
7829 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
7830 myprint=myprint+myoldbest
7831 myprint=myprint+darkgreen("to "+x[1])
7835 myprint = "[%s] %s%s" % \
7836 (pkgprint(pkg_status.ljust(13)),
7837 indent, pkgprint(pkg.cp))
7839 myprint = "[%s %s] %s%s" % \
7840 (pkgprint(pkg.type_name), addl,
7841 indent, pkgprint(pkg.cp))
7842 if (newlp-nc_len(myprint)) > 0:
7843 myprint=myprint+(" "*(newlp-nc_len(myprint)))
7844 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
7845 if (oldlp-nc_len(myprint)) > 0:
7846 myprint=myprint+" "*(oldlp-nc_len(myprint))
7847 myprint=myprint+myoldbest
7848 myprint += darkgreen("to " + pkg.root)
7851 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
7853 myprint = "[" + pkg_type + " " + addl + "] "
7854 myprint += indent + pkgprint(pkg_key) + " " + \
7855 myoldbest + darkgreen("to " + myroot)
7857 if "--columns" in self.myopts:
7858 if "--quiet" in self.myopts:
7859 myprint=addl+" "+indent+pkgprint(pkg_cp)
7860 myprint=myprint+" "+green(xs[1]+xs[2])+" "
7861 myprint=myprint+myoldbest
7865 myprint = "[%s] %s%s" % \
7866 (pkgprint(pkg_status.ljust(13)),
7867 indent, pkgprint(pkg.cp))
7869 myprint = "[%s %s] %s%s" % \
7870 (pkgprint(pkg.type_name), addl,
7871 indent, pkgprint(pkg.cp))
7872 if (newlp-nc_len(myprint)) > 0:
7873 myprint=myprint+(" "*(newlp-nc_len(myprint)))
7874 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
7875 if (oldlp-nc_len(myprint)) > 0:
7876 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
7877 myprint += myoldbest
7880 myprint = "[%s] %s%s %s" % \
7881 (pkgprint(pkg_status.ljust(13)),
7882 indent, pkgprint(pkg.cpv),
7885 myprint = "[%s %s] %s%s %s" % \
7886 (pkgprint(pkg_type), addl, indent,
7887 pkgprint(pkg.cpv), myoldbest)
7889 if columns and pkg.operation == "uninstall":
7891 p.append((myprint, verboseadd, repoadd))
7893 if "--tree" not in self.myopts and \
7894 "--quiet" not in self.myopts and \
7895 not self._opts_no_restart.intersection(self.myopts) and \
7896 pkg.root == self._running_root.root and \
7897 portage.match_from_list(
7898 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
7899 not vardb.cpv_exists(pkg.cpv) and \
7900 "--quiet" not in self.myopts:
7901 if mylist_index < len(mylist) - 1:
7902 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
7903 p.append(colorize("WARN", " then resume the merge."))
7906 show_repos = repoadd_set and repoadd_set != set(["0"])
7909 if isinstance(x, basestring):
7910 out.write("%s\n" % (x,))
7913 myprint, verboseadd, repoadd = x
7916 myprint += " " + verboseadd
7918 if show_repos and repoadd:
7919 myprint += " " + teal("[%s]" % repoadd)
7921 out.write("%s\n" % (myprint,))
7930 sys.stdout.write(str(repo_display))
7932 if "--changelog" in self.myopts:
7934 for revision,text in changelogs:
7935 print bold('*'+revision)
7936 sys.stdout.write(text)
7941 def display_problems(self):
7943 Display problems with the dependency graph such as slot collisions.
7944 This is called internally by display() to show the problems _after_
7945 the merge list where it is most likely to be seen, but if display()
7946 is not going to be called then this method should be called explicitly
7947 to ensure that the user is notified of problems with the graph.
7949 All output goes to stderr, except for unsatisfied dependencies which
7950 go to stdout for parsing by programs such as autounmask.
7953 # Note that show_masked_packages() sends it's output to
7954 # stdout, and some programs such as autounmask parse the
7955 # output in cases when emerge bails out. However, when
7956 # show_masked_packages() is called for installed packages
7957 # here, the message is a warning that is more appropriate
7958 # to send to stderr, so temporarily redirect stdout to
7959 # stderr. TODO: Fix output code so there's a cleaner way
7960 # to redirect everything to stderr.
7965 sys.stdout = sys.stderr
7966 self._display_problems()
7972 # This goes to stdout for parsing by programs like autounmask.
7973 for pargs, kwargs in self._unsatisfied_deps_for_display:
7974 self._show_unsatisfied_dep(*pargs, **kwargs)
7976 def _display_problems(self):
7977 if self._circular_deps_for_display is not None:
7978 self._show_circular_deps(
7979 self._circular_deps_for_display)
7981 # The user is only notified of a slot conflict if
7982 # there are no unresolvable blocker conflicts.
7983 if self._unsatisfied_blockers_for_display is not None:
7984 self._show_unsatisfied_blockers(
7985 self._unsatisfied_blockers_for_display)
7987 self._show_slot_collision_notice()
7989 # TODO: Add generic support for "set problem" handlers so that
7990 # the below warnings aren't special cases for world only.
7992 if self._missing_args:
7993 world_problems = False
7994 if "world" in self._sets:
7995 # Filter out indirect members of world (from nested sets)
7996 # since only direct members of world are desired here.
7997 world_set = self.roots[self.target_root].sets["world"]
7998 for arg, atom in self._missing_args:
7999 if arg.name == "world" and atom in world_set:
8000 world_problems = True
8004 sys.stderr.write("\n!!! Problems have been " + \
8005 "detected with your world file\n")
8006 sys.stderr.write("!!! Please run " + \
8007 green("emaint --check world")+"\n\n")
8009 if self._missing_args:
8010 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8011 " Ebuilds for the following packages are either all\n")
8012 sys.stderr.write(colorize("BAD", "!!!") + \
8013 " masked or don't exist:\n")
8014 sys.stderr.write(" ".join(str(atom) for arg, atom in \
8015 self._missing_args) + "\n")
8017 if self._pprovided_args:
8019 for arg, atom in self._pprovided_args:
8020 if isinstance(arg, SetArg):
8022 arg_atom = (atom, atom)
8025 arg_atom = (arg.arg, atom)
8026 refs = arg_refs.setdefault(arg_atom, [])
8027 if parent not in refs:
8030 msg.append(bad("\nWARNING: "))
8031 if len(self._pprovided_args) > 1:
8032 msg.append("Requested packages will not be " + \
8033 "merged because they are listed in\n")
8035 msg.append("A requested package will not be " + \
8036 "merged because it is listed in\n")
8037 msg.append("package.provided:\n\n")
8038 problems_sets = set()
8039 for (arg, atom), refs in arg_refs.iteritems():
8042 problems_sets.update(refs)
8044 ref_string = ", ".join(["'%s'" % name for name in refs])
8045 ref_string = " pulled in by " + ref_string
8046 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8048 if "world" in problems_sets:
8049 msg.append("This problem can be solved in one of the following ways:\n\n")
8050 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
8051 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
8052 msg.append(" C) Remove offending entries from package.provided.\n\n")
8053 msg.append("The best course of action depends on the reason that an offending\n")
8054 msg.append("package.provided entry exists.\n\n")
8055 sys.stderr.write("".join(msg))
8057 masked_packages = []
8058 for pkg in self._masked_installed:
8059 root_config = pkg.root_config
8060 pkgsettings = self.pkgsettings[pkg.root]
8061 mreasons = get_masking_status(pkg, pkgsettings, root_config)
8062 masked_packages.append((root_config, pkgsettings,
8063 pkg.cpv, pkg.metadata, mreasons))
8065 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8066 " The following installed packages are masked:\n")
8067 show_masked_packages(masked_packages)
8071 def calc_changelog(self,ebuildpath,current,next):
8072 if ebuildpath == None or not os.path.exists(ebuildpath):
8074 current = '-'.join(portage.catpkgsplit(current)[1:])
8075 if current.endswith('-r0'):
8076 current = current[:-3]
8077 next = '-'.join(portage.catpkgsplit(next)[1:])
8078 if next.endswith('-r0'):
8080 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8082 changelog = open(changelogpath).read()
8083 except SystemExit, e:
8084 raise # Needed else can't exit
8087 divisions = self.find_changelog_tags(changelog)
8088 #print 'XX from',current,'to',next
8089 #for div,text in divisions: print 'XX',div
8090 # skip entries for all revisions above the one we are about to emerge
8091 for i in range(len(divisions)):
8092 if divisions[i][0]==next:
8093 divisions = divisions[i:]
8095 # find out how many entries we are going to display
8096 for i in range(len(divisions)):
8097 if divisions[i][0]==current:
8098 divisions = divisions[:i]
8101 # couldnt find the current revision in the list. display nothing
8105 def find_changelog_tags(self,changelog):
8109 match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8111 if release is not None:
8112 divs.append((release,changelog))
8114 if release is not None:
8115 divs.append((release,changelog[:match.start()]))
8116 changelog = changelog[match.end():]
8117 release = match.group(1)
8118 if release.endswith('.ebuild'):
8119 release = release[:-7]
8120 if release.endswith('-r0'):
8121 release = release[:-3]
8123 def saveNomergeFavorites(self):
8124 """Find atoms in favorites that are not in the mergelist and add them
8125 to the world file if necessary."""
8126 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8127 "--oneshot", "--onlydeps", "--pretend"):
8128 if x in self.myopts:
8130 root_config = self.roots[self.target_root]
8131 world_set = root_config.sets["world"]
8133 world_locked = False
8134 if hasattr(world_set, "lock"):
8138 if hasattr(world_set, "load"):
8139 world_set.load() # maybe it's changed on disk
8141 args_set = self._sets["args"]
8142 portdb = self.trees[self.target_root]["porttree"].dbapi
8143 added_favorites = set()
8144 for x in self._set_nodes:
8145 pkg_type, root, pkg_key, pkg_status = x
8146 if pkg_status != "nomerge":
8150 myfavkey = create_world_atom(x, args_set, root_config)
8152 if myfavkey in added_favorites:
8154 added_favorites.add(myfavkey)
8155 except portage.exception.InvalidDependString, e:
8156 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8157 (pkg_key, str(e)), noiselevel=-1)
8158 writemsg("!!! see '%s'\n\n" % os.path.join(
8159 root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8162 for k in self._sets:
8163 if k in ("args", "world") or not root_config.sets[k].world_candidate:
8168 all_added.append(SETPREFIX + k)
8169 all_added.extend(added_favorites)
8172 print ">>> Recording %s in \"world\" favorites file..." % \
8173 colorize("INFORM", str(a))
8175 world_set.update(all_added)
	def loadResumeCommand(self, resume_data, skip_masked=False):
		"""
		Add a resume command to the graph and validate it in the process.  This
		will raise a PackageNotFound exception if a package is not available.
		"""
		if not isinstance(resume_data, dict):
		mergelist = resume_data.get("mergelist")
		if not isinstance(mergelist, list):
		fakedb = self.mydbapi
		serialized_tasks = []
			# Each mergelist entry is [pkg_type, root, cpv, action].
			if not (isinstance(x, list) and len(x) == 4):
			pkg_type, myroot, pkg_key, action = x
			if pkg_type not in self.pkg_tree_map:
			if action != "merge":
			tree_type = self.pkg_tree_map[pkg_type]
			mydb = trees[myroot][tree_type].dbapi
			db_keys = list(self._trees_orig[myroot][
				tree_type].dbapi._aux_cache_keys)
				metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
				# It does not exist or it is corrupt.
				if action == "uninstall":
				raise portage.exception.PackageNotFound(pkg_key)
			installed = action == "uninstall"
			built = pkg_type != "ebuild"
			root_config = self.roots[myroot]
			pkg = Package(built=built, cpv=pkg_key,
				installed=installed, metadata=metadata,
				operation=action, root_config=root_config,
			if pkg_type == "ebuild":
				# Recompute USE from current settings for source packages.
				pkgsettings = self.pkgsettings[myroot]
				pkgsettings.setcpv(pkg)
				pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
			self._pkg_cache[pkg] = pkg

			root_config = self.roots[pkg.root]
			if "merge" == pkg.operation and \
				not visible(root_config.settings, pkg):
					masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
					self._unsatisfied_deps_for_display.append(
						((pkg.root, "="+pkg.cpv), {"myparent":None}))

			fakedb[myroot].cpv_inject(pkg)
			serialized_tasks.append(pkg)
			self.spinner.update()

		if self._unsatisfied_deps_for_display:

		if not serialized_tasks or "--nodeps" in self.myopts:
			self._serialized_tasks_cache = serialized_tasks
			self._scheduler_graph = self.digraph
			self._select_package = self._select_pkg_from_graph
			self.myparams.add("selective")

			favorites = resume_data.get("favorites")
			args_set = self._sets["args"]
			if isinstance(favorites, list):
				args = self._load_favorites(favorites)

			for task in serialized_tasks:
				if isinstance(task, Package) and \
					task.operation == "merge":
					if not self._add_pkg(task, None):

			# Packages for argument atoms need to be explicitly
			# added via _add_pkg() so that they are included in the
			# digraph (needed at least for --tree display).
				for atom in arg.set:
					pkg, existing_node = self._select_package(
						arg.root_config.root, atom)
					if existing_node is None and \
						if not self._add_pkg(pkg, Dependency(atom=atom,
							root=pkg.root, parent=arg)):

			# Allow unsatisfied deps here to avoid showing a masking
			# message for an unsatisfied dep that isn't necessarily
			# masked.
			if not self._create_graph(allow_unsatisfied=True):
			if masked_tasks or self._unsatisfied_deps:
				# This probably means that a required package
				# was dropped via --skipfirst. It makes the
				# resume list invalid, so convert it to a
				# UnsatisfiedResumeDep exception.
				raise self.UnsatisfiedResumeDep(self,
					masked_tasks + self._unsatisfied_deps)
			self._serialized_tasks_cache = None
		except self._unknown_internal_error:
	def _load_favorites(self, favorites):
		"""
		Use a list of favorites to resume state from a
		previous select_files() call. This creates similar
		DependencyArg instances to those that would have
		been created by the original select_files() call.
		This allows Package instances to be matched with
		DependencyArg instances during graph creation.
		"""
		root_config = self.roots[self.target_root]
		getSetAtoms = root_config.setconfig.getSetAtoms
		sets = root_config.sets
			if not isinstance(x, basestring):
			if x in ("system", "world"):
			if x.startswith(SETPREFIX):
				s = x[len(SETPREFIX):]
				# Recursively expand sets so that containment tests in
				# self._get_parent_sets() properly match atoms in nested
				# sets (like if world contains system).
				expanded_set = InternalPackageSet(
					initial_atoms=getSetAtoms(s))
				self._sets[s] = expanded_set
				args.append(SetArg(arg=x, set=expanded_set,
					root_config=root_config))
				if not portage.isvalidatom(x):
				args.append(AtomArg(arg=x, atom=x,
					root_config=root_config))

		# Create the "args" package set from atoms and
		# packages given as arguments.
		args_set = self._sets["args"]
			if not isinstance(arg, (AtomArg, PackageArg)):
			if myatom in args_set:
			args_set.add(myatom)
		self._set_atoms.update(chain(*self._sets.itervalues()))
		# Map (atom, root) keys to the DependencyArg instances that
		# reference them, for later parent lookups.
		atom_arg_map = self._atom_arg_map
			for atom in arg.set:
				atom_key = (atom, arg.root_config.root)
				refs = atom_arg_map.get(atom_key)
				atom_arg_map[atom_key] = refs
	class UnsatisfiedResumeDep(portage.exception.PortageException):
		"""
		A dependency of a resume list is not installed. This
		can occur when a required package is dropped from the
		merge list via --skipfirst.
		"""
		def __init__(self, depgraph, value):
			portage.exception.PortageException.__init__(self, value)
			# Keep a reference to the depgraph so that callers can
			# inspect the graph that produced this failure.
			self.depgraph = depgraph
	class _internal_exception(portage.exception.PortageException):
		"""
		Base class for exceptions that the depgraph uses internally
		for control flow.
		"""
		def __init__(self, value=""):
			portage.exception.PortageException.__init__(self, value)
	class _unknown_internal_error(_internal_exception):
		"""
		Used by the depgraph internally to terminate graph creation.
		The specific reason for the failure should have been dumped
		to stderr, unfortunately, the exact reason for the failure
		isn't known.
		"""
	class _serialize_tasks_retry(_internal_exception):
		"""
		This is raised by the _serialize_tasks() method when it needs to
		be called again for some reason. The only case that it's currently
		used for is when neglected dependencies need to be added to the
		graph in order to avoid making a potentially unsafe decision.
		"""
class _dep_check_composite_db(portage.dbapi):
	"""
	A dbapi-like interface that is optimized for use in dep_check() calls.
	This is built on top of the existing depgraph package selection logic.
	Some packages that have been added to the graph may be masked from this
	view in order to influence the atom preference selection that occurs
	via dep_check().
	"""
	def __init__(self, depgraph, root):
		portage.dbapi.__init__(self)
		self._depgraph = depgraph
		# Per-atom match() results and cpv -> Package lookups are cached.
		self._match_cache = {}
		self._cpv_pkg_map = {}

	def match(self, atom):
		ret = self._match_cache.get(atom)
		atom = self._dep_expand(atom)
		pkg, existing = self._depgraph._select_package(self._root, atom)
		# Return the highest available from select_package() as well as
		# any matching slots in the graph db.
		slots.add(pkg.metadata["SLOT"])
		atom_cp = portage.dep_getkey(atom)
		if pkg.cp.startswith("virtual/"):
			# For new-style virtual lookahead that occurs inside
			# dep_check(), examine all slots. This is needed
			# so that newer slots will not unnecessarily be pulled in
			# when a satisfying lower slot is already installed. For
			# example, if virtual/jdk-1.4 is satisfied via kaffe then
			# there's no need to pull in a newer slot to satisfy a
			# virtual/jdk dependency.
			for db, pkg_type, built, installed, db_keys in \
				self._depgraph._filtered_trees[self._root]["dbs"]:
				for cpv in db.match(atom):
					if portage.cpv_getkey(cpv) != pkg.cp:
					slots.add(db.aux_get(cpv, ["SLOT"])[0])
		if self._visible(pkg):
			self._cpv_pkg_map[pkg.cpv] = pkg
		slots.remove(pkg.metadata["SLOT"])
			slot_atom = "%s:%s" % (atom_cp, slots.pop())
			pkg, existing = self._depgraph._select_package(
				self._root, slot_atom)
			if not self._visible(pkg):
			self._cpv_pkg_map[pkg.cpv] = pkg
		self._cpv_sort_ascending(ret)
		self._match_cache[orig_atom] = ret

	def _visible(self, pkg):
		# Decide whether pkg should be visible through this dbapi view.
		if pkg.installed and "selective" not in self._depgraph.myparams:
			arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
		except (StopIteration, portage.exception.InvalidDependString):
				self._depgraph.pkgsettings[pkg.root], pkg):
		except portage.exception.InvalidDependString:

	def _dep_expand(self, atom):
		"""
		This is only needed for old installed packages that may
		contain atoms that are not fully qualified with a specific
		category. Emulate the cpv_expand() function that's used by
		dbapi.match() in cases like this. If there are multiple
		matches, it's often due to a new-style virtual that has
		been added, so try to filter those out to avoid raising
		an AmbiguousPackageName exception.
		"""
		root_config = self._depgraph.roots[self._root]
		expanded_atoms = self._depgraph._dep_expand(root_config, atom)
		if len(expanded_atoms) > 1:
			non_virtual_atoms = []
			for x in expanded_atoms:
				if not portage.dep_getkey(x).startswith("virtual/"):
					non_virtual_atoms.append(x)
			if len(non_virtual_atoms) == 1:
				expanded_atoms = non_virtual_atoms
		if len(expanded_atoms) > 1:
			# compatible with portage.cpv_expand()
			raise portage.exception.AmbiguousPackageName(
				[portage.dep_getkey(x) for x in expanded_atoms])
			atom = expanded_atoms[0]
			null_atom = insert_category_into_atom(atom, "null")
			null_cp = portage.dep_getkey(null_atom)
			cat, atom_pn = portage.catsplit(null_cp)
			virts_p = root_config.settings.get_virts_p().get(atom_pn)
				# Allow the resolver to choose which virtual.
				atom = insert_category_into_atom(atom, "virtual")
				atom = insert_category_into_atom(atom, "null")

	def aux_get(self, cpv, wants):
		# Serve metadata for packages previously mapped via match();
		# missing keys yield empty strings.
		metadata = self._cpv_pkg_map[cpv].metadata
		return [metadata.get(x, "") for x in wants]
class RepoDisplay(object):
	"""
	Maps repository paths (PORTDIR and PORTDIR_OVERLAY entries) to small
	numeric indices for compact display; the legend text is produced
	via str() on the instance.
	"""
	def __init__(self, roots):
		self._shown_repos = {}
		self._unknown_repo = False
		for root_config in roots.itervalues():
			portdir = root_config.settings.get("PORTDIR")
			repo_paths.add(portdir)
			overlays = root_config.settings.get("PORTDIR_OVERLAY")
			repo_paths.update(overlays.split())
		repo_paths = list(repo_paths)
		self._repo_paths = repo_paths
		# Real (symlink-resolved) paths are used for lookups in repoStr().
		self._repo_paths_real = [ os.path.realpath(repo_path) \
			for repo_path in repo_paths ]

		# pre-allocate index for PORTDIR so that it always has index 0.
		for root_config in roots.itervalues():
			portdb = root_config.trees["porttree"].dbapi
			portdir = portdb.porttree_root
			self.repoStr(portdir)

	def repoStr(self, repo_path_real):
		# Look up (or allocate) the display index for the repository at
		# repo_path_real; unknown paths are flagged for the "[?]" legend.
		real_index = self._repo_paths_real.index(repo_path_real)
		if real_index == -1:
			self._unknown_repo = True
		shown_repos = self._shown_repos
		repo_paths = self._repo_paths
		repo_path = repo_paths[real_index]
		index = shown_repos.get(repo_path)
			index = len(shown_repos)
			shown_repos[repo_path] = index

		shown_repos = self._shown_repos
		unknown_repo = self._unknown_repo
		if shown_repos or self._unknown_repo:
			output.append("Portage tree and overlays:\n")
		# Invert the path -> index mapping for ordered legend output.
		show_repo_paths = list(shown_repos)
		for repo_path, repo_index in shown_repos.iteritems():
			show_repo_paths[repo_index] = repo_path
		for index, repo_path in enumerate(show_repo_paths):
			output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
		output.append(" "+teal("[?]") + \
			" indicates that the source repository could not be determined\n")
		return "".join(output)
class PackageCounters(object):
	"""
	Tallies of merge-list categories (upgrades, downgrades, new slots,
	reinstalls, uninstalls, blocks, fetch restrictions, interactive
	packages, total download size) with a human-readable summary.
	"""
	def __init__(self):
		self.blocks_satisfied = 0
		self.restrict_fetch = 0
		self.restrict_fetch_satisfied = 0
		self.interactive = 0

		total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
		myoutput.append("Total: %s package" % total_installs)
		if total_installs != 1:
			myoutput.append("s")
		if total_installs != 0:
			myoutput.append(" (")
		if self.upgrades > 0:
			details.append("%s upgrade" % self.upgrades)
			if self.upgrades > 1:
		if self.downgrades > 0:
			details.append("%s downgrade" % self.downgrades)
			if self.downgrades > 1:
			details.append("%s new" % self.new)
		if self.newslot > 0:
			details.append("%s in new slot" % self.newslot)
			if self.newslot > 1:
			details.append("%s reinstall" % self.reinst)
			details.append("%s uninstall" % self.uninst)
		if self.interactive > 0:
			details.append("%s %s" % (self.interactive,
				colorize("WARN", "interactive")))
		myoutput.append(", ".join(details))
		if total_installs != 0:
			myoutput.append(")")
		myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
		if self.restrict_fetch:
			myoutput.append("\nFetch Restriction: %s package" % \
				self.restrict_fetch)
			if self.restrict_fetch > 1:
				myoutput.append("s")
		if self.restrict_fetch_satisfied < self.restrict_fetch:
			# Count of fetch-restricted packages whose distfiles are
			# not already present.
			myoutput.append(bad(" (%s unsatisfied)") % \
				(self.restrict_fetch - self.restrict_fetch_satisfied))
		myoutput.append("\nConflict: %s block" % \
			myoutput.append("s")
		if self.blocks_satisfied < self.blocks:
			myoutput.append(bad(" (%s unsatisfied)") % \
				(self.blocks - self.blocks_satisfied))
		return "".join(myoutput)
class PollSelectAdapter(PollConstants):

	"""
	Use select to emulate a poll object, for
	systems that don't support poll().
	"""
		self._registered = {}
		self._select_args = [[], [], []]

	def register(self, fd, *args):
		"""
		Only POLLIN is currently supported!
		"""
			"register expected at most 2 arguments, got " + \
			repr(1 + len(args)))

		eventmask = PollConstants.POLLIN | \
			PollConstants.POLLPRI | PollConstants.POLLOUT
		self._registered[fd] = eventmask
		# Invalidate the cached select() argument lists; poll()
		# rebuilds them lazily from self._registered.
		self._select_args = None

	def unregister(self, fd):
		self._select_args = None
		del self._registered[fd]

	def poll(self, *args):
			"poll expected at most 2 arguments, got " + \
			repr(1 + len(args)))

		select_args = self._select_args
		if select_args is None:
			select_args = [self._registered.keys(), [], []]

		if timeout is not None:
			# Copy before appending a timeout so the cached args
			# stay timeout-free.
			select_args = select_args[:]
			# Translate poll() timeout args to select() timeout args:
			#
			#          | units        | value(s) for indefinite block
			# ---------|--------------|------------------------------
			#   poll   | milliseconds | omitted, negative, or None
			# ---------|--------------|------------------------------
			#   select | seconds      | omitted
			# ---------|--------------|------------------------------

			if timeout is not None and timeout < 0:
			if timeout is not None:
				# poll() takes milliseconds; select() takes seconds.
				select_args.append(timeout / 1000)

		select_events = select.select(*select_args)
		for fd in select_events[0]:
			poll_events.append((fd, PollConstants.POLLIN))
class SequentialTaskQueue(SlotObject):
	"""
	A FIFO queue of tasks that are started up to max_jobs at a time
	(max_jobs is True means no limit).
	"""

	__slots__ = ("max_jobs", "running_tasks") + \
		("_dirty", "_scheduling", "_task_queue")

	def __init__(self, **kwargs):
		SlotObject.__init__(self, **kwargs)
		self._task_queue = deque()
		self.running_tasks = set()
		if self.max_jobs is None:

	def add(self, task):
		self._task_queue.append(task)

	def addFront(self, task):
		# Queue a task ahead of everything already waiting.
		self._task_queue.appendleft(task)

		if self._scheduling:
			# Ignore any recursive schedule() calls triggered via
			# self._task_exit().

		self._scheduling = True

		task_queue = self._task_queue
		running_tasks = self.running_tasks
		max_jobs = self.max_jobs
		state_changed = False

		# Start queued tasks while there is capacity; max_jobs is True
		# means unlimited.
		while task_queue and \
			(max_jobs is True or len(running_tasks) < max_jobs):
			task = task_queue.popleft()
			cancelled = getattr(task, "cancelled", None)
			running_tasks.add(task)
			task.addExitListener(self._task_exit)
			state_changed = True

		self._scheduling = False

		return state_changed

	def _task_exit(self, task):
		"""
		Since we can always rely on exit listeners being called, the set of
		running tasks is always pruned automatically and there is never any need
		to actively prune it.
		"""
		self.running_tasks.remove(task)
		if self._task_queue:

		self._task_queue.clear()
		running_tasks = self.running_tasks
		while running_tasks:
			task = running_tasks.pop()
			task.removeExitListener(self._task_exit)

	def __nonzero__(self):
		return bool(self._task_queue or self.running_tasks)

		return len(self._task_queue) + len(self.running_tasks)
# Cached result of can_poll_device(); None means "not yet probed".
_can_poll_device = None

def can_poll_device():
	"""
	Test if it's possible to use poll() on a device such as a pty. This
	is known to fail on Darwin.
	@returns: True if poll() on a device succeeds, False otherwise.
	"""
	global _can_poll_device
	if _can_poll_device is not None:
		return _can_poll_device

	if not hasattr(select, "poll"):
		_can_poll_device = False
		return _can_poll_device

	dev_null = open('/dev/null', 'rb')
	_can_poll_device = False
	return _can_poll_device
	p.register(dev_null.fileno(), PollConstants.POLLIN)

	invalid_request = False
	for f, event in p.poll():
		if event & PollConstants.POLLNVAL:
			# POLLNVAL means the kernel rejected the poll request
			# for this device.
			invalid_request = True
	_can_poll_device = not invalid_request
	return _can_poll_device
def create_poll_instance():
	"""
	Create an instance of select.poll, or an instance of
	PollSelectAdapter when there is no poll() implementation or
	it is broken somehow.
	"""
	if not can_poll_device():
		# Fall back to the select()-based emulation.
		return PollSelectAdapter()
	return select.poll()
class PollScheduler(object):
	"""
	Base class for poll()-driven schedulers; subclasses provide
	_schedule_tasks() and _running_job_count().
	"""

	class _sched_iface_class(SlotObject):
		# Narrow interface handed out to tasks: just register,
		# schedule and unregister callables.
		__slots__ = ("register", "schedule", "unregister")

		self._max_load = None
		self._poll_event_queue = []
		self._poll_event_handlers = {}
		self._poll_event_handler_ids = {}
		# Increment id for each new handler.
		self._event_handler_id = 0
		self._poll_obj = create_poll_instance()
		self._scheduling = False

	def _schedule(self):
		"""
		Calls _schedule_tasks() and automatically returns early from
		any recursive calls to this method that the _schedule_tasks()
		call might trigger. This makes _schedule() safe to call from
		inside exit listeners.
		"""
		if self._scheduling:
		self._scheduling = True
			return self._schedule_tasks()
		self._scheduling = False

	def _running_job_count(self):

	def _can_add_job(self):
		# Enforce both the job-count limit and, when configured,
		# the load-average limit.
		max_jobs = self._max_jobs
		max_load = self._max_load

		if self._max_jobs is not True and \
			self._running_job_count() >= self._max_jobs:

		if max_load is not None and \
			(max_jobs is True or max_jobs > 1) and \
			self._running_job_count() >= 1:
				avg1, avg5, avg15 = os.getloadavg()
			except (AttributeError, OSError), e:
				writemsg("!!! getloadavg() failed: %s\n" % (e,),

			if avg1 >= max_load:

	def _poll(self, timeout=None):
		"""
		All poll() calls pass through here. The poll events
		are added directly to self._poll_event_queue.
		In order to avoid endless blocking, this raises
		StopIteration if timeout is None and there are
		no file descriptors to poll.
		"""
		if not self._poll_event_handlers:
		if timeout is None and \
			not self._poll_event_handlers:
			raise StopIteration(
				"timeout is None and there are no poll() event handlers")

		# The following error is known to occur with Linux kernel versions
		#
		#   select.error: (4, 'Interrupted system call')
		#
		# This error has been observed after a SIGSTOP, followed by SIGCONT.
		# Treat it similar to EAGAIN if timeout is None, otherwise just return
		# without any events.
				self._poll_event_queue.extend(self._poll_obj.poll(timeout))
			except select.error, e:
				writemsg_level("\n!!! select error: %s\n" % (e,),
					level=logging.ERROR, noiselevel=-1)
				if timeout is not None:

	def _next_poll_event(self, timeout=None):
		"""
		Since the _schedule_wait() loop is called by event
		handlers from _poll_loop(), maintain a central event
		queue for both of them to share events from a single
		poll() call. In order to avoid endless blocking, this
		raises StopIteration if timeout is None and there are
		no file descriptors to poll.
		"""
		if not self._poll_event_queue:
		return self._poll_event_queue.pop()

	def _poll_loop(self):

		event_handlers = self._poll_event_handlers
		event_handled = False

			while event_handlers:
				f, event = self._next_poll_event()
				handler, reg_id = event_handlers[f]
				event_handled = True
		except StopIteration:
			event_handled = True

		if not event_handled:
			raise AssertionError("tight loop")

	def _schedule_yield(self):
		"""
		Schedule for a short period of time chosen by the scheduler based
		on internal state. Synchronous tasks should call this periodically
		in order to allow the scheduler to service pending poll events. The
		scheduler will call poll() exactly once, without blocking, and any
		resulting poll events will be serviced.
		"""
		event_handlers = self._poll_event_handlers

		if not event_handlers:
			return bool(events_handled)

		if not self._poll_event_queue:

			while event_handlers and self._poll_event_queue:
				f, event = self._next_poll_event()
				handler, reg_id = event_handlers[f]
		except StopIteration:

		return bool(events_handled)

	def _register(self, f, eventmask, handler):
		"""
		@return: A unique registration id, for use in schedule() or
			unregister() calls.
		"""
		if f in self._poll_event_handlers:
			raise AssertionError("fd %d is already registered" % f)
		self._event_handler_id += 1
		reg_id = self._event_handler_id
		self._poll_event_handler_ids[reg_id] = f
		self._poll_event_handlers[f] = (handler, reg_id)
		self._poll_obj.register(f, eventmask)

	def _unregister(self, reg_id):
		# Remove both the fd mapping and the poll registration.
		f = self._poll_event_handler_ids[reg_id]
		self._poll_obj.unregister(f)
		del self._poll_event_handlers[f]
		del self._poll_event_handler_ids[reg_id]

	def _schedule_wait(self, wait_ids):
		"""
		Schedule until wait_id is no longer registered
		@param wait_id: a task id to wait for
		"""
		event_handlers = self._poll_event_handlers
		handler_ids = self._poll_event_handler_ids
		event_handled = False

		if isinstance(wait_ids, int):
			# Accept a single id as well as a collection of ids.
			wait_ids = frozenset([wait_ids])

			while wait_ids.intersection(handler_ids):
				f, event = self._next_poll_event()
				handler, reg_id = event_handlers[f]
				event_handled = True
		except StopIteration:
			event_handled = True

		return event_handled
class QueueScheduler(PollScheduler):

	"""
	Add instances of SequentialTaskQueue and then call run(). The
	run() method returns when no tasks remain.
	"""

	def __init__(self, max_jobs=None, max_load=None):
		PollScheduler.__init__(self)

		if max_jobs is None:

		self._max_jobs = max_jobs
		self._max_load = max_load
		# Tasks interact with the scheduler only through this
		# narrow interface.
		self.sched_iface = self._sched_iface_class(
			register=self._register,
			schedule=self._schedule_wait,
			unregister=self._unregister)

		self._schedule_listeners = []

		self._queues.append(q)

	def remove(self, q):
		self._queues.remove(q)

		while self._schedule():
		while self._running_job_count():

	def _schedule_tasks(self):
		"""
		@returns: True if there may be remaining tasks to schedule,
			False otherwise.
		"""
		while self._can_add_job():
			n = self._max_jobs - self._running_job_count()
			if not self._start_next_job(n):
		for q in self._queues:

	def _running_job_count(self):
		# Recount running tasks across all queues, caching the total
		# in self._jobs.
		for q in self._queues:
			job_count += len(q.running_tasks)
		self._jobs = job_count

	def _start_next_job(self, n=1):
		# Give each queue a chance to start tasks until n have
		# actually started; report how many started.
		for q in self._queues:
			initial_job_count = len(q.running_tasks)
			final_job_count = len(q.running_tasks)
			if final_job_count > initial_job_count:
				started_count += (final_job_count - initial_job_count)
			if started_count >= n:
		return started_count
# Thin convenience wrapper: one SequentialTaskQueue driven by a
# QueueScheduler. run is delegated directly to the inner scheduler.
9112 class TaskScheduler(object):
9115 A simple way to handle scheduling of AsynchrousTask instances. Simply
9116 add tasks and call run(). The run() method returns when no tasks remain.
9119 def __init__(self, max_jobs=None, max_load=None):
9120 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9121 self._scheduler = QueueScheduler(
9122 max_jobs=max_jobs, max_load=max_load)
# Re-export the scheduler's interface and run() so callers never
# touch the internal queue/scheduler pair directly.
9123 self.sched_iface = self._scheduler.sched_iface
9124 self.run = self._scheduler.run
9125 self._scheduler.add(self._queue)
9127 def add(self, task):
9128 self._queue.add(task)
# One-line terminal status display ("Jobs: X of Y complete ... Load avg")
# for the parallel scheduler. Uses object.__setattr__ throughout because
# __setattr__ is overridden to trigger redisplay on bound properties.
9130 class JobStatusDisplay(object):
# Attributes whose assignment marks the display as changed.
9132 _bound_properties = ("curval", "failed", "running")
9133 _jobs_column_width = 48
9135 # Don't update the display unless at least this much
9136 # time has passed, in units of seconds.
9137 _min_display_latency = 2
# Fallback control codes used when terminfo lookup is unavailable
# (dict literals elided in this listing).
9139 _default_term_codes = {
9145 _termcap_name_map = {
9146 'carriage_return' : 'cr',
9151 def __init__(self, out=sys.stdout, quiet=False):
9152 object.__setattr__(self, "out", out)
9153 object.__setattr__(self, "quiet", quiet)
9154 object.__setattr__(self, "maxval", 0)
9155 object.__setattr__(self, "merges", 0)
9156 object.__setattr__(self, "_changed", False)
9157 object.__setattr__(self, "_displayed", False)
9158 object.__setattr__(self, "_last_display_time", 0)
9159 object.__setattr__(self, "width", 80)
9162 isatty = hasattr(out, "isatty") and out.isatty()
9163 object.__setattr__(self, "_isatty", isatty)
# Non-tty output or failed terminfo init: fall back to defaults.
9164 if not isatty or not self._init_term():
9166 for k, capname in self._termcap_name_map.iteritems():
9167 term_codes[k] = self._default_term_codes[capname]
9168 object.__setattr__(self, "_term_codes", term_codes)
# Look up terminal control sequences via curses/terminfo; returns
# False when setupterm fails so __init__ can fall back to defaults.
9170 def _init_term(self):
9172 Initialize term control codes.
9174 @returns: True if term codes were successfully initialized,
9178 term_type = os.environ.get("TERM", "vt100")
9184 curses.setupterm(term_type, self.out.fileno())
9185 tigetstr = curses.tigetstr
9186 except curses.error:
9191 if tigetstr is None:
9195 for k, capname in self._termcap_name_map.iteritems():
9196 code = tigetstr(capname)
# Missing capability: substitute the hard-coded default.
9198 code = self._default_term_codes[capname]
9199 term_codes[k] = code
9200 object.__setattr__(self, "_term_codes", term_codes)
9203 def _format_msg(self, msg):
9204 return ">>> %s" % msg
# Erase the current status line: carriage return + clear-to-eol
# (enclosing method's def line is elided in this listing).
9208 self._term_codes['carriage_return'] + \
9209 self._term_codes['clr_eol'])
9211 self._displayed = False
9213 def _display(self, line):
9214 self.out.write(line)
9216 self._displayed = True
# Write msg, using plain newline-terminated output on non-ttys and
# the in-place status line on ttys.
9218 def _update(self, msg):
9221 if not self._isatty:
9222 out.write(self._format_msg(msg) + self._term_codes['newline'])
9224 self._displayed = True
9230 self._display(self._format_msg(msg))
# Print a one-off message above the status line, then mark the
# status as needing redisplay.
9232 def displayMessage(self, msg):
9234 was_displayed = self._displayed
9236 if self._isatty and self._displayed:
9239 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9241 self._displayed = False
9244 self._changed = True
# reset(): zero all bound counters and finish the current line
# (def line elided in this listing).
9250 for name in self._bound_properties:
9251 object.__setattr__(self, name, 0)
9254 self.out.write(self._term_codes['newline'])
9256 self._displayed = False
# Intercept attribute writes so changes to curval/failed/running
# mark the display dirty; unchanged values are ignored.
9258 def __setattr__(self, name, value):
9259 old_value = getattr(self, name)
9260 if value == old_value:
9262 object.__setattr__(self, name, value)
9263 if name in self._bound_properties:
9264 self._property_change(name, old_value, value)
9266 def _property_change(self, name, old_value, new_value):
9267 self._changed = True
# Format the system load average; AttributeError covers platforms
# without os.getloadavg().
9270 def _load_avg_str(self):
9272 avg = os.getloadavg()
9273 except (AttributeError, OSError), e:
9285 return ", ".join(("%%.%df" % digits ) % x for x in avg)
# display(): rate-limited redraw entry point (def line elided).
9289 Display status on stdout, but only if something has
9290 changed since the last call.
9296 current_time = time.time()
9297 time_delta = current_time - self._last_display_time
9298 if self._displayed and \
9300 if not self._isatty:
# Throttle tty updates to one per _min_display_latency seconds.
9302 if time_delta < self._min_display_latency:
9305 self._last_display_time = current_time
9306 self._changed = False
9307 self._display_status()
# Build the status line twice in parallel: a colorized stream for
# the terminal and a plain stream for width math / xterm title.
9309 def _display_status(self):
9310 # Don't use len(self._completed_tasks) here since that also
9311 # can include uninstall tasks.
9312 curval_str = str(self.curval)
9313 maxval_str = str(self.maxval)
9314 running_str = str(self.running)
9315 failed_str = str(self.failed)
9316 load_avg_str = self._load_avg_str()
9318 color_output = StringIO.StringIO()
9319 plain_output = StringIO.StringIO()
9320 style_file = portage.output.ConsoleStyleFile(color_output)
9321 style_file.write_listener = plain_output
9322 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9323 style_writer.style_listener = style_file.new_styles
9324 f = formatter.AbstractFormatter(style_writer)
9326 number_style = "INFORM"
9327 f.add_literal_data("Jobs: ")
9328 f.push_style(number_style)
9329 f.add_literal_data(curval_str)
9331 f.add_literal_data(" of ")
9332 f.push_style(number_style)
9333 f.add_literal_data(maxval_str)
9335 f.add_literal_data(" complete")
9338 f.add_literal_data(", ")
9339 f.push_style(number_style)
9340 f.add_literal_data(running_str)
9342 f.add_literal_data(" running")
9345 f.add_literal_data(", ")
9346 f.push_style(number_style)
9347 f.add_literal_data(failed_str)
9349 f.add_literal_data(" failed")
# Pad the jobs section to a fixed column so "Load avg" aligns.
9351 padding = self._jobs_column_width - len(plain_output.getvalue())
9353 f.add_literal_data(padding * " ")
9355 f.add_literal_data("Load avg: ")
9356 f.add_literal_data(load_avg_str)
9358 # Truncate to fit width, to avoid making the terminal scroll if the
9359 # line overflows (happens when the load average is large).
9360 plain_output = plain_output.getvalue()
9361 if self._isatty and len(plain_output) > self.width:
9362 # Use plain_output here since it's easier to truncate
9363 # properly than the color output which contains console
9365 self._update(plain_output[:self.width])
9367 self._update(color_output.getvalue())
# Mirror the (whitespace-collapsed) status into the xterm title.
9369 xtermTitle(" ".join(plain_output.split()))
# Main emerge build scheduler: class-level option sets, constants,
# and nested SlotObject helper classes.
9371 class Scheduler(PollScheduler):
# Option sets that disable blocker checks, background mode, or
# self-restart respectively.
9373 _opts_ignore_blockers = \
9374 frozenset(["--buildpkgonly",
9375 "--fetchonly", "--fetch-all-uri",
9376 "--nodeps", "--pretend"])
9378 _opts_no_background = \
9379 frozenset(["--pretend",
9380 "--fetchonly", "--fetch-all-uri"])
9382 _opts_no_restart = frozenset(["--buildpkgonly",
9383 "--fetchonly", "--fetch-all-uri", "--pretend"])
# Options that must not leak into an automatic --resume invocation.
9385 _bad_resume_opts = set(["--ask", "--changelog",
9386 "--resume", "--skipfirst"])
9388 _fetch_log = "/var/log/emerge-fetch.log"
# Callback bundle handed to dblink/ebuild tasks (slot tuple is
# continued on an elided line).
9390 class _iface_class(SlotObject):
9391 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
9392 "dblinkElog", "fetch", "register", "schedule",
9393 "scheduleSetup", "scheduleUnpack", "scheduleYield",
9396 class _fetch_iface_class(SlotObject):
9397 __slots__ = ("log_file", "schedule")
# Named task queues: merge/jobs/fetch/unpack.
9399 _task_queues_class = slot_dict_class(
9400 ("merge", "jobs", "fetch", "unpack"), prefix="")
# Boolean views of the build- and binpkg-related command options;
# slot names map to "--<name-with-dashes>" in __init__.
9402 class _build_opts_class(SlotObject):
9403 __slots__ = ("buildpkg", "buildpkgonly",
9404 "fetch_all_uri", "fetchonly", "pretend")
9406 class _binpkg_opts_class(SlotObject):
9407 __slots__ = ("fetchonly", "getbinpkg", "pretend")
9409 class _pkg_count_class(SlotObject):
9410 __slots__ = ("curval", "maxval")
# Logging wrapper that suppresses short xterm-title messages when
# titles are disabled, to avoid clobbering the status display.
9412 class _emerge_log_class(SlotObject):
9413 __slots__ = ("xterm_titles",)
9415 def log(self, *pargs, **kwargs):
9416 if not self.xterm_titles:
9417 # Avoid interference with the scheduler's status display.
9418 kwargs.pop("short_msg", None)
9419 emergelog(self.xterm_titles, *pargs, **kwargs)
# Record of one failed build/install for the end-of-run report.
9421 class _failed_pkg(SlotObject):
9422 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
9424 class _ConfigPool(object):
9425 """Interface for a task to temporarily allocate a config
9426 instance from a pool. This allows a task to be constructed
9427 long before the config instance actually becomes needed, like
9428 when prefetchers are constructed for the whole merge list."""
9429 __slots__ = ("_root", "_allocate", "_deallocate")
9430 def __init__(self, root, allocate, deallocate):
9432 self._allocate = allocate
9433 self._deallocate = deallocate
9435 return self._allocate(self._root)
9436 def deallocate(self, settings):
9437 self._deallocate(settings)
# Sentinel exception used to abort scheduling after details have
# already been printed.
9439 class _unknown_internal_error(portage.exception.PortageException):
9441 Used internally to terminate scheduling. The specific reason for
9442 the failure should have been dumped to stderr.
9444 def __init__(self, value=""):
9445 portage.exception.PortageException.__init__(self, value)
# Build all scheduler state from the resolver's output: option views,
# task queues, status display, per-root config pools/blocker dbs, the
# callback interface for tasks, and parallel-fetch setup.
9447 def __init__(self, settings, trees, mtimedb, myopts,
9448 spinner, mergelist, favorites, digraph):
9449 PollScheduler.__init__(self)
9450 self.settings = settings
9451 self.target_root = settings["ROOT"]
9453 self.myopts = myopts
9454 self._spinner = spinner
9455 self._mtimedb = mtimedb
9456 self._mergelist = mergelist
9457 self._favorites = favorites
9458 self._args_set = InternalPackageSet(favorites)
# Map slot names to their "--flag-with-dashes" presence in myopts.
9459 self._build_opts = self._build_opts_class()
9460 for k in self._build_opts.__slots__:
9461 setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
9462 self._binpkg_opts = self._binpkg_opts_class()
9463 for k in self._binpkg_opts.__slots__:
9464 setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
9467 self._logger = self._emerge_log_class()
# One SequentialTaskQueue per named queue (merge/jobs/fetch/unpack).
9468 self._task_queues = self._task_queues_class()
9469 for k in self._task_queues.allowed_keys:
9470 setattr(self._task_queues, k,
9471 SequentialTaskQueue())
9472 self._status_display = JobStatusDisplay()
9473 self._max_load = myopts.get("--load-average")
9474 max_jobs = myopts.get("--jobs")
9475 if max_jobs is None:
9477 self._set_max_jobs(max_jobs)
9479 # The root where the currently running
9480 # portage instance is installed.
9481 self._running_root = trees["/"]["root_config"]
9483 if settings.get("PORTAGE_DEBUG", "") == "1":
# Per-root state (loop header over roots is elided in this listing).
9485 self.pkgsettings = {}
9486 self._config_pool = {}
9487 self._blocker_db = {}
9489 self._config_pool[root] = []
9490 self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
# Bundle the scheduler callbacks that tasks are allowed to use.
9492 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
9493 schedule=self._schedule_fetch)
9494 self._sched_iface = self._iface_class(
9495 dblinkEbuildPhase=self._dblink_ebuild_phase,
9496 dblinkDisplayMerge=self._dblink_display_merge,
9497 dblinkElog=self._dblink_elog,
9498 fetch=fetch_iface, register=self._register,
9499 schedule=self._schedule_wait,
9500 scheduleSetup=self._schedule_setup,
9501 scheduleUnpack=self._schedule_unpack,
9502 scheduleYield=self._schedule_yield,
9503 unregister=self._unregister)
# Weak values: prefetchers disappear from the map once collected.
9505 self._prefetchers = weakref.WeakValueDictionary()
9506 self._pkg_queue = []
9507 self._completed_tasks = set()
9509 self._failed_pkgs = []
9510 self._failed_pkgs_all = []
9511 self._failed_pkgs_die_msgs = []
9512 self._post_mod_echo_msgs = []
9513 self._parallel_fetch = False
# Progress denominator counts only real merges, not uninstalls.
9514 merge_count = len([x for x in mergelist \
9515 if isinstance(x, Package) and x.operation == "merge"])
9516 self._pkg_count = self._pkg_count_class(
9517 curval=0, maxval=merge_count)
9518 self._status_display.maxval = self._pkg_count.maxval
9520 # The load average takes some time to respond when new
9521 # jobs are added, so we need to limit the rate of adding
9523 self._job_delay_max = 10
9524 self._job_delay_factor = 1.0
9525 self._job_delay_exp = 1.5
9526 self._previous_job_start_time = None
9528 self._set_digraph(digraph)
9530 # This is used to memoize the _choose_pkg() result when
9531 # no packages can be chosen until one of the existing
9533 self._choose_pkg_return_early = False
# parallel-fetch needs distlocks; warn and stay serial otherwise.
9535 features = self.settings.features
9536 if "parallel-fetch" in features and \
9537 not ("--pretend" in self.myopts or \
9538 "--fetch-all-uri" in self.myopts or \
9539 "--fetchonly" in self.myopts):
9540 if "distlocks" not in features:
9541 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
9542 portage.writemsg(red("!!!")+" parallel-fetching " + \
9543 "requires the distlocks feature enabled"+"\n",
9545 portage.writemsg(red("!!!")+" you have it disabled, " + \
9546 "thus parallel-fetching is being disabled"+"\n",
9548 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
9549 elif len(mergelist) > 1:
9550 self._parallel_fetch = True
9552 if self._parallel_fetch:
9553 # clear out existing fetch log if it exists
9555 open(self._fetch_log, 'w')
9556 except EnvironmentError:
# Identify the currently installed portage package, if any, so a
# self-upgrade can trigger a restart later.
9559 self._running_portage = None
9560 portage_match = self._running_root.trees["vartree"].dbapi.match(
9561 portage.const.PORTAGE_PACKAGE_ATOM)
9563 cpv = portage_match.pop()
9564 self._running_portage = self._pkg(cpv, "installed",
9565 self._running_root, installed=True)
# Delegate to the base class poll loop (one elided line between def
# and the call — presumably a docstring or spinner update; confirm).
9567 def _poll(self, timeout=None):
9569 PollScheduler._poll(self, timeout=timeout)
# Apply a new job limit to both the scheduler and the jobs queue.
9571 def _set_max_jobs(self, max_jobs):
9572 self._max_jobs = max_jobs
9573 self._task_queues.jobs.max_jobs = max_jobs
# Decide whether to run in background mode (output to logs instead of
# the terminal). Interactive packages force --jobs=1 and foreground
# output; also configures status-display quietness and xterm titles.
9575 def _background_mode(self):
9577 Check if background mode is enabled and adjust states as necessary.
9580 @returns: True if background mode is enabled, False otherwise.
# Background when parallel (--jobs>1) or --quiet, unless an option
# from _opts_no_background (e.g. --pretend) is present.
9582 background = (self._max_jobs is True or \
9583 self._max_jobs > 1 or "--quiet" in self.myopts) and \
9584 not bool(self._opts_no_background.intersection(self.myopts))
9587 interactive_tasks = self._get_interactive_tasks()
9588 if interactive_tasks:
9590 writemsg_level(">>> Sending package output to stdio due " + \
9591 "to interactive package(s):\n",
9592 level=logging.INFO, noiselevel=-1)
9594 for pkg in interactive_tasks:
9595 pkg_str = " " + colorize("INFORM", str(pkg.cpv))
9597 pkg_str += " for " + pkg.root
9600 writemsg_level("".join("%s\n" % (l,) for l in msg),
9601 level=logging.INFO, noiselevel=-1)
# Interactive packages need the terminal, so drop to one job.
9602 if self._max_jobs is True or self._max_jobs > 1:
9603 self._set_max_jobs(1)
9604 writemsg_level(">>> Setting --jobs=1 due " + \
9605 "to the above interactive package(s)\n",
9606 level=logging.INFO, noiselevel=-1)
9608 self._status_display.quiet = \
9610 ("--quiet" in self.myopts and \
9611 "--verbose" not in self.myopts)
9613 self._logger.xterm_titles = \
9614 "notitles" not in self.settings.features and \
9615 self._status_display.quiet
# Return merge-list packages whose PROPERTIES contain "interactive".
# An unparsable PROPERTIES string aborts scheduling entirely.
9619 def _get_interactive_tasks(self):
9620 from portage import flatten
9621 from portage.dep import use_reduce, paren_reduce
9622 interactive_tasks = []
9623 for task in self._mergelist:
# Only actual merges matter; skip blockers/uninstalls/nomerges.
9624 if not (isinstance(task, Package) and \
9625 task.operation == "merge"):
9628 properties = flatten(use_reduce(paren_reduce(
9629 task.metadata["PROPERTIES"]), uselist=task.use.enabled))
9630 except portage.exception.InvalidDependString, e:
9631 show_invalid_depstring_notice(task,
9632 task.metadata["PROPERTIES"], str(e))
9633 raise self._unknown_internal_error()
9634 if "interactive" in properties:
9635 interactive_tasks.append(task)
9636 return interactive_tasks
# Keep the dependency graph only when it is useful: with --nodeps or
# a single job there is nothing to order, so drop it.
9638 def _set_digraph(self, digraph):
9639 if "--nodeps" in self.myopts or \
9640 (self._max_jobs is not True and self._max_jobs < 2):
9642 self._digraph = None
9645 self._digraph = digraph
9646 self._prune_digraph()
# Repeatedly strip root nodes that no longer matter for scheduling:
# non-Packages, installed nomerge nodes, and completed tasks. Removal
# can expose new irrelevant roots, hence the loop until a pass removes
# nothing (loop header is elided in this listing).
9648 def _prune_digraph(self):
9650 Prune any root nodes that are irrelevant.
9653 graph = self._digraph
9654 completed_tasks = self._completed_tasks
9655 removed_nodes = set()
9657 for node in graph.root_nodes():
9658 if not isinstance(node, Package) or \
9659 (node.installed and node.operation == "nomerge") or \
9661 node in completed_tasks:
9662 removed_nodes.add(node)
9664 graph.difference_update(removed_nodes)
9665 if not removed_nodes:
9667 removed_nodes.clear()
# Exception carrying an unmerge failure; the first positional arg is
# kept as the exit status.
9669 class _pkg_failure(portage.exception.PortageException):
9671 An instance of this class is raised by unmerge() when
9672 an uninstallation fails.
9675 def __init__(self, *pargs):
9676 portage.exception.PortageException.__init__(self, pargs)
9678 self.status = pargs[0]
# Front-queue the fetcher so fetch-log access stays serialized.
9680 def _schedule_fetch(self, fetcher):
9682 Schedule a fetcher on the fetch queue, in order to
9683 serialize access to the fetch log.
9685 self._task_queues.fetch.addFront(fetcher)
# Front-queue the setup phase on the merge queue: setup runs outside
# the sandbox, so live-filesystem access must be serialized.
9687 def _schedule_setup(self, setup_phase):
9689 Schedule a setup phase on the merge queue, in order to
9690 serialize unsandboxed access to the live filesystem.
9692 self._task_queues.merge.addFront(setup_phase)
# Queue the unpack phase so live ebuilds get serialized $DISTDIR access.
9695 def _schedule_unpack(self, unpack_phase):
9697 Schedule an unpack phase on the unpack queue, in order
9698 to serialize $DISTDIR access for live ebuilds.
9700 self._task_queues.unpack.add(unpack_phase)
# Blocker lookup entry point; the visible line delegates with
# acquire_lock=0 on the assumption the caller already holds the vdb
# lock (intermediate lines are elided in this listing).
9702 def _find_blockers(self, new_pkg):
9704 Returns a callable which should be called only when
9705 the vdb lock has been acquired.
9708 return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
# Build dblink objects for installed packages that block new_pkg,
# excluding the package in new_pkg's own slot (that one is replaced,
# not blocked). Returns a list of portage.dblink instances.
9711 def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
9712 if self._opts_ignore_blockers.intersection(self.myopts):
9715 # Call gc.collect() here to avoid heap overflow that
9716 # triggers 'Cannot allocate memory' errors (reported
9721 blocker_db = self._blocker_db[new_pkg.root]
9723 blocker_dblinks = []
9724 for blocking_pkg in blocker_db.findInstalledBlockers(
9725 new_pkg, acquire_lock=acquire_lock):
# Same slot or same version: handled by the merge itself.
9726 if new_pkg.slot_atom == blocking_pkg.slot_atom:
9728 if new_pkg.cpv == blocking_pkg.cpv:
9730 blocker_dblinks.append(portage.dblink(
9731 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
9732 self.pkgsettings[blocking_pkg.root], treetype="vartree",
9733 vartree=self.trees[blocking_pkg.root]["vartree"]))
9737 return blocker_dblinks
# Convert a dblink instance back into a Package object by resolving
# its cpv, tree type, and root configuration.
9739 def _dblink_pkg(self, pkg_dblink):
9740 cpv = pkg_dblink.mycpv
9741 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
9742 root_config = self.trees[pkg_dblink.myroot]["root_config"]
9743 installed = type_name == "installed"
9744 return self._pkg(cpv, type_name, root_config, installed=installed)
# Append msg to the log file at log_path; the write/close lines are
# elided from this listing — presumably a write followed by close,
# confirm against the full source.
9746 def _append_to_log_path(self, log_path, msg):
9747 f = open(log_path, 'a')
# elog callback for dblink: in background mode with a log file, route
# the messages into the log instead of the terminal, closing the file
# afterwards (msg-loop and cleanup lines are partly elided).
9753 def _dblink_elog(self, pkg_dblink, phase, func, msgs):
9755 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
9758 background = self._background
9760 if background and log_path is not None:
9761 log_file = open(log_path, 'a')
9766 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
9768 if log_file is not None:
# Display callback for dblink merge output: without a log file, print
# unless backgrounded below WARN level; with a log file, print (in the
# non-background branch, partly elided) and always append to the log.
9771 def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
9772 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
9773 background = self._background
9775 if log_path is None:
9776 if not (background and level < logging.WARN):
9777 portage.util.writemsg_level(msg,
9778 level=level, noiselevel=noiselevel)
9781 portage.util.writemsg_level(msg,
9782 level=level, noiselevel=noiselevel)
9783 self._append_to_log_path(log_path, msg)
# Run one ebuild phase for a merge via an EbuildPhase task so the
# scheduler's event loop keeps running while the phase executes; the
# phase's exit code is returned to dblink.
9785 def _dblink_ebuild_phase(self,
9786 pkg_dblink, pkg_dbapi, ebuild_path, phase):
9788 Using this callback for merge phases allows the scheduler
9789 to run while these phases execute asynchronously, and allows
9790 the scheduler control output handling.
9793 scheduler = self._sched_iface
9794 settings = pkg_dblink.settings
9795 pkg = self._dblink_pkg(pkg_dblink)
9796 background = self._background
9797 log_path = settings.get("PORTAGE_LOG_FILE")
9799 ebuild_phase = EbuildPhase(background=background,
9800 pkg=pkg, phase=phase, scheduler=scheduler,
9801 settings=settings, tree=pkg_dblink.treetype)
9802 ebuild_phase.start()
# The wait between start() and here is elided in this listing.
9805 return ebuild_phase.returncode
# Pre-verify all ebuild manifests with quiet per-root configs so any
# digest failure surfaces before the first build starts. Skipped
# unless FEATURES=strict, and for fetch-only runs.
9807 def _check_manifests(self):
9808 # Verify all the manifests now so that the user is notified of failure
9809 # as soon as possible.
9810 if "strict" not in self.settings.features or \
9811 "--fetchonly" in self.myopts or \
9812 "--fetch-all-uri" in self.myopts:
9815 shown_verifying_msg = False
# Clone each root's settings with PORTAGE_QUIET to silence the
# per-package digest chatter.
9817 for myroot, pkgsettings in self.pkgsettings.iteritems():
9818 quiet_config = portage.config(clone=pkgsettings)
9819 quiet_config["PORTAGE_QUIET"] = "1"
9820 quiet_config.backup_changes("PORTAGE_QUIET")
9821 quiet_settings[myroot] = quiet_config
9824 for x in self._mergelist:
9825 if not isinstance(x, Package) or \
9826 x.type_name != "ebuild":
9829 if not shown_verifying_msg:
9830 shown_verifying_msg = True
9831 self._status_msg("Verifying ebuild manifests")
9833 root_config = x.root_config
9834 portdb = root_config.trees["porttree"].dbapi
9835 quiet_config = quiet_settings[root_config.root]
# "O" points digestcheck at the ebuild's directory.
9836 quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
9837 if not portage.digestcheck([], quiet_config, strict=True):
# Queue background fetch tasks for every merge-list package except the
# first, so downloads overlap with builds when parallel-fetch is on.
9842 def _add_prefetchers(self):
9844 if not self._parallel_fetch:
9847 if self._parallel_fetch:
9848 self._status_msg("Starting parallel fetch")
9850 prefetchers = self._prefetchers
9851 getbinpkg = "--getbinpkg" in self.myopts
9853 # In order to avoid "waiting for lock" messages
9854 # at the beginning, which annoy users, never
9855 # spawn a prefetcher for the first package.
9856 for pkg in self._mergelist[1:]:
9857 prefetcher = self._create_prefetcher(pkg)
9858 if prefetcher is not None:
9859 self._task_queues.fetch.add(prefetcher)
# Weak map: entries vanish once the prefetcher is collected.
9860 prefetchers[pkg] = prefetcher
# Build the right prefetch task for a package: an EbuildFetcher for
# ebuilds, a BinpkgPrefetcher for remote binary packages under
# --getbinpkg, otherwise None (non-applicable).
9862 def _create_prefetcher(self, pkg):
9864 @return: a prefetcher, or None if not applicable
9868 if not isinstance(pkg, Package):
9871 elif pkg.type_name == "ebuild":
# The fetcher allocates its config lazily via _ConfigPool so many
# prefetchers can be constructed up front without cloning configs.
9873 prefetcher = EbuildFetcher(background=True,
9874 config_pool=self._ConfigPool(pkg.root,
9875 self._allocate_config, self._deallocate_config),
9876 fetchonly=1, logfile=self._fetch_log,
9877 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
9879 elif pkg.type_name == "binary" and \
9880 "--getbinpkg" in self.myopts and \
9881 pkg.root_config.trees["bintree"].isremote(pkg.cpv):
9883 prefetcher = BinpkgPrefetcher(background=True,
9884 pkg=pkg, scheduler=self._sched_iface)
# True if a portage self-replacement appears in the merge list at any
# position other than the last — meaning emerge will re-exec itself
# mid-run. Restart-suppressing options short-circuit to False.
9888 def _is_restart_scheduled(self):
9890 Check if the merge list contains a replacement
9891 for the current running instance, that will result
9892 in restart after merge.
9894 @returns: True if a restart is scheduled, False otherwise.
9896 if self._opts_no_restart.intersection(self.myopts):
9899 mergelist = self._mergelist
9901 for i, pkg in enumerate(mergelist):
9902 if self._is_restart_necessary(pkg) and \
9903 i != len(mergelist) - 1:
# True when pkg is a different version of portage itself being merged
# to the running root — i.e. merging it changes the code we run.
9908 def _is_restart_necessary(self, pkg):
9910 @return: True if merging the given package
9911 requires restart, False otherwise.
9914 # Figure out if we need a restart.
9915 if pkg.root == self._running_root.root and \
9916 portage.match_from_list(
9917 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
9918 if self._running_portage:
9919 return pkg.cpv != self._running_portage.cpv
# After a portage self-upgrade merges, re-exec emerge with --resume so
# the remaining packages are handled by the new code. Never returns if
# the execv() succeeds.
9923 def _restart_if_necessary(self, pkg):
9925 Use execv() to restart emerge. This happens
9926 if portage upgrades itself and there are
9927 remaining packages in the list.
9930 if self._opts_no_restart.intersection(self.myopts):
9933 if not self._is_restart_necessary(pkg):
# Last package in the list: nothing left to resume, so no restart.
9936 if pkg == self._mergelist[-1]:
9939 self._main_loop_cleanup()
9941 logger = self._logger
9942 pkg_count = self._pkg_count
9943 mtimedb = self._mtimedb
9944 bad_resume_opts = self._bad_resume_opts
9946 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
9947 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
9949 logger.log(" *** RESTARTING " + \
9950 "emerge via exec() after change of " + \
# Drop the just-merged portage from the resume list and flush state
# to disk before exec'ing (commit lines partly elided).
9953 mtimedb["resume"]["mergelist"].remove(list(pkg))
9955 portage.run_exitfuncs()
9956 mynewargv = [sys.argv[0], "--resume"]
9957 resume_opts = self.myopts.copy()
9958 # For automatic resume, we need to prevent
9959 # any of bad_resume_opts from leaking in
9960 # via EMERGE_DEFAULT_OPTS.
9961 resume_opts["--ignore-default-opts"] = True
9962 for myopt, myarg in resume_opts.iteritems():
9963 if myopt not in bad_resume_opts:
9965 mynewargv.append(myopt)
9967 mynewargv.append(myopt +"="+ str(myarg))
9968 # priority only needs to be adjusted on the first run
9969 os.environ["PORTAGE_NICENESS"] = "0"
9970 os.execv(mynewargv[0], mynewargv)
9974 if "--resume" in self.myopts:
9976 portage.writemsg_stdout(
9977 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
9978 self._logger.log(" *** Resuming merge...")
9980 self._save_resume_list()
9983 self._background = self._background_mode()
9984 except self._unknown_internal_error:
9987 for root in self.trees:
9988 root_config = self.trees[root]["root_config"]
9990 # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
9991 # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
9992 # for ensuring sane $PWD (bug #239560) and storing elog messages.
9993 tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
9994 if not tmpdir or not os.path.isdir(tmpdir):
9995 msg = "The directory specified in your " + \
9996 "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
9997 "does not exist. Please create this " + \
9998 "directory or correct your PORTAGE_TMPDIR setting."
9999 msg = textwrap.wrap(msg, 70)
10000 out = portage.output.EOutput()
10005 if self._background:
10006 root_config.settings.unlock()
10007 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10008 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10009 root_config.settings.lock()
10011 self.pkgsettings[root] = portage.config(
10012 clone=root_config.settings)
10014 rval = self._check_manifests()
10015 if rval != os.EX_OK:
10018 keep_going = "--keep-going" in self.myopts
10019 fetchonly = self._build_opts.fetchonly
10020 mtimedb = self._mtimedb
10021 failed_pkgs = self._failed_pkgs
10024 rval = self._merge()
10025 if rval == os.EX_OK or fetchonly or not keep_going:
10027 if "resume" not in mtimedb:
10029 mergelist = self._mtimedb["resume"].get("mergelist")
10033 if not failed_pkgs:
10036 for failed_pkg in failed_pkgs:
10037 mergelist.remove(list(failed_pkg.pkg))
10039 self._failed_pkgs_all.extend(failed_pkgs)
10045 if not self._calc_resume_list():
10048 clear_caches(self.trees)
10049 if not self._mergelist:
10052 self._save_resume_list()
10053 self._pkg_count.curval = 0
10054 self._pkg_count.maxval = len([x for x in self._mergelist \
10055 if isinstance(x, Package) and x.operation == "merge"])
10056 self._status_display.maxval = self._pkg_count.maxval
10058 self._logger.log(" *** Finished. Cleaning up...")
10061 self._failed_pkgs_all.extend(failed_pkgs)
10064 background = self._background
10065 failure_log_shown = False
10066 if background and len(self._failed_pkgs_all) == 1:
10067 # If only one package failed then just show it's
10068 # whole log for easy viewing.
10069 failed_pkg = self._failed_pkgs_all[-1]
10070 build_dir = failed_pkg.build_dir
10073 log_paths = [failed_pkg.build_log]
10075 log_path = self._locate_failure_log(failed_pkg)
10076 if log_path is not None:
10078 log_file = open(log_path, 'rb')
10082 if log_file is not None:
10084 for line in log_file:
10085 writemsg_level(line, noiselevel=-1)
10088 failure_log_shown = True
10090 # Dump mod_echo output now since it tends to flood the terminal.
10091 # This allows us to avoid having more important output, generated
10092 # later, from being swept away by the mod_echo output.
10093 mod_echo_output = _flush_elog_mod_echo()
10095 if background and not failure_log_shown and \
10096 self._failed_pkgs_all and \
10097 self._failed_pkgs_die_msgs and \
10098 not mod_echo_output:
10100 printer = portage.output.EOutput()
10101 for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10103 if mysettings["ROOT"] != "/":
10104 root_msg = " merged to %s" % mysettings["ROOT"]
10106 printer.einfo("Error messages for package %s%s:" % \
10107 (colorize("INFORM", key), root_msg))
10109 for phase in portage.const.EBUILD_PHASES:
10110 if phase not in logentries:
10112 for msgtype, msgcontent in logentries[phase]:
10113 if isinstance(msgcontent, basestring):
10114 msgcontent = [msgcontent]
10115 for line in msgcontent:
10116 printer.eerror(line.strip("\n"))
10118 if self._post_mod_echo_msgs:
10119 for msg in self._post_mod_echo_msgs:
10122 if len(self._failed_pkgs_all) > 1:
10123 msg = "The following packages have " + \
10124 "failed to build or install:"
10125 prefix = bad(" * ")
10126 writemsg(prefix + "\n", noiselevel=-1)
10127 from textwrap import wrap
10128 for line in wrap(msg, 72):
10129 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10130 writemsg(prefix + "\n", noiselevel=-1)
10131 for failed_pkg in self._failed_pkgs_all:
10132 writemsg("%s\t%s\n" % (prefix,
10133 colorize("INFORM", str(failed_pkg.pkg))),
10135 writemsg(prefix + "\n", noiselevel=-1)
# elog hook: collect ERROR-level entries per package so they can be
# replayed in the final failure summary.
10139 def _elog_listener(self, mysettings, key, logentries, fulltext):
10140 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
10142 self._failed_pkgs_die_msgs.append(
10143 (mysettings, key, errors))
# Find a usable (existing, non-empty) build log for a failed package;
# the existence checks and return statements are elided from this
# listing — presumably returns the path or None, confirm.
10145 def _locate_failure_log(self, failed_pkg):
10147 build_dir = failed_pkg.build_dir
10150 log_paths = [failed_pkg.build_log]
10152 for log_path in log_paths:
10157 log_size = os.stat(log_path).st_size
# Seed the scheduling queue from the merge list; the Blocker branch's
# body is elided from this listing.
10168 def _add_packages(self):
10169 pkg_queue = self._pkg_queue
10170 for pkg in self._mergelist:
10171 if isinstance(pkg, Package):
10172 pkg_queue.append(pkg)
10173 elif isinstance(pkg, Blocker):
# Exit listener for merge tasks: finish bookkeeping, return the task's
# settings to the pool, and advance the progress counter only for
# successful merges of not-yet-installed packages.
10176 def _merge_exit(self, merge):
10177 self._do_merge_exit(merge)
10178 self._deallocate_config(merge.merge.settings)
10179 if merge.returncode == os.EX_OK and \
10180 not merge.merge.pkg.installed:
10181 self._status_display.curval += 1
10182 self._status_display.merges = len(self._task_queues.merge)
# Handle a finished merge: on failure record it for the summary; on
# success mark the task (and any replaced package's uninstall task)
# complete, maybe restart emerge, and keep the resume list current.
10185 def _do_merge_exit(self, merge):
10186 pkg = merge.merge.pkg
10187 if merge.returncode != os.EX_OK:
10188 settings = merge.merge.settings
10189 build_dir = settings.get("PORTAGE_BUILDDIR")
10190 build_log = settings.get("PORTAGE_LOG_FILE")
10192 self._failed_pkgs.append(self._failed_pkg(
10193 build_dir=build_dir, build_log=build_log,
10195 returncode=merge.returncode))
10196 self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
10198 self._status_display.failed = len(self._failed_pkgs)
10201 self._task_complete(pkg)
10202 pkg_to_replace = merge.merge.pkg_to_replace
10203 if pkg_to_replace is not None:
10204 # When a package is replaced, mark it's uninstall
10205 # task complete (if any).
10206 uninst_hash_key = \
10207 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
10208 self._task_complete(uninst_hash_key)
10213 self._restart_if_necessary(pkg)
10215 # Call mtimedb.commit() after each merge so that
10216 # --resume still works after being interrupted
10217 # by reboot, sigkill or similar.
10218 mtimedb = self._mtimedb
10219 mtimedb["resume"]["mergelist"].remove(list(pkg))
10220 if not mtimedb["resume"]["mergelist"]:
10221 del mtimedb["resume"]
# Exit listener for build tasks: a successful build is chained into a
# PackageMerge on the merge queue; a failed build is recorded for the
# summary. Either way the config goes back to the pool on failure and
# the status display is refreshed.
10224 def _build_exit(self, build):
10225 if build.returncode == os.EX_OK:
10227 merge = PackageMerge(merge=build)
10228 merge.addExitListener(self._merge_exit)
10229 self._task_queues.merge.add(merge)
10230 self._status_display.merges = len(self._task_queues.merge)
10232 settings = build.settings
10233 build_dir = settings.get("PORTAGE_BUILDDIR")
10234 build_log = settings.get("PORTAGE_LOG_FILE")
10236 self._failed_pkgs.append(self._failed_pkg(
10237 build_dir=build_dir, build_log=build_log,
10239 returncode=build.returncode))
10240 self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
10242 self._status_display.failed = len(self._failed_pkgs)
10243 self._deallocate_config(build.settings)
10245 self._status_display.running = self._jobs
# Binary-package extraction finishes exactly like a build.
10248 def _extract_exit(self, build):
10249 self._build_exit(build)
# Mark a task done and invalidate the memoized "nothing schedulable"
# state so _choose_pkg() searches again.
10251 def _task_complete(self, pkg):
10252 self._completed_tasks.add(pkg)
10253 self._choose_pkg_return_early = False
10257 self._add_prefetchers()
10258 self._add_packages()
10259 pkg_queue = self._pkg_queue
10260 failed_pkgs = self._failed_pkgs
10261 portage.locks._quiet = self._background
10262 portage.elog._emerge_elog_listener = self._elog_listener
10268 self._main_loop_cleanup()
10269 portage.locks._quiet = False
10270 portage.elog._emerge_elog_listener = None
10272 rval = failed_pkgs[-1].returncode
# Reset all per-run scheduling state after the main loop exits.
10276 def _main_loop_cleanup(self):
10277 del self._pkg_queue[:]
10278 self._completed_tasks.clear()
10279 self._choose_pkg_return_early = False
10280 self._status_display.reset()
10281 self._digraph = None
10282 self._task_queues.fetch.clear()
# Pick the next package whose dependencies are all satisfied. Without
# a digraph, FIFO order is used (with a guard so dependent packages
# aren't started while jobs are still running, except under --nodeps
# with parallelism). The "no candidate" result is memoized until some
# running task completes.
10284 def _choose_pkg(self):
10286 Choose a task that has all it's dependencies satisfied.
10289 if self._choose_pkg_return_early:
10292 if self._digraph is None:
10293 if (self._jobs or self._task_queues.merge) and \
10294 not ("--nodeps" in self.myopts and \
10295 (self._max_jobs is True or self._max_jobs > 1)):
10296 self._choose_pkg_return_early = True
10298 return self._pkg_queue.pop(0)
# Nothing running: the queue head is trivially safe to start.
10300 if not (self._jobs or self._task_queues.merge):
10301 return self._pkg_queue.pop(0)
10303 self._prune_digraph()
# Packages later in the queue don't count as blocking dependencies.
10306 later = set(self._pkg_queue)
10307 for pkg in self._pkg_queue:
10309 if not self._dependent_on_scheduled_merges(pkg, later):
10313 if chosen_pkg is not None:
10314 self._pkg_queue.remove(chosen_pkg)
10316 if chosen_pkg is None:
10317 # There's no point in searching for a package to
10318 # choose until at least one of the existing jobs
10320 self._choose_pkg_return_early = True
10324 def _dependent_on_scheduled_merges(self, pkg, later):
# Depth-first walk of pkg's dependency subgraph looking for any node
# that is still a scheduled (not yet completed) merge.
# NOTE(review): elided extraction -- the "while node_stack:" loop header,
# "continue", and the return statements are missing. Code kept verbatim.
10326 Traverse the subgraph of the given packages deep dependencies
10327 to see if it contains any scheduled merges.
10328 @param pkg: a package to check dependencies for
10330 @param later: packages for which dependence should be ignored
10331 since they will be merged later than pkg anyway and therefore
10332 delaying the merge of pkg will not result in a more optimal
10336 @returns: True if the package is dependent, False otherwise.
10339 graph = self._digraph
10340 completed_tasks = self._completed_tasks
10343 traversed_nodes = set([pkg])
10344 direct_deps = graph.child_nodes(pkg)
10345 node_stack = direct_deps
# Freeze the direct dependencies for fast membership tests below.
10346 direct_deps = frozenset(direct_deps)
10348 node = node_stack.pop()
10349 if node in traversed_nodes:
10351 traversed_nodes.add(node)
# Skip nodes that cannot delay pkg: installed/nomerge nodes, indirect
# uninstalls, already-completed tasks (and, presumably, "later" nodes --
# that condition line is missing here).
10352 if not ((node.installed and node.operation == "nomerge") or \
10353 (node.operation == "uninstall" and \
10354 node not in direct_deps) or \
10355 node in completed_tasks or \
10359 node_stack.extend(graph.child_nodes(node))
10363 def _allocate_config(self, root):
# Take a config instance for *root* from the pool, or clone a fresh one.
# NOTE(review): elided extraction -- the "else:" between the pool pop and
# the clone is missing. Code kept verbatim.
10365 Allocate a unique config instance for a task in order
10366 to prevent interference between parallel tasks.
10368 if self._config_pool[root]:
10369 temp_settings = self._config_pool[root].pop()
10371 temp_settings = portage.config(clone=self.pkgsettings[root])
10372 # Since config.setcpv() isn't guaranteed to call config.reset() due to
10373 # performance reasons, call it here to make sure all settings from the
10374 # previous package get flushed out (such as PORTAGE_LOG_FILE).
10375 temp_settings.reload()
10376 temp_settings.reset()
10377 return temp_settings
10379 def _deallocate_config(self, settings):
10380 self._config_pool[settings["ROOT"]].append(settings)
10382 def _main_loop(self):
# Drive scheduling/polling until all jobs and merges are finished.
# NOTE(review): elided extraction -- the poll calls inside the loop and
# the final drain loop body are missing. Code kept verbatim.
10384 # Only allow 1 job max if a restart is scheduled
10385 # due to portage update.
10386 if self._is_restart_scheduled() or \
10387 self._opts_no_background.intersection(self.myopts):
10388 self._set_max_jobs(1)
10390 merge_queue = self._task_queues.merge
10392 while self._schedule():
10393 if self._poll_event_handlers:
# Drain remaining events once nothing new can be scheduled.
10398 if not (self._jobs or merge_queue):
10400 if self._poll_event_handlers:
10403 def _keep_scheduling(self):
10404 return bool(self._pkg_queue and \
10405 not (self._failed_pkgs and not self._build_opts.fetchonly))
10407 def _schedule_tasks(self):
# Poll-loop callback: schedule new work, refresh the display, and decide
# whether the loop should keep running.
# NOTE(review): elided extraction -- the loop over task queues is missing
# its body (presumably q.schedule()), and an intervening blank/elided
# region follows the fetch-cancel block. Code kept verbatim.
10408 self._schedule_tasks_imp()
10409 self._status_display.display()
10412 for q in self._task_queues.values():
10416 # Cancel prefetchers if they're the only reason
10417 # the main poll loop is still running.
10418 if self._failed_pkgs and not self._build_opts.fetchonly and \
10419 not (self._jobs or self._task_queues.merge) and \
10420 self._task_queues.fetch:
10421 self._task_queues.fetch.clear()
# Schedule again in case cancelling fetches freed capacity.
10425 self._schedule_tasks_imp()
10426 self._status_display.display()
10428 return self._keep_scheduling()
10430 def _job_delay(self):
# Throttle job starts when --load-average is in effect: the more jobs
# are running, the longer the wait before starting another.
# NOTE(review): elided extraction -- the "return True"/"return False"
# lines are missing. Code kept verbatim.
10433 @returns: True if job scheduling should be delayed, False otherwise.
10436 if self._jobs and self._max_load is not None:
10438 current_time = time.time()
# Delay grows polynomially with the running job count, capped at
# _job_delay_max.
10440 delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
10441 if delay > self._job_delay_max:
10442 delay = self._job_delay_max
10443 if (current_time - self._previous_job_start_time) < delay:
10448 def _schedule_tasks_imp(self):
# Start as many new package tasks as limits allow.
# NOTE(review): elided extraction -- the scheduling loop header, the
# state_change initialization, and branch lines distinguishing the
# uninstall/extract/build task kinds are missing. Code kept verbatim.
10451 @returns: True if state changed, False otherwise.
10458 if not self._keep_scheduling():
10459 return bool(state_change)
10461 if self._choose_pkg_return_early or \
10462 not self._can_add_job() or \
10464 return bool(state_change)
10466 pkg = self._choose_pkg()
10468 return bool(state_change)
10472 if not pkg.installed:
10473 self._pkg_count.curval += 1
10475 task = self._task(pkg)
# Installed packages go straight to the merge (uninstall) queue.
10478 merge = PackageMerge(merge=task)
10479 merge.addExitListener(self._merge_exit)
10480 self._task_queues.merge.add(merge)
# Binary-package path: extraction counts as a job.
10484 self._previous_job_start_time = time.time()
10485 self._status_display.running = self._jobs
10486 task.addExitListener(self._extract_exit)
10487 self._task_queues.jobs.add(task)
# Ebuild path: build counts as a job.
10491 self._previous_job_start_time = time.time()
10492 self._status_display.running = self._jobs
10493 task.addExitListener(self._build_exit)
10494 self._task_queues.jobs.add(task)
10496 return bool(state_change)
10498 def _task(self, pkg):
# Build the MergeListItem composite task for one package, wiring in all
# of the scheduler's collaborators (config pool, logger, prefetcher,
# status callbacks).
# NOTE(review): elided extraction -- the "if previous_cpv:" guard and the
# final "return task" are missing. Code kept verbatim.
10500 pkg_to_replace = None
10501 if pkg.operation != "uninstall":
# Find the currently installed package in the same slot, if any, so
# the merge can treat it as a replacement.
10502 vardb = pkg.root_config.trees["vartree"].dbapi
10503 previous_cpv = vardb.match(pkg.slot_atom)
10505 previous_cpv = previous_cpv.pop()
10506 pkg_to_replace = self._pkg(previous_cpv,
10507 "installed", pkg.root_config, installed=True)
10509 task = MergeListItem(args_set=self._args_set,
10510 background=self._background, binpkg_opts=self._binpkg_opts,
10511 build_opts=self._build_opts,
10512 config_pool=self._ConfigPool(pkg.root,
10513 self._allocate_config, self._deallocate_config),
10514 emerge_opts=self.myopts,
10515 find_blockers=self._find_blockers(pkg), logger=self._logger,
10516 mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
10517 pkg_to_replace=pkg_to_replace,
10518 prefetcher=self._prefetchers.get(pkg),
10519 scheduler=self._sched_iface,
10520 settings=self._allocate_config(pkg.root),
10521 statusMessage=self._status_msg,
10522 world_atom=self._world_atom)
10526 def _failed_pkg_msg(self, failed_pkg, action, preposition):
# Emit a one- or two-line status message for a failed package, e.g.
# "Failed to emerge cat/pkg for /root, Log file:" followed by the path.
10527 pkg = failed_pkg.pkg
10528 msg = "%s to %s %s" % \
10529 (bad("Failed"), action, colorize("INFORM", pkg.cpv))
# Mention the ROOT only when it is not the default "/".
10530 if pkg.root != "/":
10531 msg += " %s %s" % (preposition, pkg.root)
10533 log_path = self._locate_failure_log(failed_pkg)
10534 if log_path is not None:
10535 msg += ", Log file:"
10536 self._status_msg(msg)
# The log path goes on its own line so it is easy to copy.
10538 if log_path is not None:
10539 self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
10541 def _status_msg(self, msg):
# NOTE(review): elided extraction -- the docstring delimiters and part of
# its text are missing. Code kept verbatim.
10543 Display a brief status message (no newlines) in the status display.
10544 This is called by tasks to provide feedback to the user. This
10545 delegates the responsibility of generating \r and \n control characters,
10546 to guarantee that lines are created or erased when necessary and
10550 @param msg: a brief status message (no newlines allowed)
# In the foreground, emit a leading newline so the message is not glued
# to spinner/progress output.
10552 if not self._background:
10553 writemsg_level("\n")
10554 self._status_display.displayMessage(msg)
10556 def _save_resume_list(self):
# Persist the pending merge list so --resume works after interruption.
# NOTE(review): elided extraction -- the trailing lines (presumably
# mtimedb.commit()) are missing. Code kept verbatim.
10558 Do this before verifying the ebuild Manifests since it might
10559 be possible for the user to use --resume --skipfirst get past
10560 a non-essential package with a broken digest.
10562 mtimedb = self._mtimedb
# Only real merges are resumable; store each as a plain list so the
# structure is picklable.
10563 mtimedb["resume"]["mergelist"] = [list(x) \
10564 for x in self._mergelist \
10565 if isinstance(x, Package) and x.operation == "merge"]
10569 def _calc_resume_list(self):
# Rebuild the dependency graph from the saved resume list, dropping any
# entries whose dependencies can no longer be satisfied, and report the
# dropped ones via elog (--keep-going style).
# NOTE(review): elided extraction -- try:/else:/return lines and several
# intermediate statements are missing throughout. Code kept verbatim.
10571 Use the current resume list to calculate a new one,
10572 dropping any packages with unsatisfied deps.
10574 @returns: True if successful, False otherwise.
10576 print colorize("GOOD", "*** Resuming merge...")
10578 if self._show_list():
10579 if "--tree" in self.myopts:
10580 portage.writemsg_stdout("\n" + \
10581 darkgreen("These are the packages that " + \
10582 "would be merged, in reverse order:\n\n"))
10585 portage.writemsg_stdout("\n" + \
10586 darkgreen("These are the packages that " + \
10587 "would be merged, in order:\n\n"))
10589 show_spinner = "--quiet" not in self.myopts and \
10590 "--nodeps" not in self.myopts
10593 print "Calculating dependencies ",
10595 myparams = create_depgraph_params(self.myopts, None)
# skip_unsatisfied=True lets the resumed graph silently drop packages
# whose deps vanished; an UnsatisfiedResumeDep means the drop failed.
10599 success, mydepgraph, dropped_tasks = resume_depgraph(
10600 self.settings, self.trees, self._mtimedb, self.myopts,
10601 myparams, self._spinner, skip_unsatisfied=True)
10602 except depgraph.UnsatisfiedResumeDep, e:
10603 mydepgraph = e.depgraph
10604 dropped_tasks = set()
10607 print "\b\b... done!"
# Deferred error report: shown after elog output so it is not lost.
10610 def unsatisfied_resume_dep_msg():
10611 mydepgraph.display_problems()
10612 out = portage.output.EOutput()
10613 out.eerror("One or more packages are either masked or " + \
10614 "have missing dependencies:")
10617 show_parents = set()
10618 for dep in e.value:
10619 if dep.parent in show_parents:
10621 show_parents.add(dep.parent)
10622 if dep.atom is None:
10623 out.eerror(indent + "Masked package:")
10624 out.eerror(2 * indent + str(dep.parent))
10627 out.eerror(indent + str(dep.atom) + " pulled in by:")
10628 out.eerror(2 * indent + str(dep.parent))
10630 msg = "The resume list contains packages " + \
10631 "that are either masked or have " + \
10632 "unsatisfied dependencies. " + \
10633 "Please restart/continue " + \
10634 "the operation manually, or use --skipfirst " + \
10635 "to skip the first package in the list and " + \
10636 "any other packages that may be " + \
10637 "masked or have missing dependencies."
10638 for line in textwrap.wrap(msg, 72):
10640 self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
10643 if success and self._show_list():
10644 mylist = mydepgraph.altlist()
10646 if "--tree" in self.myopts:
10648 mydepgraph.display(mylist, favorites=self._favorites)
10651 self._post_mod_echo_msgs.append(mydepgraph.display_problems)
10653 mydepgraph.display_problems()
# Adopt the recalculated list/graph; break_refs avoids keeping the
# whole depgraph alive through back-references.
10655 mylist = mydepgraph.altlist()
10656 mydepgraph.break_refs(mylist)
10657 mydepgraph.break_refs(dropped_tasks)
10658 self._mergelist = mylist
10659 self._set_digraph(mydepgraph.schedulerGraph())
# Log each dropped merge so the user knows why it disappeared.
10662 for task in dropped_tasks:
10663 if not (isinstance(task, Package) and task.operation == "merge"):
10666 msg = "emerge --keep-going:" + \
10668 if pkg.root != "/":
10669 msg += " for %s" % (pkg.root,)
10670 msg += " dropped due to unsatisfied dependency."
10671 for line in textwrap.wrap(msg, msg_width):
10672 eerror(line, phase="other", key=pkg.cpv)
10673 settings = self.pkgsettings[pkg.root]
10674 # Ensure that log collection from $T is disabled inside
10675 # elog_process(), since any logs that might exist are
10677 settings.pop("T", None)
10678 portage.elog.elog_process(pkg.cpv, settings)
10679 self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
10683 def _show_list(self):
# Whether the merge list should be printed: not in --quiet mode and one
# of --ask/--tree/--verbose is active.
# NOTE(review): elided extraction -- the return statements are missing.
# Code kept verbatim.
10684 myopts = self.myopts
10685 if "--quiet" not in myopts and \
10686 ("--ask" in myopts or "--tree" in myopts or \
10687 "--verbose" in myopts):
10691 def _world_atom(self, pkg):
# Record pkg in the world favorites file when appropriate.
# NOTE(review): elided extraction -- the early "return"s, the try/finally
# around the locked section, the else: before the failure message, and
# the unlock are missing. Code kept verbatim.
10693 Add the package to the world file, but only if
10694 it's supposed to be added. Otherwise, do nothing.
# Options that mean "do not touch world".
10697 if set(("--buildpkgonly", "--fetchonly",
10699 "--oneshot", "--onlydeps",
10700 "--pretend")).intersection(self.myopts):
# Only packages on the target ROOT, and only those the user actually
# asked for, belong in world.
10703 if pkg.root != self.target_root:
10706 args_set = self._args_set
10707 if not args_set.findAtomForPackage(pkg):
10710 logger = self._logger
10711 pkg_count = self._pkg_count
10712 root_config = pkg.root_config
10713 world_set = root_config.sets["world"]
10714 world_locked = False
10715 if hasattr(world_set, "lock"):
10717 world_locked = True
10720 if hasattr(world_set, "load"):
10721 world_set.load() # maybe it's changed on disk
10723 atom = create_world_atom(pkg, args_set, root_config)
10725 if hasattr(world_set, "add"):
10726 self._status_msg(('Recording %s in "world" ' + \
10727 'favorites file...') % atom)
10728 logger.log(" === (%s of %s) Updating world file (%s)" % \
10729 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
10730 world_set.add(atom)
# Read-only world set (branch line not visible): warn instead.
10732 writemsg_level('\n!!! Unable to record %s in "world"\n' % \
10733 (atom,), level=logging.WARN, noiselevel=-1)
10738 def _pkg(self, cpv, type_name, root_config, installed=False):
# Look up (or construct) the Package instance for cpv.
# NOTE(review): elided extraction -- the "if installed:"/else: lines
# picking the operation and the return statements are missing. Code kept
# verbatim.
10740 Get a package instance from the cache, or create a new
10741 one if necessary. Raises KeyError from aux_get if it
10742 failures for some reason (package does not exist or is
10745 operation = "merge"
10747 operation = "nomerge"
10749 if self._digraph is not None:
10750 # Reuse existing instance when available.
10751 pkg = self._digraph.get(
10752 (type_name, root_config.root, cpv, operation))
10753 if pkg is not None:
# Cache miss: pull metadata from the appropriate tree's dbapi.
10756 tree_type = depgraph.pkg_tree_map[type_name]
10757 db = root_config.trees[tree_type].dbapi
10758 db_keys = list(self.trees[root_config.root][
10759 tree_type].dbapi._aux_cache_keys)
10760 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
10761 pkg = Package(cpv=cpv, metadata=metadata,
10762 root_config=root_config, installed=installed)
# For ebuilds, compute the effective USE via setcpv so USE-conditional
# logic sees the same flags the build will.
10763 if type_name == "ebuild":
10764 settings = self.pkgsettings[root_config.root]
10765 settings.setcpv(pkg)
10766 pkg.metadata["USE"] = settings["PORTAGE_USE"]
10770 class MetadataRegen(PollScheduler):
# Regenerates the ebuild metadata cache (the "emerge --regen" backend):
# iterates every category/package in the portdb, spawning one metadata
# process per ebuild, then prunes cache entries for vanished packages.
# NOTE(review): elided extraction -- the leading numbers are original
# line numbers; the run() def line, several loop headers, default-job
# handling and return statements are missing. Code kept verbatim.
10772 def __init__(self, portdb, max_jobs=None, max_load=None):
10773 PollScheduler.__init__(self)
10774 self._portdb = portdb
10776 if max_jobs is None:
10779 self._max_jobs = max_jobs
10780 self._max_load = max_load
10781 self._sched_iface = self._sched_iface_class(
10782 register=self._register,
10783 schedule=self._schedule_wait,
10784 unregister=self._unregister)
10786 self._valid_pkgs = set()
10787 self._process_iter = self._iter_metadata_processes()
10789 def _iter_metadata_processes(self):
# Generator yielding one EbuildMetadataPhase per ebuild that needs its
# cache entry (re)generated.
10790 portdb = self._portdb
10791 valid_pkgs = self._valid_pkgs
10792 every_cp = portdb.cp_all()
# Sorted descending so pop() walks categories in ascending order.
10793 every_cp.sort(reverse=True)
10796 cp = every_cp.pop()
10797 portage.writemsg_stdout("Processing %s\n" % cp)
10798 cpv_list = portdb.cp_list(cp)
10799 for cpv in cpv_list:
10800 valid_pkgs.add(cpv)
10801 ebuild_path, repo_path = portdb.findname2(cpv)
10802 metadata_process = portdb._metadata_process(
10803 cpv, ebuild_path, repo_path)
# None means the cache entry is already current; skip it.
10804 if metadata_process is None:
10806 yield metadata_process
# run() entry point (def line missing from this extraction): schedule all
# metadata processes, then discard stale cache entries per tree.
10810 portdb = self._portdb
10811 from portage.cache.cache_errors import CacheError
10814 for mytree in portdb.porttrees:
10816 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
10817 except CacheError, e:
10818 portage.writemsg("Error listing cache entries for " + \
10819 "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
10824 while self._schedule():
# Keep cache entries for packages that still exist in each tree.
10831 for y in self._valid_pkgs:
10832 for mytree in portdb.porttrees:
10833 if portdb.findname2(y, mytree=mytree)[0]:
10834 dead_nodes[mytree].discard(y)
10836 for mytree, nodes in dead_nodes.iteritems():
10837 auxdb = portdb.auxdb[mytree]
10841 except (KeyError, CacheError):
10844 def _schedule_tasks(self):
10847 @returns: True if there may be remaining tasks to schedule,
10850 while self._can_add_job():
10852 metadata_process = self._process_iter.next()
10853 except StopIteration:
10857 metadata_process.scheduler = self._sched_iface
10858 metadata_process.addExitListener(self._metadata_exit)
10859 metadata_process.start()
10862 def _metadata_exit(self, metadata_process):
# A failed metadata process means the cpv's cache entry is unusable;
# drop it from the valid set so its stale entry gets pruned.
10864 if metadata_process.returncode != os.EX_OK:
10865 self._valid_pkgs.discard(metadata_process.cpv)
10866 portage.writemsg("Error processing %s, continuing...\n" % \
10867 (metadata_process.cpv,))
10870 class UninstallFailure(portage.exception.PortageException):
# NOTE(review): elided extraction -- docstring delimiters and (likely) an
# "if pargs:" guard before the status assignment are missing. Code kept
# verbatim.
10872 An instance of this class is raised by unmerge() when
10873 an uninstallation fails.
10876 def __init__(self, *pargs):
10877 portage.exception.PortageException.__init__(self, pargs)
# The first positional argument is the failing unmerge's exit status.
10879 self.status = pargs[0]
10881 def unmerge(root_config, myopts, unmerge_action,
10882 unmerge_files, ldpath_mtimes, autoclean=0,
10883 clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
10884 scheduler=None, writemsg_level=portage.util.writemsg_level):
# Top-level driver for "emerge --unmerge/--prune/--clean/--depclean":
# resolves the requested arguments against the installed-package db,
# classifies each match as selected/protected/omitted, previews the
# result, then performs the actual unmerges.
# NOTE(review): this whole function is an elided extraction -- leading
# numbers are original line numbers and many lines (returns, else:/try:
# lines, several assignments) are missing. Code kept verbatim; hedged
# section comments only.
10886 quiet = "--quiet" in myopts
10887 settings = root_config.settings
10888 sets = root_config.sets
10889 vartree = root_config.trees["vartree"]
10890 candidate_catpkgs=[]
10892 xterm_titles = "notitles" not in settings.features
10893 out = portage.output.EOutput()
# Local helper (its def line is missing here): cached Package lookup for
# an installed cpv.
10895 db_keys = list(vartree.dbapi._aux_cache_keys)
10898 pkg = pkg_cache.get(cpv)
10900 pkg = Package(cpv=cpv, installed=True,
10901 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
10902 root_config=root_config,
10903 type_name="installed")
10904 pkg_cache[cpv] = pkg
10907 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
10909 # At least the parent needs to exist for the lock file.
10910 portage.util.ensure_dirs(vdb_path)
10911 except portage.exception.PortageException:
10915 if os.access(vdb_path, os.W_OK):
10916 vdb_lock = portage.locks.lockdir(vdb_path)
# Build syslist: the system-profile atoms, expanding virtuals with a
# single installed provider to that provider.
10917 realsyslist = sets["system"].getAtoms()
10919 for x in realsyslist:
10920 mycp = portage.dep_getkey(x)
10921 if mycp in settings.getvirtuals():
10923 for provider in settings.getvirtuals()[mycp]:
10924 if vartree.dbapi.match(provider):
10925 providers.append(provider)
10926 if len(providers) == 1:
10927 syslist.extend(providers)
10929 syslist.append(mycp)
10931 mysettings = portage.config(clone=settings)
10933 if not unmerge_files:
10934 if unmerge_action == "unmerge":
10936 print bold("emerge unmerge") + " can only be used with specific package names"
10942 localtree = vartree
10943 # process all arguments and add all
10944 # valid db entries to candidate_catpkgs
10946 if not unmerge_files:
10947 candidate_catpkgs.extend(vartree.dbapi.cp_all())
10949 #we've got command-line arguments
10950 if not unmerge_files:
10951 print "\nNo packages to unmerge have been provided.\n"
10953 for x in unmerge_files:
10954 arg_parts = x.split('/')
10955 if x[0] not in [".","/"] and \
10956 arg_parts[-1][-7:] != ".ebuild":
10957 #possible cat/pkg or dep; treat as such
10958 candidate_catpkgs.append(x)
10959 elif unmerge_action in ["prune","clean"]:
10960 print "\n!!! Prune and clean do not accept individual" + \
10961 " ebuilds as arguments;\n skipping.\n"
10964 # it appears that the user is specifying an installed
10965 # ebuild and we're in "unmerge" mode, so it's ok.
10966 if not os.path.exists(x):
10967 print "\n!!! The path '"+x+"' doesn't exist.\n"
10970 absx = os.path.abspath(x)
10971 sp_absx = absx.split("/")
10972 if sp_absx[-1][-7:] == ".ebuild":
10974 absx = "/".join(sp_absx)
10976 sp_absx_len = len(sp_absx)
10978 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
10979 vdb_len = len(vdb_path)
10981 sp_vdb = vdb_path.split("/")
10982 sp_vdb_len = len(sp_vdb)
10984 if not os.path.exists(absx+"/CONTENTS"):
10985 print "!!! Not a valid db dir: "+str(absx)
# A path argument must live inside the vdb to name an installed pkg.
10988 if sp_absx_len <= sp_vdb_len:
10989 # The Path is shorter... so it can't be inside the vdb.
10992 print "\n!!!",x,"cannot be inside "+ \
10993 vdb_path+"; aborting.\n"
10996 for idx in range(0,sp_vdb_len):
10997 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11000 print "\n!!!", x, "is not inside "+\
11001 vdb_path+"; aborting.\n"
11004 print "="+"/".join(sp_absx[sp_vdb_len:])
11005 candidate_catpkgs.append(
11006 "="+"/".join(sp_absx[sp_vdb_len:]))
11009 if (not "--quiet" in myopts):
11011 if settings["ROOT"] != "/":
11012 writemsg_level(darkgreen(newline+ \
11013 ">>> Using system located in ROOT tree %s\n" % \
11016 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11017 not ("--quiet" in myopts):
11018 writemsg_level(darkgreen(newline+\
11019 ">>> These are the packages that would be unmerged:\n"))
11021 # Preservation of order is required for --depclean and --prune so
11022 # that dependencies are respected. Use all_selected to eliminate
11023 # duplicate packages since the same package may be selected by
11026 all_selected = set()
# Classify every candidate's matches into selected/protected/omitted.
11027 for x in candidate_catpkgs:
11028 # cycle through all our candidate deps and determine
11029 # what will and will not get unmerged
11031 mymatch = vartree.dbapi.match(x)
11032 except portage.exception.AmbiguousPackageName, errpkgs:
11033 print "\n\n!!! The short ebuild name \"" + \
11034 x + "\" is ambiguous. Please specify"
11035 print "!!! one of the following fully-qualified " + \
11036 "ebuild names instead:\n"
11037 for i in errpkgs[0]:
11038 print " " + green(i)
11042 if not mymatch and x[0] not in "<>=~":
11043 mymatch = localtree.dep_match(x)
11045 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11046 (x, unmerge_action), noiselevel=-1)
11050 {"protected": set(), "selected": set(), "omitted": set()})
11051 mykey = len(pkgmap) - 1
11052 if unmerge_action=="unmerge":
11054 if y not in all_selected:
11055 pkgmap[mykey]["selected"].add(y)
11056 all_selected.add(y)
11057 elif unmerge_action == "prune":
11058 if len(mymatch) == 1:
# prune: keep only the best version per slot, select the rest.
11060 best_version = mymatch[0]
11061 best_slot = vartree.getslot(best_version)
11062 best_counter = vartree.dbapi.cpv_counter(best_version)
11063 for mypkg in mymatch[1:]:
11064 myslot = vartree.getslot(mypkg)
11065 mycounter = vartree.dbapi.cpv_counter(mypkg)
11066 if (myslot == best_slot and mycounter > best_counter) or \
11067 mypkg == portage.best([mypkg, best_version]):
11068 if myslot == best_slot:
11069 if mycounter < best_counter:
11070 # On slot collision, keep the one with the
11071 # highest counter since it is the most
11072 # recently installed.
11074 best_version = mypkg
11076 best_counter = mycounter
11077 pkgmap[mykey]["protected"].add(best_version)
11078 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11079 if mypkg != best_version and mypkg not in all_selected)
11080 all_selected.update(pkgmap[mykey]["selected"])
11082 # unmerge_action == "clean"
11084 for mypkg in mymatch:
11085 if unmerge_action == "clean":
11086 myslot = localtree.getslot(mypkg)
11088 # since we're pruning, we don't care about slots
11089 # and put all the pkgs in together
11091 if myslot not in slotmap:
11092 slotmap[myslot] = {}
11093 slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11095 for mypkg in vartree.dbapi.cp_list(
11096 portage.dep_getkey(mymatch[0])):
11097 myslot = vartree.getslot(mypkg)
11098 if myslot not in slotmap:
11099 slotmap[myslot] = {}
11100 slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
11102 for myslot in slotmap:
11103 counterkeys = slotmap[myslot].keys()
11104 if not counterkeys:
# Highest counter in each slot is the most recent install; protect it.
11107 pkgmap[mykey]["protected"].add(
11108 slotmap[myslot][counterkeys[-1]])
11109 del counterkeys[-1]
11111 for counter in counterkeys[:]:
11112 mypkg = slotmap[myslot][counter]
11113 if mypkg not in mymatch:
11114 counterkeys.remove(counter)
11115 pkgmap[mykey]["protected"].add(
11116 slotmap[myslot][counter])
11118 #be pretty and get them in order of merge:
11119 for ckey in counterkeys:
11120 mypkg = slotmap[myslot][ckey]
11121 if mypkg not in all_selected:
11122 pkgmap[mykey]["selected"].add(mypkg)
11123 all_selected.add(mypkg)
11124 # ok, now the last-merged package
11125 # is protected, and the rest are selected
11126 numselected = len(all_selected)
11127 if global_unmerge and not numselected:
11128 portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11131 if not numselected:
11132 portage.writemsg_stdout(
11133 "\n>>> No packages selected for removal by " + \
11134 unmerge_action + "\n")
11138 vartree.dbapi.flush_cache()
11139 portage.locks.unlockdir(vdb_lock)
11141 from portage.sets.base import EditablePackageSet
11143 # generate a list of package sets that are directly or indirectly listed in "world",
11144 # as there is no persistent list of "installed" sets
11145 installed_sets = ["world"]
# Transitively expand nested set references reachable from "world".
11150 pos = len(installed_sets)
11151 for s in installed_sets[pos - 1:]:
11154 candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
11157 installed_sets += candidates
11158 installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
11161 # we don't want to unmerge packages that are still listed in user-editable package sets
11162 # listed in "world" as they would be remerged on the next update of "world" or the
11163 # relevant package sets.
11164 unknown_sets = set()
11165 for cp in xrange(len(pkgmap)):
11166 for cpv in pkgmap[cp]["selected"].copy():
11170 # It could have been uninstalled
11171 # by a concurrent process.
# Never let portage unmerge itself on the live ROOT.
11174 if unmerge_action != "clean" and \
11175 root_config.root == "/" and \
11176 portage.match_from_list(
11177 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
11178 msg = ("Not unmerging package %s since there is no valid " + \
11179 "reason for portage to unmerge itself.") % (pkg.cpv,)
11180 for line in textwrap.wrap(msg, 75):
11182 # adjust pkgmap so the display output is correct
11183 pkgmap[cp]["selected"].remove(cpv)
11184 all_selected.remove(cpv)
11185 pkgmap[cp]["protected"].add(cpv)
11189 for s in installed_sets:
11190 # skip sets that the user requested to unmerge, and skip world
11191 # unless we're unmerging a package set (as the package would be
11192 # removed from "world" later on)
11193 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
11197 if s in unknown_sets:
11199 unknown_sets.add(s)
11200 out = portage.output.EOutput()
11201 out.eerror(("Unknown set '@%s' in " + \
11202 "%svar/lib/portage/world_sets") % \
11203 (s, root_config.root))
11206 # only check instances of EditablePackageSet as other classes are generally used for
11207 # special purposes and can be ignored here (and are usually generated dynamically, so the
11208 # user can't do much about them anyway)
11209 if isinstance(sets[s], EditablePackageSet):
11211 # This is derived from a snippet of code in the
11212 # depgraph._iter_atoms_for_pkg() method.
11213 for atom in sets[s].iterAtomsForPackage(pkg):
11214 inst_matches = vartree.dbapi.match(atom)
11215 inst_matches.reverse() # descending order
11217 for inst_cpv in inst_matches:
11219 inst_pkg = _pkg(inst_cpv)
11221 # It could have been uninstalled
11222 # by a concurrent process.
11225 if inst_pkg.cp != atom.cp:
11227 if pkg >= inst_pkg:
11228 # This is descending order, and we're not
11229 # interested in any versions <= pkg given.
11231 if pkg.slot_atom != inst_pkg.slot_atom:
11232 higher_slot = inst_pkg
11234 if higher_slot is None:
11238 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
11239 #print colorize("WARN", "but still listed in the following package sets:")
11240 #print " %s\n" % ", ".join(parents)
11241 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
11242 print colorize("WARN", "still referenced by the following package sets:")
11243 print " %s\n" % ", ".join(parents)
11244 # adjust pkgmap so the display output is correct
11245 pkgmap[cp]["selected"].remove(cpv)
11246 all_selected.remove(cpv)
11247 pkgmap[cp]["protected"].add(cpv)
11251 numselected = len(all_selected)
11252 if not numselected:
11254 "\n>>> No packages selected for removal by " + \
11255 unmerge_action + "\n")
11258 # Unmerge order only matters in some cases
# When order is irrelevant, fold duplicate cp entries together and sort.
11262 selected = d["selected"]
11265 cp = portage.cpv_getkey(iter(selected).next())
11266 cp_dict = unordered.get(cp)
11267 if cp_dict is None:
11269 unordered[cp] = cp_dict
11272 for k, v in d.iteritems():
11273 cp_dict[k].update(v)
11274 pkgmap = [unordered[cp] for cp in sorted(unordered)]
# Preview pass: fill in "omitted", warn about system packages, print the
# selected/protected/omitted table.
11276 for x in xrange(len(pkgmap)):
11277 selected = pkgmap[x]["selected"]
11280 for mytype, mylist in pkgmap[x].iteritems():
11281 if mytype == "selected":
11283 mylist.difference_update(all_selected)
11284 cp = portage.cpv_getkey(iter(selected).next())
11285 for y in localtree.dep_match(cp):
11286 if y not in pkgmap[x]["omitted"] and \
11287 y not in pkgmap[x]["selected"] and \
11288 y not in pkgmap[x]["protected"] and \
11289 y not in all_selected:
11290 pkgmap[x]["omitted"].add(y)
11291 if global_unmerge and not pkgmap[x]["selected"]:
11292 #avoid cluttering the preview printout with stuff that isn't getting unmerged
11294 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
11295 writemsg_level(colorize("BAD","\a\n\n!!! " + \
11296 "'%s' is part of your system profile.\n" % cp),
11297 level=logging.WARNING, noiselevel=-1)
11298 writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
11299 "be damaging to your system.\n\n"),
11300 level=logging.WARNING, noiselevel=-1)
11301 if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
11302 countdown(int(settings["EMERGE_WARNING_DELAY"]),
11303 colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
11305 writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
11307 writemsg_level(bold(cp) + ": ", noiselevel=-1)
11308 for mytype in ["selected","protected","omitted"]:
11310 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
11311 if pkgmap[x][mytype]:
11312 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
11313 sorted_pkgs.sort(portage.pkgcmp)
11314 for pn, ver, rev in sorted_pkgs:
# "-r0" revisions are displayed without the revision suffix
# (that branch's lines are missing from this extraction).
11318 myversion = ver + "-" + rev
11319 if mytype == "selected":
11321 colorize("UNMERGE_WARN", myversion + " "),
11325 colorize("GOOD", myversion + " "), noiselevel=-1)
11327 writemsg_level("none ", noiselevel=-1)
11329 writemsg_level("\n", noiselevel=-1)
11331 writemsg_level("\n", noiselevel=-1)
11333 writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
11334 " packages are slated for removal.\n")
11335 writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
11336 " and " + colorize("GOOD", "'omitted'") + \
11337 " packages will not be removed.\n\n")
11339 if "--pretend" in myopts:
11340 #we're done... return
11342 if "--ask" in myopts:
11343 if userquery("Would you like to unmerge these packages?")=="No":
11344 # enter pretend mode for correct formatting of results
11345 myopts["--pretend"] = True
11350 #the real unmerging begins, after a short delay....
11351 if clean_delay and not autoclean:
11352 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
11354 for x in xrange(len(pkgmap)):
11355 for y in pkgmap[x]["selected"]:
11356 writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
11357 emergelog(xterm_titles, "=== Unmerging... ("+y+")")
11358 mysplit = y.split("/")
11360 retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
11361 mysettings, unmerge_action not in ["clean","prune"],
11362 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
11363 scheduler=scheduler)
11365 if retval != os.EX_OK:
11366 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
11368 raise UninstallFailure(retval)
# Success: drop the package (and any unmerged sets) from "world".
11371 if clean_world and hasattr(sets["world"], "cleanPackage"):
11372 sets["world"].cleanPackage(vartree.dbapi, y)
11373 emergelog(xterm_titles, " >>> unmerge success: "+y)
11374 if clean_world and hasattr(sets["world"], "remove"):
11375 for s in root_config.setconfig.active:
11376 sets["world"].remove(SETPREFIX+s)
11379 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
# Regenerate the GNU info "dir" index for any info directory whose mtime
# changed since the last emerge run, using /usr/bin/install-info.
# NOTE(review): elided extraction -- leading numbers are original line
# numbers; loop headers, try: lines, counters' initialization and the
# final return are missing. Code kept verbatim.
11381 if os.path.exists("/usr/bin/install-info"):
11382 out = portage.output.EOutput()
# Collect only the info dirs whose mtime differs from the saved one.
11387 inforoot=normpath(root+z)
11388 if os.path.isdir(inforoot):
11389 infomtime = long(os.stat(inforoot).st_mtime)
11390 if inforoot not in prev_mtimes or \
11391 prev_mtimes[inforoot] != infomtime:
11392 regen_infodirs.append(inforoot)
11394 if not regen_infodirs:
11395 portage.writemsg_stdout("\n")
11396 out.einfo("GNU info directory index is up-to-date.")
11398 portage.writemsg_stdout("\n")
11399 out.einfo("Regenerating GNU info directory index...")
11401 dir_extensions = ("", ".gz", ".bz2")
11405 for inforoot in regen_infodirs:
11409 if not os.path.isdir(inforoot) or \
11410 not os.access(inforoot, os.W_OK):
11413 file_list = os.listdir(inforoot)
11415 dir_file = os.path.join(inforoot, "dir")
11416 moved_old_dir = False
11417 processed_count = 0
11418 for x in file_list:
# Skip hidden entries, subdirectories, and the "dir" index files
# themselves (in all compression variants).
11419 if x.startswith(".") or \
11420 os.path.isdir(os.path.join(inforoot, x)):
11422 if x.startswith("dir"):
11424 for ext in dir_extensions:
11425 if x == "dir" + ext or \
11426 x == "dir" + ext + ".old":
# Before the first file is processed, move any existing dir files
# aside so install-info rebuilds the index from scratch.
11431 if processed_count == 0:
11432 for ext in dir_extensions:
11434 os.rename(dir_file + ext, dir_file + ext + ".old")
11435 moved_old_dir = True
11436 except EnvironmentError, e:
11437 if e.errno != errno.ENOENT:
11440 processed_count += 1
11441 myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
11442 existsstr="already exists, for file `"
11444 if re.search(existsstr,myso):
11445 # Already exists... Don't increment the count for this.
11447 elif myso[:44]=="install-info: warning: no info dir entry in ":
11448 # This info file doesn't contain a DIR-header: install-info produces this
11449 # (harmless) warning (the --quiet switch doesn't seem to work).
11450 # Don't increment the count for this.
11453 badcount=badcount+1
11454 errmsg += myso + "\n"
11457 if moved_old_dir and not os.path.exists(dir_file):
11458 # We didn't generate a new dir file, so put the old file
11459 # back where it was originally found.
11460 for ext in dir_extensions:
11462 os.rename(dir_file + ext + ".old", dir_file + ext)
11463 except EnvironmentError, e:
11464 if e.errno != errno.ENOENT:
11468 # Clean dir.old cruft so that they don't prevent
11469 # unmerge of otherwise empty directories.
11470 for ext in dir_extensions:
11472 os.unlink(dir_file + ext + ".old")
11473 except EnvironmentError, e:
11474 if e.errno != errno.ENOENT:
11478 #update mtime so we can potentially avoid regenerating.
11479 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
11482 out.eerror("Processed %d info files; %d errors." % \
11483 (icount, badcount))
11484 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
11487 out.einfo("Processed %d info files." % (icount,))
def display_news_notification(root_config, myopts):
	# Scan every configured repository for unread GLEP 42 news items and,
	# if any are found, remind the user to read them with `eselect news`.
	target_root = root_config.root
	trees = root_config.trees
	settings = trees["vartree"].settings
	portdb = trees["porttree"].dbapi
	vardb = trees["vartree"].dbapi
	NEWS_PATH = os.path.join("metadata", "news")
	UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
	newsReaderDisplay = False
	# Don't update the unread-item state during a --pretend run.
	update = "--pretend" not in myopts

	for repo in portdb.getRepositories():
		unreadItems = checkUpdatedNewsItems(
			portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
			# Print the IMPORTANT header only once, before the first
			# repository that has unread items.
			if not newsReaderDisplay:
				newsReaderDisplay = True
			print colorize("WARN", " * IMPORTANT:"),
			print "%s news items need reading for repository '%s'." % (unreadItems, repo)

	if newsReaderDisplay:
		print colorize("WARN", " *"),
		print "Use " + colorize("GOOD", "eselect news") + " to read news items."
def display_preserved_libs(vardbapi):
	# Report libraries kept by the preserve-libs feature: which packages
	# provide them and which binaries still link against (consume) them.
	# NOTE(review): line-sampled view; MAX_DISPLAY and some map
	# initializations are defined on lines not visible here.

	# Ensure the registry is consistent with existing files.
	vardbapi.plib_registry.pruneNonExisting()

	if vardbapi.plib_registry.hasEntries():
		print colorize("WARN", "!!!") + " existing preserved libs:"
		plibdata = vardbapi.plib_registry.getPreservedLibs()
		linkmap = vardbapi.linkmap
		# Rebuilding the link map relies on external commands; when one is
		# missing, fall back to listing the libs without consumer info.
		linkmap_broken = False
		except portage.exception.CommandNotFound, e:
			writemsg_level("!!! Command Not Found: %s\n" % (e,),
				level=logging.ERROR, noiselevel=-1)
			linkmap_broken = True

			search_for_owners = set()
			for cpv in plibdata:
				internal_plib_keys = set(linkmap._obj_key(f) \
					for f in plibdata[cpv])
				for f in plibdata[cpv]:
					if f in consumer_map:
					for c in linkmap.findConsumers(f):
						# Filter out any consumers that are also preserved libs
						# belonging to the same package as the provider.
						if linkmap._obj_key(c) not in internal_plib_keys:
							consumers.append(c)
					consumer_map[f] = consumers
					# Only the first MAX_DISPLAY+1 consumers are ever shown,
					# so owner lookups beyond that would be wasted work.
					search_for_owners.update(consumers[:MAX_DISPLAY+1])

			owners = vardbapi._owners.getFileOwnerMap(search_for_owners)

		for cpv in plibdata:
			print colorize("WARN", ">>>") + " package: %s" % cpv
			for f in plibdata[cpv]:
				# Group alternate (hardlinked) paths of the same object.
				obj_key = linkmap._obj_key(f)
				alt_paths = samefile_map.get(obj_key)
				if alt_paths is None:
					samefile_map[obj_key] = alt_paths

			for alt_paths in samefile_map.itervalues():
				alt_paths = sorted(alt_paths)
				for p in alt_paths:
					print colorize("WARN", " * ") + " - %s" % (p,)
				consumers = consumer_map.get(f, [])
				for c in consumers[:MAX_DISPLAY]:
					print colorize("WARN", " * ") + "     used by %s (%s)" % \
						(c, ", ".join(x.mycpv for x in owners.get(c, [])))
				# Exactly one extra consumer: show it instead of a count.
				if len(consumers) == MAX_DISPLAY + 1:
					print colorize("WARN", " * ") + "     used by %s (%s)" % \
						(consumers[MAX_DISPLAY], ", ".join(x.mycpv \
						for x in owners.get(consumers[MAX_DISPLAY], [])))
				elif len(consumers) > MAX_DISPLAY:
					print colorize("WARN", " * ") + "     used by %d other files" % (len(consumers) - MAX_DISPLAY)
		print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
def _flush_elog_mod_echo():
	"""
	Dump the mod_echo output now so that our other
	notifications are shown last.
	@returns: True if messages were shown, False otherwise.
	"""
	# NOTE(review): the try:/else: lines wrapping this import are elided
	# in this sampled view.
	messages_shown = False
		from portage.elog import mod_echo
	except ImportError:
		pass # happens during downgrade to a version without the module
		messages_shown = bool(mod_echo._items)
		mod_echo.finalize()
	return messages_shown
def post_emerge(root_config, myopts, mtimedb, retval):
	"""
	Misc. things to run at the end of a merge session.

	Update Config Files
	Display preserved libs warnings

	@param trees: A dictionary mapping each ROOT to it's package databases
	@param mtimedb: The mtimeDB to store data needed across merge invocations
	@type mtimedb: MtimeDB class instance
	@param retval: Emerge's return value
	1. Calls sys.exit(retval)
	"""
	target_root = root_config.root
	trees = { target_root : root_config.trees }
	vardbapi = trees[target_root]["vartree"].dbapi
	settings = vardbapi.settings
	info_mtimes = mtimedb["info"]

	# Load the most current variables from ${ROOT}/etc/profile.env
	settings.regenerate()

	config_protect = settings.get("CONFIG_PROTECT","").split()
	infodirs = settings.get("INFOPATH","").split(":") + \
		settings.get("INFODIR","").split(":")

	if retval == os.EX_OK:
		exit_msg = " *** exiting successfully."
		exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
	emergelog("notitles" not in settings.features, exit_msg)

	_flush_elog_mod_echo()

	# If the vdb state has not changed since the counter hash was taken,
	# all the maintenance below can be skipped.
	counter_hash = settings.get("PORTAGE_COUNTER_HASH")
	if counter_hash is not None and \
		counter_hash == vardbapi._counter_hash():
		# If vdb state has not changed then there's nothing else to do.

	vdb_path = os.path.join(target_root, portage.VDB_PATH)
	portage.util.ensure_dirs(vdb_path)
	# Hold the vdb lock while the info index check runs, unless this is a
	# read-only or --pretend invocation.
	if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
		vdb_lock = portage.locks.lockdir(vdb_path)

		if "noinfo" not in settings.features:
			chk_updated_info_files(target_root,
				infodirs, info_mtimes, retval)
			portage.locks.unlockdir(vdb_lock)

	chk_updated_cfg_files(target_root, config_protect)

	display_news_notification(root_config, myopts)
	if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
		display_preserved_libs(vardbapi)
def chk_updated_cfg_files(target_root, config_protect):
	# Count pending ._cfg????_* config updates under each CONFIG_PROTECT
	# entry and remind the user how to merge them.
	#number of directories with some protect files in them
	for x in config_protect:
		x = os.path.join(target_root, x.lstrip(os.path.sep))
		if not os.access(x, os.W_OK):
			# Avoid Permission denied errors generated
			mymode = os.lstat(x).st_mode
		if stat.S_ISLNK(mymode):
			# We want to treat it like a directory if it
			# is a symlink to an existing directory.
			real_mode = os.stat(x).st_mode
			if stat.S_ISDIR(real_mode):
		if stat.S_ISDIR(mymode):
			# Protected directory: search anywhere below it, pruning
			# hidden directories.
			mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
			# Protected single file: look only for updates to that name.
			mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
				os.path.split(x.rstrip(os.path.sep))
		mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
		a = commands.getstatusoutput(mycommand)
			sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
			# Show the error message alone, sending stdout to /dev/null.
			os.system(mycommand + " 1>/dev/null")
			# find -print0 output: entries are NUL-separated.
			files = a[1].split('\0')
			# split always produces an empty string as the last element
			if files and not files[-1]:
				print "\n"+colorize("WARN", " * IMPORTANT:"),
				if stat.S_ISDIR(mymode):
					print "%d config files in '%s' need updating." % \
					print "config file '%s' needs updating." % x

		print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
			" section of the " + bold("emerge")
		print " "+yellow("*")+" man page to learn how to update config files."
# NOTE(review): the continuation line of this signature (closing paren and
# keyword defaults, including `update`) is missing from this sampled view.
def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
	"""
	Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
	Returns the number of unread (yet relevent) items.

	@param portdb: a portage tree database
	@type portdb: pordbapi
	@param vardb: an installed package database
	@type vardb: vardbapi
	@param NEWS_PATH: path (relative to the repository) of the news items
	@param UNREAD_PATH: path of the file tracking already-seen items
	@param repo_id: repository to check for news
	@returns:
	1. The number of unread but relevant news items.
	"""
	# Delegate all the real work to NewsManager.
	from portage.news import NewsManager
	manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
	return manager.getUnreadItems( repo_id, update=update )
def insert_category_into_atom(atom, category):
	# Splice "category/" into an atom that lacks one, immediately before
	# the first word character (i.e. after any version operator like ">=").
	alphanum = re.search(r'\w', atom)
	ret = atom[:alphanum.start()] + "%s/" % category + \
		atom[alphanum.start():]
def is_valid_package_atom(x):
	# Validate an atom that may lack a category by temporarily prefixing
	# a dummy "cat/" before asking portage to validate it.
	alphanum = re.search(r'\w', x)
		x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
	return portage.isvalidatom(x)
def show_blocker_docs_link():
	# Point the user at the handbook section explaining blocked packages.
	print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
	print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
	print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
def show_mask_docs():
	# Point the user at the documentation for masked packages.
	print "For more information, see the MASKED PACKAGES section in the emerge"
	print "man page or refer to the Gentoo Handbook."
def action_sync(settings, trees, mtimedb, myopts, myaction):
	# Synchronize the portage tree using git, rsync or cvs depending on
	# the SYNC URI and any existing VCS checkout, then run post-sync
	# maintenance (metadata transfer, global updates, config/news checks).
	# NOTE(review): this view is line-sampled; various loop/try/else
	# headers and early `return` statements are not visible here.
	xterm_titles = "notitles" not in settings.features
	emergelog(xterm_titles, " === sync")
	myportdir = settings.get("PORTDIR", None)
	out = portage.output.EOutput()
		sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
	if myportdir[-1]=="/":
		myportdir=myportdir[:-1]
	if not os.path.exists(myportdir):
		print ">>>",myportdir,"not found, creating it."
		os.makedirs(myportdir,0755)
	syncuri = settings.get("SYNC", "").strip()
		writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
			noiselevel=-1, level=logging.ERROR)

	# Detect an existing version-controlled checkout of the tree.
	vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
	vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))

	updatecache_flg = False
	if myaction == "metadata":
		print "skipping sync"
		updatecache_flg = True
	elif ".git" in vcs_dirs:
		# Update existing git repository, and ignore the syncuri. We are
		# going to trust the user and assume that the user is in the branch
		# that he/she wants updated. We'll let the user manage branches with
		msg = ">>> Starting git pull in %s..." % myportdir
		emergelog(xterm_titles, msg )
		writemsg_level(msg + "\n")
		exitcode = portage.spawn("cd %s ; git pull" % \
			(portage._shell_quote(myportdir),), settings, free=1)
		if exitcode != os.EX_OK:
			msg = "!!! git pull error in %s." % myportdir
			emergelog(xterm_titles, msg)
			writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
		msg = ">>> Git pull in %s successful" % myportdir
		emergelog(xterm_titles, msg)
		writemsg_level(msg + "\n")
	elif syncuri[:8]=="rsync://":
		# Refuse to rsync over a tree that is under revision control.
		for vcs_dir in vcs_dirs:
			writemsg_level(("!!! %s appears to be under revision " + \
				"control (contains %s).\n!!! Aborting rsync sync.\n") % \
				(myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
		if not os.path.exists("/usr/bin/rsync"):
			print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
			print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
		import shlex, StringIO
		if settings["PORTAGE_RSYNC_OPTS"] == "":
			portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
			rsync_opts.extend([
				"--recursive",    # Recurse directories
				"--links",        # Consider symlinks
				"--safe-links",   # Ignore links outside of tree
				"--perms",        # Preserve permissions
				"--times",        # Preserive mod times
				"--compress",     # Compress the data transmitted
				"--force",        # Force deletion on non-empty dirs
				"--whole-file",   # Don't do block transfers, only entire files
				"--delete",       # Delete files that aren't in the master tree
				"--stats",        # Show final statistics about what was transfered
				"--timeout="+str(mytimeout), # IO timeout if not done in X seconds
				"--exclude=/distfiles",   # Exclude distfiles from consideration
				"--exclude=/local",       # Exclude local     from consideration
				"--exclude=/packages",    # Exclude packages  from consideration
			# The below validation is not needed when using the above hardcoded
			portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
			lexer = shlex.shlex(StringIO.StringIO(
				settings.get("PORTAGE_RSYNC_OPTS","")), posix=True)
			lexer.whitespace_split = True
			rsync_opts.extend(lexer)

			# Options that rsync syncing cannot work without.
			for opt in ("--recursive", "--times"):
				if opt not in rsync_opts:
					portage.writemsg(yellow("WARNING:") + " adding required option " + \
						"%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
					rsync_opts.append(opt)

			for exclude in ("distfiles", "local", "packages"):
				opt = "--exclude=/%s" % exclude
				if opt not in rsync_opts:
					portage.writemsg(yellow("WARNING:") + \
						" adding required option %s not included in " % opt + \
						"PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
					rsync_opts.append(opt)

			# Official gentoo.org mirrors get extra mandatory options.
			if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
				def rsync_opt_startswith(opt_prefix):
					for x in rsync_opts:
						if x.startswith(opt_prefix):

				if not rsync_opt_startswith("--timeout="):
					rsync_opts.append("--timeout=%d" % mytimeout)

				for opt in ("--compress", "--whole-file"):
					if opt not in rsync_opts:
						portage.writemsg(yellow("WARNING:") + " adding required option " + \
							"%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
						rsync_opts.append(opt)

		if "--quiet" in myopts:
			rsync_opts.append("--quiet")    # Shut up a lot
			rsync_opts.append("--verbose")	# Print filelist

		if "--verbose" in myopts:
			rsync_opts.append("--progress")	# Progress meter for each file

		if "--debug" in myopts:
			rsync_opts.append("--checksum")	# Force checksum on all files

		# Real local timestamp file.
		servertimestampfile = os.path.join(
			myportdir, "metadata", "timestamp.chk")

		content = portage.util.grabfile(servertimestampfile)
				# Parse the local timestamp for later comparison against
				# the server's copy.
				mytimestamp = time.mktime(time.strptime(content[0],
					"%a, %d %b %Y %H:%M:%S +0000"))
			except (OverflowError, ValueError):

			rsync_initial_timeout = \
				int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
			rsync_initial_timeout = 15

			maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
		except SystemExit, e:
			raise # Needed else can't exit
			maxretries=3 #default number of retries

		# Split the URI into credentials, hostname and port.
		user_name, hostname, port = re.split(
			"rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
		if user_name is None:
		updatecache_flg=True
		all_rsync_opts = set(rsync_opts)
		lexer = shlex.shlex(StringIO.StringIO(
			settings.get("PORTAGE_RSYNC_EXTRA_OPTS","")), posix=True)
		lexer.whitespace_split = True
		extra_rsync_opts = list(lexer)
		all_rsync_opts.update(extra_rsync_opts)
		# Honor -4/--ipv4 and -6/--ipv6 when resolving the mirror.
		family = socket.AF_INET
		if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
			family = socket.AF_INET
		elif socket.has_ipv6 and \
			("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
			family = socket.AF_INET6
		# Sentinel exit codes used by the retry loop below.
		SERVER_OUT_OF_DATE = -1
		EXCEEDED_MAX_RETRIES = -2
				# Resolve the mirror hostname; the address order is
				# shuffled for rudimentary load balancing.
				for addrinfo in socket.getaddrinfo(
					hostname, None, family, socket.SOCK_STREAM):
					if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
						# IPv6 addresses need to be enclosed in square brackets
						ips.append("[%s]" % addrinfo[4][0])
						ips.append(addrinfo[4][0])
				from random import shuffle
			except SystemExit, e:
				raise # Needed else can't exit
			except Exception, e:
				print "Notice:",str(e)

				# Substitute the literal IP for the hostname in the URI.
				dosyncuri = syncuri.replace(
					"//" + user_name + hostname + port + "/",
					"//" + user_name + ips[0] + port + "/", 1)
			except SystemExit, e:
				raise # Needed else can't exit
			except Exception, e:
				print "Notice:",str(e)

			if "--ask" in myopts:
				if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":

				emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
				if "--quiet" not in myopts:
					print ">>> Starting rsync with "+dosyncuri+"..."
				emergelog(xterm_titles,
					">>> Starting retry %d of %d with %s" % \
					(retries,maxretries,dosyncuri))
				print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)

			if mytimestamp != 0 and "--quiet" not in myopts:
				print ">>> Checking server timestamp ..."

			rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts

			if "--debug" in myopts:

			exitcode = os.EX_OK
			servertimestamp = 0
			# Even if there's no timestamp available locally, fetch the
			# timestamp anyway as an initial probe to verify that the server is
			# responsive. This protects us from hanging indefinitely on a
			# connection attempt to an unresponsive server which rsync's
			# --timeout option does not prevent.

				# Temporary file for remote server timestamp comparison.
				from tempfile import mkstemp
				fd, tmpservertimestampfile = mkstemp()
				mycommand = rsynccommand[:]
				mycommand.append(dosyncuri.rstrip("/") + \
					"/metadata/timestamp.chk")
				mycommand.append(tmpservertimestampfile)
					def timeout_handler(signum, frame):
						raise portage.exception.PortageException("timed out")
					signal.signal(signal.SIGALRM, timeout_handler)
					# Timeout here in case the server is unresponsive. The
					# --timeout rsync option doesn't apply to the initial
					# connection attempt.
					if rsync_initial_timeout:
						signal.alarm(rsync_initial_timeout)
						mypids.extend(portage.process.spawn(
							mycommand, env=settings.environ(), returnpid=True))
						exitcode = os.waitpid(mypids[0], 0)[1]
						content = portage.grabfile(tmpservertimestampfile)
						if rsync_initial_timeout:
							os.unlink(tmpservertimestampfile)
				except portage.exception.PortageException, e:
					# The probe timed out: reap the straggling rsync.
					if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
						os.kill(mypids[0], signal.SIGTERM)
						os.waitpid(mypids[0], 0)
					# This is the same code rsync uses for timeout.
				if exitcode != os.EX_OK:
					# Translate the waitpid status into an exit code.
					if exitcode & 0xff:
						exitcode = (exitcode & 0xff) << 8
						exitcode = exitcode >> 8
					portage.process.spawned_pids.remove(mypids[0])
						servertimestamp = time.mktime(time.strptime(
							content[0], "%a, %d %b %Y %H:%M:%S +0000"))
					except (OverflowError, ValueError):
				del mycommand, mypids, content
			if exitcode == os.EX_OK:
				if (servertimestamp != 0) and (servertimestamp == mytimestamp):
					emergelog(xterm_titles,
						">>> Cancelling sync -- Already current.")
					print ">>> Timestamps on the server and in the local repository are the same."
					print ">>> Cancelling all further sync action. You are already up to date."
					print ">>> In order to force sync, remove '%s'." % servertimestampfile
				elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
					emergelog(xterm_titles,
						">>> Server out of date: %s" % dosyncuri)
					print ">>> SERVER OUT OF DATE: %s" % dosyncuri
					print ">>> In order to force sync, remove '%s'." % servertimestampfile
					exitcode = SERVER_OUT_OF_DATE
				elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
					# Server is newer (or no timestamps available): sync.
					mycommand = rsynccommand + [dosyncuri+"/", myportdir]
					exitcode = portage.process.spawn(mycommand,
						env=settings.environ())
					if exitcode in [0,1,3,4,11,14,20,21]:
			elif exitcode in [1,3,4,11,14,20,21]:
			# Code 2 indicates protocol incompatibility, which is expected
			# for servers with protocol < 29 that don't support
			# --prune-empty-directories. Retry for a server that supports
			# at least rsync protocol version 29 (>=rsync-2.6.4).

			if retries<=maxretries:
				print ">>> Retrying..."
				# Out of retries: give up on this mirror round.
				updatecache_flg=False
				exitcode = EXCEEDED_MAX_RETRIES

			emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
		elif exitcode == SERVER_OUT_OF_DATE:
		elif exitcode == EXCEEDED_MAX_RETRIES:
			sys.stderr.write(
				">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
			# Map well-known rsync exit codes to friendly explanations.
			msg.append("Rsync has reported that there is a syntax error. Please ensure")
			msg.append("that your SYNC statement is proper.")
			msg.append("SYNC=" + settings["SYNC"])
			msg.append("Rsync has reported that there is a File IO error. Normally")
			msg.append("this means your disk is full, but can be caused by corruption")
			msg.append("on the filesystem that contains PORTDIR. Please investigate")
			msg.append("and try again after the problem has been fixed.")
			msg.append("PORTDIR=" + settings["PORTDIR"])
			msg.append("Rsync was killed before it finished.")
			msg.append("Rsync has not successfully finished. It is recommended that you keep")
			msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
			msg.append("to use rsync due to firewall or other restrictions. This should be a")
			msg.append("temporary problem unless complications exist with your network")
			msg.append("(and possibly your system's filesystem) configuration.")
	elif syncuri[:6]=="cvs://":
		if not os.path.exists("/usr/bin/cvs"):
			print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
			print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
		cvsroot=syncuri[6:]
		cvsdir=os.path.dirname(myportdir)
		if not os.path.exists(myportdir+"/CVS"):
			# No checkout yet: do the initial (full) checkout.
			print ">>> Starting initial cvs checkout with "+syncuri+"..."
			if os.path.exists(cvsdir+"/gentoo-x86"):
				print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
				os.rmdir(myportdir)
				if e.errno != errno.ENOENT:
					"!!! existing '%s' directory; exiting.\n" % myportdir)
			if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
				print "!!! cvs checkout error; exiting."
			os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
			print ">>> Starting cvs update with "+syncuri+"..."
			retval = portage.spawn("cd '%s'; cvs -z0 -q update -dP" % \
				myportdir, settings, free=1)
			if retval != os.EX_OK:
		dosyncuri = syncuri
		writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
			noiselevel=-1, level=logging.ERROR)

	if updatecache_flg and \
		myaction != "metadata" and \
		"metadata-transfer" not in settings.features:
		updatecache_flg = False

	# Reload the whole config from scratch.
	settings, trees, mtimedb = load_emerge_config(trees=trees)
	root_config = trees[settings["ROOT"]]["root_config"]
	portdb = trees[settings["ROOT"]]["porttree"].dbapi

	if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
		action_metadata(settings, portdb, myopts)

	if portage._global_updates(trees, mtimedb["updates"]):
		# _global_updates can move packages around, so
		# Reload the whole config from scratch.
		settings, trees, mtimedb = load_emerge_config(trees=trees)
		portdb = trees[settings["ROOT"]]["porttree"].dbapi
		root_config = trees[settings["ROOT"]]["root_config"]

	# Determine whether a newer portage is now available.
	mybestpv = portdb.xmatch("bestmatch-visible",
		portage.const.PORTAGE_PACKAGE_ATOM)
	mypvs = portage.best(
		trees[settings["ROOT"]]["vartree"].dbapi.match(
			portage.const.PORTAGE_PACKAGE_ATOM))

	chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())

	if myaction != "metadata":
		# Run the user's post-sync hook if one is installed.
		if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
			retval = portage.process.spawn(
				[os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
				dosyncuri], env=settings.environ())
			if retval != os.EX_OK:
				print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")

	if(mybestpv != mypvs) and not "--quiet" in myopts:
		print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
		print red(" * ")+"that you update portage now, before any other packages are updated."
		print red(" * ")+"To update portage, run 'emerge portage' now."

	display_news_notification(root_config, myopts)
def action_metadata(settings, portdb, myopts):
	# Transfer metadata from ${PORTDIR}/metadata/cache into the local
	# depcache, showing percentage progress unless --quiet is in effect.
	portage.writemsg_stdout("\n>>> Updating Portage cache: ")
	old_umask = os.umask(0002)
	cachedir = os.path.normpath(settings.depcachedir)
	# Refuse to operate on a primary system directory, since the cache
	# directory contents may be wiped/replaced.
	if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
		"/lib", "/opt", "/proc", "/root", "/sbin",
		"/sys", "/tmp", "/usr", "/var"]:
		print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
			"ROOT DIRECTORY ON YOUR SYSTEM."
		print >> sys.stderr, \
			"!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
	if not os.path.exists(cachedir):
	ec = portage.eclass_cache.cache(portdb.porttree_root)
	myportdir = os.path.realpath(settings["PORTDIR"])
	# Source cache module reading the pregenerated metadata/cache tree.
	cm = settings.load_best_module("portdbapi.metadbmodule")(
		myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)

	from portage.cache import util

	# Progress reporter that prints a percentage while mirroring.
	class percentage_noise_maker(util.quiet_mirroring):
		def __init__(self, dbapi):
			self.cp_all = dbapi.cp_all()
			l = len(self.cp_all)
			self.call_update_min = 100000000
			self.min_cp_all = l/100.0

		def __iter__(self):
			for x in self.cp_all:
				if self.count > self.min_cp_all:
					self.call_update_min = 0
				for y in self.dbapi.cp_list(x):
			self.call_update_mine = 0

		def update(self, *arg):
			# Overwrite the previously printed percentage in place.
			try: self.pstr = int(self.pstr) + 1
			except ValueError: self.pstr = 1
			sys.stdout.write("%s%i%%" % \
				("\b" * (len(str(self.pstr))+1), self.pstr))
			self.call_update_min = 10000000

		def finish(self, *arg):
			sys.stdout.write("\b\b\b\b100%\n")

	if "--quiet" in myopts:
		# Quiet mode: plain cpv generator, no progress output.
		def quicky_cpv_generator(cp_all_list):
			for x in cp_all_list:
				for y in portdb.cp_list(x):
		source = quicky_cpv_generator(portdb.cp_all())
		noise_maker = portage.cache.util.quiet_mirroring()
		noise_maker = source = percentage_noise_maker(portdb)
	portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
		eclass_cache=ec, verbose_instance=noise_maker)

	os.umask(old_umask)
def action_regen(settings, portdb, max_jobs, max_load):
	# Regenerate the metadata cache by sourcing every ebuild, optionally
	# in parallel (bounded by max_jobs / max_load).
	xterm_titles = "notitles" not in settings.features
	emergelog(xterm_titles, " === regen")
	#regenerate cache entries
	portage.writemsg_stdout("Regenerating cache entries...\n")
		# Detach from stdin so sourced ebuilds can't block on it.
		os.close(sys.stdin.fileno())
	except SystemExit, e:
		raise # Needed else can't exit

	regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)

	portage.writemsg_stdout("done!\n")
def action_config(settings, trees, myopts, myfiles):
	# Run the pkg_config() phase of a single installed package, letting
	# the user pick one when the atom matches several.
	if len(myfiles) != 1:
		print red("!!! config can only take a single package atom at this time\n")
	if not is_valid_package_atom(myfiles[0]):
		portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
		portage.writemsg("!!! Please check ebuild(5) for full details.\n")
		portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
		pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
	except portage.exception.AmbiguousPackageName, e:
		# Multiple matches thrown from cpv_expand
		print "No packages found.\n"
	elif len(pkgs) > 1:
		if "--ask" in myopts:
			# Interactive selection among the multiple matches.
			print "Please select a package to configure:"
				options.append(str(idx))
				print options[-1]+") "+pkg
			options.append("X")
			idx = userquery("Selection?", options)
			pkg = pkgs[int(idx)-1]
			print "The following packages available:"
			print "\nPlease use a specific atom or the --ask option."

	if "--ask" in myopts:
		if userquery("Ready to configure "+pkg+"?") == "No":
		print "Configuring pkg..."

	ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
	mysettings = portage.config(clone=settings)
	vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
	debug = mysettings.get("PORTAGE_DEBUG") == "1"
	retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
		# NOTE(review): compares a string to int 1, so this is always
		# False; the `debug` variable above uses == "1" — likely the
		# intended comparison here as well. Confirm before changing.
		debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
		mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
	if retval == os.EX_OK:
		# Run the clean phase after a successful config phase.
		portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
			mysettings, debug=debug, mydbapi=vardb, tree="vartree")
12409 def action_info(settings, trees, myopts, myfiles):
12410 print getportageversion(settings["PORTDIR"], settings["ROOT"],
12411 settings.profile_path, settings["CHOST"],
12412 trees[settings["ROOT"]]["vartree"].dbapi)
12414 header_title = "System Settings"
12416 print header_width * "="
12417 print header_title.rjust(int(header_width/2 + len(header_title)/2))
12418 print header_width * "="
12419 print "System uname: "+platform.platform(aliased=1)
12421 lastSync = portage.grabfile(os.path.join(
12422 settings["PORTDIR"], "metadata", "timestamp.chk"))
12423 print "Timestamp of tree:",
12429 output=commands.getstatusoutput("distcc --version")
12431 print str(output[1].split("\n",1)[0]),
12432 if "distcc" in settings.features:
12437 output=commands.getstatusoutput("ccache -V")
12439 print str(output[1].split("\n",1)[0]),
12440 if "ccache" in settings.features:
12445 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
12446 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
12447 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
12448 myvars = portage.util.unique_array(myvars)
12452 if portage.isvalidatom(x):
12453 pkg_matches = trees["/"]["vartree"].dbapi.match(x)
12454 pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
12455 pkg_matches.sort(portage.pkgcmp)
12457 for pn, ver, rev in pkg_matches:
12459 pkgs.append(ver + "-" + rev)
12463 pkgs = ", ".join(pkgs)
12464 print "%-20s %s" % (x+":", pkgs)
12466 print "%-20s %s" % (x+":", "[NOT VALID]")
12468 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
12470 if "--verbose" in myopts:
12471 myvars=settings.keys()
12473 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
12474 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
12475 'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
12476 'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
12478 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
12480 myvars = portage.util.unique_array(myvars)
12486 print '%s="%s"' % (x, settings[x])
12488 use = set(settings["USE"].split())
12489 use_expand = settings["USE_EXPAND"].split()
12491 for varname in use_expand:
12492 flag_prefix = varname.lower() + "_"
12493 for f in list(use):
12494 if f.startswith(flag_prefix):
12498 print 'USE="%s"' % " ".join(use),
12499 for varname in use_expand:
12500 myval = settings.get(varname)
12502 print '%s="%s"' % (varname, myval),
12505 unset_vars.append(x)
12507 print "Unset: "+", ".join(unset_vars)
12510 if "--debug" in myopts:
12511 for x in dir(portage):
12512 module = getattr(portage, x)
12513 if "cvs_id_string" in dir(module):
12514 print "%s: %s" % (str(x), str(module.cvs_id_string))
12516 # See if we can find any packages installed matching the strings
12517 # passed on the command line
12519 vardb = trees[settings["ROOT"]]["vartree"].dbapi
12520 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12522 mypkgs.extend(vardb.match(x))
12524 # If some packages were found...
12526 # Get our global settings (we only print stuff if it varies from
12527 # the current config)
12528 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
12529 auxkeys = mydesiredvars + [ "USE", "IUSE"]
12531 pkgsettings = portage.config(clone=settings)
12533 for myvar in mydesiredvars:
12534 global_vals[myvar] = set(settings.get(myvar, "").split())
12536 # Loop through each package
12537 # Only print settings if they differ from global settings
12538 header_title = "Package Settings"
12539 print header_width * "="
12540 print header_title.rjust(int(header_width/2 + len(header_title)/2))
12541 print header_width * "="
12542 from portage.output import EOutput
12545 # Get all package specific variables
12546 auxvalues = vardb.aux_get(pkg, auxkeys)
12548 for i in xrange(len(auxkeys)):
12549 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
12551 for myvar in mydesiredvars:
12552 # If the package variable doesn't match the
12553 # current global variable, something has changed
12554 # so set diff_found so we know to print
12555 if valuesmap[myvar] != global_vals[myvar]:
12556 diff_values[myvar] = valuesmap[myvar]
12557 valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
12558 valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
12559 pkgsettings.reset()
12560 # If a matching ebuild is no longer available in the tree, maybe it
12561 # would make sense to compare against the flags for the best
12562 # available version with the same slot?
12564 if portdb.cpv_exists(pkg):
12566 pkgsettings.setcpv(pkg, mydb=mydb)
12567 if valuesmap["IUSE"].intersection(
12568 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
12569 diff_values["USE"] = valuesmap["USE"]
12570 # If a difference was found, print the info for
12573 # Print package info
12574 print "%s was built with the following:" % pkg
12575 for myvar in mydesiredvars + ["USE"]:
12576 if myvar in diff_values:
12577 mylist = list(diff_values[myvar])
12579 print "%s=\"%s\"" % (myvar, " ".join(mylist))
12581 print ">>> Attempting to run pkg_info() for '%s'" % pkg
12582 ebuildpath = vardb.findname(pkg)
12583 if not ebuildpath or not os.path.exists(ebuildpath):
12584 out.ewarn("No ebuild found for '%s'" % pkg)
12586 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
12587 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
12588 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
# Search action for emerge: run each command-line term through a single
# `search` instance and print the accumulated results.
# NOTE(review): this is a numbered partial listing -- original source
# lines (e.g. 12592, 12594, 12600, 12604) are missing between the
# numbered lines below, and indentation has been stripped, so the
# control flow here is incomplete; confirm against the full source.
12591 def action_search(root_config, myopts, myfiles, spinner):
12593 print "emerge: no search terms provided."
# One search instance is configured from the command-line options
# (--searchdesc, --quiet, --usepkg, --usepkgonly) and reused for all terms.
12595 searchinstance = search(root_config,
12596 spinner, "--searchdesc" in myopts,
12597 "--quiet" not in myopts, "--usepkg" in myopts,
12598 "--usepkgonly" in myopts)
12599 for mysearch in myfiles:
12601 searchinstance.execute(mysearch)
# An invalid regular expression in a search term is reported, not fatal.
12602 except re.error, comment:
12603 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
12605 searchinstance.output()
# depclean/prune action: build a dependency graph rooted at the "system"
# and "world" sets (or, for prune, at all installed packages) and select
# for unmerge every installed package that is not reachable in that graph.
# Packages providing shared libraries still consumed by kept packages are
# preserved.  Removal is ordered so that reverse dependencies are unmerged
# before the packages they depend on.
# NOTE(review): numbered partial listing -- many original lines are
# missing between the numbers below and indentation has been stripped,
# so block structure must be read from the embedded line numbers.
12607 def action_depclean(settings, trees, ldpath_mtimes,
12608 myopts, action, myfiles, spinner):
12609 # Kill packages that aren't explicitly merged or are required as a
12610 # dependency of another package. World file is explicit.
12612 # Global depclean or prune operations are not very safe when there are
12613 # missing dependencies since it's unknown how badly incomplete
12614 # the dependency graph is, and we might accidentally remove packages
12615 # that should have been pulled into the graph. On the other hand, it's
12616 # relatively safe to ignore missing deps when only asked to remove
12617 # specific packages.
12618 allow_missing_deps = len(myfiles) > 0
# Advisory text shown before a full (argument-less) depclean; the `msg`
# list initialization is on a line missing from this listing.
12621 msg.append("Always study the list of packages to be cleaned for any obvious\n")
12622 msg.append("mistakes. Packages that are part of the world set will always\n")
12623 msg.append("be kept. They can be manually added to this set with\n")
12624 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
12625 msg.append("package.provided (see portage(5)) will be removed by\n")
12626 msg.append("depclean, even if they are part of the world set.\n")
12628 msg.append("As a safety measure, depclean will not remove any packages\n")
12629 msg.append("unless *all* required dependencies have been resolved. As a\n")
12630 msg.append("consequence, it is often necessary to run %s\n" % \
12631 good("`emerge --update"))
12632 msg.append(good("--newuse --deep @system @world`") + \
12633 " prior to depclean.\n")
12635 if action == "depclean" and "--quiet" not in myopts and not myfiles:
12636 portage.writemsg_stdout("\n")
12638 portage.writemsg_stdout(colorize("WARN", " * ") + x)
12640 xterm_titles = "notitles" not in settings.features
12641 myroot = settings["ROOT"]
12642 root_config = trees[myroot]["root_config"]
12643 getSetAtoms = root_config.setconfig.getSetAtoms
12644 vardb = trees[myroot]["vartree"].dbapi
# The system and world sets are the roots that protect packages from
# removal; each is materialized as an InternalPackageSet of atoms.
12646 required_set_names = ("system", "world")
12650 for s in required_set_names:
12651 required_sets[s] = InternalPackageSet(
12652 initial_atoms=getSetAtoms(s))
12655 # When removing packages, use a temporary version of world
12656 # which excludes packages that are intended to be eligible for
12658 world_temp_set = required_sets["world"]
12659 system_set = required_sets["system"]
# Sanity checks: proceeding without a system or world set would strip
# the installation, so warn loudly and count down unless --pretend.
12661 if not system_set or not world_temp_set:
12664 writemsg_level("!!! You have no system list.\n",
12665 level=logging.ERROR, noiselevel=-1)
12667 if not world_temp_set:
12668 writemsg_level("!!! You have no world file.\n",
12669 level=logging.WARNING, noiselevel=-1)
12671 writemsg_level("!!! Proceeding is likely to " + \
12672 "break your installation.\n",
12673 level=logging.WARNING, noiselevel=-1)
12674 if "--pretend" not in myopts:
12675 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
12677 if action == "depclean":
12678 emergelog(xterm_titles, " >>> depclean")
# Validate and expand each command-line argument into a package atom;
# ambiguous short names are reported with their fully-qualified choices.
12681 args_set = InternalPackageSet()
12684 if not is_valid_package_atom(x):
12685 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
12686 level=logging.ERROR, noiselevel=-1)
12687 writemsg_level("!!! Please check ebuild(5) for full details.\n")
12690 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
12691 except portage.exception.AmbiguousPackageName, e:
12692 msg = "The short ebuild name \"" + x + \
12693 "\" is ambiguous. Please specify " + \
12694 "one of the following " + \
12695 "fully-qualified ebuild names instead:"
12696 for line in textwrap.wrap(msg, 70):
12697 writemsg_level("!!! %s\n" % (line,),
12698 level=logging.ERROR, noiselevel=-1)
12700 writemsg_level(" %s\n" % colorize("INFORM", i),
12701 level=logging.ERROR, noiselevel=-1)
12702 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
12705 matched_packages = False
12708 matched_packages = True
12710 if not matched_packages:
12711 writemsg_level(">>> No packages selected for removal by %s\n" % \
# Build the resolver with "remove"-mode depgraph parameters.
12715 writemsg_level("\nCalculating dependencies ")
12716 resolver_params = create_depgraph_params(myopts, "remove")
12717 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
12718 vardb = resolver.trees[myroot]["vartree"].dbapi
12720 if action == "depclean":
12723 # Pull in everything that's installed but not matched
12724 # by an argument atom since we don't want to clean any
12725 # package if something depends on it.
12727 world_temp_set.clear()
12732 if args_set.findAtomForPackage(pkg) is None:
12733 world_temp_set.add("=" + pkg.cpv)
# An unparseable PROVIDE string is reported, and the package is kept
# (added to the temporary world set) rather than risking removal.
12735 except portage.exception.InvalidDependString, e:
12736 show_invalid_depstring_notice(pkg,
12737 pkg.metadata["PROVIDE"], str(e))
12739 world_temp_set.add("=" + pkg.cpv)
12742 elif action == "prune":
12744 # Pull in everything that's installed since we don't
12745 # to prune a package if something depends on it.
12746 world_temp_set.clear()
12747 world_temp_set.update(vardb.cp_all())
12751 # Try to prune everything that's slotted.
12752 for cp in vardb.cp_all():
12753 if len(vardb.cp_list(cp)) > 1:
12756 # Remove atoms from world that match installed packages
12757 # that are also matched by argument atoms, but do not remove
12758 # them if they match the highest installed version.
12761 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
12762 if not pkgs_for_cp or pkg not in pkgs_for_cp:
12763 raise AssertionError("package expected in matches: " + \
12764 "cp = %s, cpv = %s matches = %s" % \
12765 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
# match_pkgs results are ordered, so the last entry is the highest
# installed version -- prune never removes the highest version.
12767 highest_version = pkgs_for_cp[-1]
12768 if pkg == highest_version:
12769 # pkg is the highest version
12770 world_temp_set.add("=" + pkg.cpv)
12773 if len(pkgs_for_cp) <= 1:
12774 raise AssertionError("more packages expected: " + \
12775 "cp = %s, cpv = %s matches = %s" % \
12776 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
12779 if args_set.findAtomForPackage(pkg) is None:
12780 world_temp_set.add("=" + pkg.cpv)
12782 except portage.exception.InvalidDependString, e:
12783 show_invalid_depstring_notice(pkg,
12784 pkg.metadata["PROVIDE"], str(e))
12786 world_temp_set.add("=" + pkg.cpv)
# Seed the resolver: each required set becomes a SetArg node whose
# atoms are pushed as dependencies rooted at that set.
12790 for s, package_set in required_sets.iteritems():
12791 set_atom = SETPREFIX + s
12792 set_arg = SetArg(arg=set_atom, set=package_set,
12793 root_config=resolver.roots[myroot])
12794 set_args[s] = set_arg
12795 for atom in set_arg.set:
12796 resolver._dep_stack.append(
12797 Dependency(atom=atom, root=myroot, parent=set_arg))
12798 resolver.digraph.add(set_arg, None)
12800 success = resolver._complete_graph()
12801 writemsg_level("\b\b... done!\n")
12803 resolver.display_problems()
# Helper: report atoms the resolver could not satisfy.  Soft-priority
# deps are tolerated; hard unresolved deps abort unless specific
# packages were requested (allow_missing_deps).
12808 def unresolved_deps():
12810 unresolvable = set()
12811 for dep in resolver._initially_unsatisfied_deps:
12812 if isinstance(dep.parent, Package) and \
12813 (dep.priority > UnmergeDepPriority.SOFT):
12814 unresolvable.add((dep.atom, dep.parent.cpv))
12816 if not unresolvable:
12819 if unresolvable and not allow_missing_deps:
12820 prefix = bad(" * ")
12822 msg.append("Dependencies could not be completely resolved due to")
12823 msg.append("the following required packages not being installed:")
12825 for atom, parent in unresolvable:
12826 msg.append(" %s pulled in by:" % (atom,))
12827 msg.append(" %s" % (parent,))
12829 msg.append("Have you forgotten to run " + \
12830 good("`emerge --update --newuse --deep @system @world`") + " prior")
12831 msg.append(("to %s? It may be necessary to manually " + \
12832 "uninstall packages that no longer") % action)
12833 msg.append("exist in the portage tree since " + \
12834 "it may not be possible to satisfy their")
12835 msg.append("dependencies. Also, be aware of " + \
12836 "the --with-bdeps option that is documented")
12837 msg.append("in " + good("`man emerge`") + ".")
12838 if action == "prune":
12840 msg.append("If you would like to ignore " + \
12841 "dependencies then use %s." % good("--nodeps"))
12842 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
12843 level=logging.ERROR, noiselevel=-1)
12847 if unresolved_deps():
# Snapshot the graph and count required Package nodes for the summary.
12850 graph = resolver.digraph.copy()
12851 required_pkgs_total = 0
12853 if isinstance(node, Package):
12854 required_pkgs_total += 1
# Helper: under --verbose, show which parents keep a package installed.
12856 def show_parents(child_node):
12857 parent_nodes = graph.parent_nodes(child_node)
12858 if not parent_nodes:
12859 # With --prune, the highest version can be pulled in without any
12860 # real parent since all installed packages are pulled in. In that
12861 # case there's nothing to show here.
12864 for node in parent_nodes:
12865 parent_strs.append(str(getattr(node, "cpv", node)))
12868 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
12869 for parent_str in parent_strs:
12870 msg.append(" %s\n" % (parent_str,))
12872 portage.writemsg_stdout("".join(msg), noiselevel=-1)
# Helper: a package is removable exactly when it is absent from the
# required-dependency graph; returns the list of such packages.
12874 def create_cleanlist():
12875 pkgs_to_remove = []
12877 if action == "depclean":
12883 arg_atom = args_set.findAtomForPackage(pkg)
12884 except portage.exception.InvalidDependString:
12885 # this error has already been displayed by now
12889 if pkg not in graph:
12890 pkgs_to_remove.append(pkg)
12891 elif "--verbose" in myopts:
12896 if pkg not in graph:
12897 pkgs_to_remove.append(pkg)
12898 elif "--verbose" in myopts:
12901 elif action == "prune":
12902 # Prune really uses all installed instead of world. It's not
12903 # a real reverse dependency so don't display it as such.
12904 graph.remove(set_args["world"])
12906 for atom in args_set:
12907 for pkg in vardb.match_pkgs(atom):
12908 if pkg not in graph:
12909 pkgs_to_remove.append(pkg)
12910 elif "--verbose" in myopts:
12913 if not pkgs_to_remove:
12915 ">>> No packages selected for removal by %s\n" % action)
12916 if "--verbose" not in myopts:
12918 ">>> To see reverse dependencies, use %s\n" % \
12920 if action == "prune":
12922 ">>> To ignore dependencies, use %s\n" % \
12925 return pkgs_to_remove
12927 cleanlist = create_cleanlist()
12930 clean_set = set(cleanlist)
12932 # Check if any of these package are the sole providers of libraries
12933 # with consumers that have not been selected for removal. If so, these
12934 # packages and any dependencies need to be added to the graph.
12935 real_vardb = trees[myroot]["vartree"].dbapi
12936 linkmap = real_vardb.linkmap
12937 liblist = linkmap.listLibraryObjects()
12938 consumer_cache = {}
12939 provider_cache = {}
12943 writemsg_level(">>> Checking for lib consumers...\n")
# For each removal candidate, collect the library files it owns and
# the consumers of those libraries (consumer lookups are cached).
12945 for pkg in cleanlist:
12946 pkg_dblink = real_vardb._dblink(pkg.cpv)
12947 provided_libs = set()
12949 for lib in liblist:
12950 if pkg_dblink.isowner(lib, myroot):
12951 provided_libs.add(lib)
12953 if not provided_libs:
12957 for lib in provided_libs:
12958 lib_consumers = consumer_cache.get(lib)
12959 if lib_consumers is None:
12960 lib_consumers = linkmap.findConsumers(lib)
12961 consumer_cache[lib] = lib_consumers
12963 consumers[lib] = lib_consumers
# A package consuming its own library does not keep it installed.
12968 for lib, lib_consumers in consumers.items():
12969 for consumer_file in list(lib_consumers):
12970 if pkg_dblink.isowner(consumer_file, myroot):
12971 lib_consumers.remove(consumer_file)
12972 if not lib_consumers:
# Pair each remaining consumer with the providers of the library's
# soname (soname and provider lookups are cached as well).
12978 for lib, lib_consumers in consumers.iteritems():
12980 soname = soname_cache.get(lib)
12982 soname = linkmap.getSoname(lib)
12983 soname_cache[lib] = soname
12985 consumer_providers = []
12986 for lib_consumer in lib_consumers:
12987 providers = provider_cache.get(lib)
12988 if providers is None:
12989 providers = linkmap.findProviders(lib_consumer)
12990 provider_cache[lib_consumer] = providers
12991 if soname not in providers:
12992 # Why does this happen?
12994 consumer_providers.append(
12995 (lib_consumer, providers[soname]))
12997 consumers[lib] = consumer_providers
12999 consumer_map[pkg] = consumers
# Resolve every consumer/provider file back to its owning package in
# one bulk query, which is cheaper than per-file ownership lookups.
13003 search_files = set()
13004 for consumers in consumer_map.itervalues():
13005 for lib, consumer_providers in consumers.iteritems():
13006 for lib_consumer, providers in consumer_providers:
13007 search_files.add(lib_consumer)
13008 search_files.update(providers)
13010 writemsg_level(">>> Assigning files to packages...\n")
13011 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
13013 for pkg, consumers in consumer_map.items():
13014 for lib, consumer_providers in consumers.items():
13015 lib_consumers = set()
13017 for lib_consumer, providers in consumer_providers:
13018 owner_set = file_owners.get(lib_consumer)
13019 provider_dblinks = set()
13020 provider_pkgs = set()
13022 if len(providers) > 1:
13023 for provider in providers:
13024 provider_set = file_owners.get(provider)
13025 if provider_set is not None:
13026 provider_dblinks.update(provider_set)
# If an alternative provider package survives the clean, this
# consumer does not force the current package to be kept.
13028 if len(provider_dblinks) > 1:
13029 for provider_dblink in provider_dblinks:
13030 pkg_key = ("installed", myroot,
13031 provider_dblink.mycpv, "nomerge")
13032 if pkg_key not in clean_set:
13033 provider_pkgs.add(vardb.get(pkg_key))
13038 if owner_set is not None:
13039 lib_consumers.update(owner_set)
# Consumers that are themselves scheduled for removal don't count.
13041 for consumer_dblink in list(lib_consumers):
13042 if ("installed", myroot, consumer_dblink.mycpv,
13043 "nomerge") in clean_set:
13044 lib_consumers.remove(consumer_dblink)
13048 consumers[lib] = lib_consumers
13052 del consumer_map[pkg]
13055 # TODO: Implement a package set for rebuilding consumer packages.
# Warn about the packages being preserved for link-level dependencies.
13057 msg = "In order to avoid breakage of link level " + \
13058 "dependencies, one or more packages will not be removed. " + \
13059 "This can be solved by rebuilding " + \
13060 "the packages that pulled them in."
13062 prefix = bad(" * ")
13063 from textwrap import wrap
13064 writemsg_level("".join(prefix + "%s\n" % line for \
13065 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
13068 for pkg, consumers in consumer_map.iteritems():
13069 unique_consumers = set(chain(*consumers.values()))
13070 unique_consumers = sorted(consumer.mycpv \
13071 for consumer in unique_consumers)
13073 msg.append(" %s pulled in by:" % (pkg.cpv,))
13074 for consumer in unique_consumers:
13075 msg.append(" %s" % (consumer,))
13077 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
13078 level=logging.WARNING, noiselevel=-1)
13080 # Add lib providers to the graph as children of lib consumers,
13081 # and also add any dependencies pulled in by the provider.
13082 writemsg_level(">>> Adding lib providers to graph...\n")
13084 for pkg, consumers in consumer_map.iteritems():
13085 for consumer_dblink in set(chain(*consumers.values())):
13086 consumer_pkg = vardb.get(("installed", myroot,
13087 consumer_dblink.mycpv, "nomerge"))
13088 if not resolver._add_pkg(pkg,
13089 Dependency(parent=consumer_pkg,
13090 priority=UnmergeDepPriority(runtime=True),
13092 resolver.display_problems()
# Re-complete the graph and recompute the clean list now that the
# preserved library providers have been added.
13095 writemsg_level("\nCalculating dependencies ")
13096 success = resolver._complete_graph()
13097 writemsg_level("\b\b... done!\n")
13098 resolver.display_problems()
13101 if unresolved_deps():
13104 graph = resolver.digraph.copy()
13105 required_pkgs_total = 0
13107 if isinstance(node, Package):
13108 required_pkgs_total += 1
13109 cleanlist = create_cleanlist()
13112 clean_set = set(cleanlist)
13114 # Use a topological sort to create an unmerge order such that
13115 # each package is unmerged before it's dependencies. This is
13116 # necessary to avoid breaking things that may need to run
13117 # during pkg_prerm or pkg_postrm phases.
13119 # Create a new graph to account for dependencies between the
13120 # packages being unmerged.
13124 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
13125 runtime = UnmergeDepPriority(runtime=True)
13126 runtime_post = UnmergeDepPriority(runtime_post=True)
13127 buildtime = UnmergeDepPriority(buildtime=True)
13129 "RDEPEND": runtime,
13130 "PDEPEND": runtime_post,
13131 "DEPEND": buildtime,
# Edges connect each clean-set package to the clean-set packages that
# its DEPEND/RDEPEND/PDEPEND atoms match, weighted by dep type.
13134 for node in clean_set:
13135 graph.add(node, None)
13137 node_use = node.metadata["USE"].split()
13138 for dep_type in dep_keys:
13139 depstr = node.metadata[dep_type]
# Strict checking is disabled around dep_check since these packages
# are being uninstalled anyway; the flag is restored immediately.
13143 portage.dep._dep_check_strict = False
13144 success, atoms = portage.dep_check(depstr, None, settings,
13145 myuse=node_use, trees=resolver._graph_trees,
13148 portage.dep._dep_check_strict = True
13150 # Ignore invalid deps of packages that will
13151 # be uninstalled anyway.
13154 priority = priority_map[dep_type]
13156 if not isinstance(atom, portage.dep.Atom):
13157 # Ignore invalid atoms returned from dep_check().
13161 matches = vardb.match_pkgs(atom)
13164 for child_node in matches:
13165 if child_node in clean_set:
13166 graph.add(child_node, node, priority=priority)
13169 if len(graph.order) == len(graph.root_nodes()):
13170 # If there are no dependencies between packages
13171 # let unmerge() group them by cat/pn.
13173 cleanlist = [pkg.cpv for pkg in graph.order]
13175 # Order nodes from lowest to highest overall reference count for
13176 # optimal root node selection.
13177 node_refcounts = {}
13178 for node in graph.order:
13179 node_refcounts[node] = len(graph.parent_nodes(node))
13180 def cmp_reference_count(node1, node2):
13181 return node_refcounts[node1] - node_refcounts[node2]
13182 graph.order.sort(cmp_reference_count)
# Drain the graph root-first; if only circular deps remain, relax the
# ignored priority one step at a time until a root node appears.
13184 ignore_priority_range = [None]
13185 ignore_priority_range.extend(
13186 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
13187 while not graph.empty():
13188 for ignore_priority in ignore_priority_range:
13189 nodes = graph.root_nodes(ignore_priority=ignore_priority)
13193 raise AssertionError("no root nodes")
13194 if ignore_priority is not None:
13195 # Some deps have been dropped due to circular dependencies,
13196 # so only pop one node in order do minimize the number that
13201 cleanlist.append(node.cpv)
13203 unmerge(root_config, myopts, "unmerge", cleanlist,
13204 ldpath_mtimes, ordered=ordered)
13206 if action == "prune":
# Final summary statistics for the operation.
13209 if not cleanlist and "--quiet" in myopts:
13212 print "Packages installed: "+str(len(vardb.cpv_all()))
13213 print "Packages in world: " + \
13214 str(len(root_config.sets["world"].getAtoms()))
13215 print "Packages in system: " + \
13216 str(len(root_config.sets["system"].getAtoms()))
13217 print "Required packages: "+str(required_pkgs_total)
13218 if "--pretend" in myopts:
13219 print "Number to remove: "+str(len(cleanlist))
13221 print "Number removed: "+str(len(cleanlist))
# Rebuild a depgraph from a saved resume list (mtimedb["resume"]).
# With skip_unsatisfied, packages whose dependencies can no longer be
# satisfied are iteratively pruned from the mergelist instead of
# aborting; the pruned packages are reported via dropped_tasks.
# NOTE(review): numbered partial listing -- original lines are missing
# between the numbers shown (e.g. the retry loop around
# loadResumeCommand), so the visible control flow is incomplete.
13223 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner,
13224 skip_masked=False, skip_unsatisfied=False):
13226 Construct a depgraph for the given resume list. This will raise
13227 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
13229 @returns: (success, depgraph, dropped_tasks)
13231 mergelist = mtimedb["resume"]["mergelist"]
13232 dropped_tasks = set()
13234 mydepgraph = depgraph(settings, trees,
13235 myopts, myparams, spinner)
13237 success = mydepgraph.loadResumeCommand(mtimedb["resume"],
13238 skip_masked=skip_masked)
13239 except depgraph.UnsatisfiedResumeDep, e:
# Without skip_unsatisfied the exception propagates to the caller.
13240 if not skip_unsatisfied:
13243 graph = mydepgraph.digraph
13244 unsatisfied_parents = dict((dep.parent, dep.parent) \
13245 for dep in e.value)
13246 traversed_nodes = set()
13247 unsatisfied_stack = list(unsatisfied_parents)
# Walk ancestors of each unsatisfied parent: dropping a package can in
# turn leave its own parents unsatisfied, so those are pruned too.
13248 while unsatisfied_stack:
13249 pkg = unsatisfied_stack.pop()
13250 if pkg in traversed_nodes:
13252 traversed_nodes.add(pkg)
13254 # If this package was pulled in by a parent
13255 # package scheduled for merge, removing this
13256 # package may cause the the parent package's
13257 # dependency to become unsatisfied.
13258 for parent_node in graph.parent_nodes(pkg):
13259 if not isinstance(parent_node, Package) \
13260 or parent_node.operation not in ("merge", "nomerge"):
13263 graph.child_nodes(parent_node,
13264 ignore_priority=DepPriority.SOFT)
13265 if pkg in unsatisfied:
13266 unsatisfied_parents[parent_node] = parent_node
13267 unsatisfied_stack.append(parent_node)
13269 pruned_mergelist = [x for x in mergelist \
13270 if isinstance(x, list) and \
13271 tuple(x) not in unsatisfied_parents]
13273 # If the mergelist doesn't shrink then this loop is infinite.
13274 if len(pruned_mergelist) == len(mergelist):
13275 # This happens if a package can't be dropped because
13276 # it's already installed, but it has unsatisfied PDEPEND.
13278 mergelist[:] = pruned_mergelist
13280 # Exclude installed packages that have been removed from the graph due
13281 # to failure to build/install runtime dependencies after the dependent
13282 # package has already been installed.
13283 dropped_tasks.update(pkg for pkg in \
13284 unsatisfied_parents if pkg.operation != "nomerge")
# break_refs lets the discarded graph state be garbage collected
# before the (not shown here) retry rebuilds the depgraph.
13285 mydepgraph.break_refs(unsatisfied_parents)
13287 del e, graph, traversed_nodes, \
13288 unsatisfied_parents, unsatisfied_stack
13292 return (success, mydepgraph, dropped_tasks)
13294 def action_build(settings, trees, mtimedb,
13295 myopts, myaction, myfiles, spinner):
13297 # validate the state of the resume data
13298 # so that we can make assumptions later.
13299 for k in ("resume", "resume_backup"):
13300 if k not in mtimedb:
13302 resume_data = mtimedb[k]
13303 if not isinstance(resume_data, dict):
13306 mergelist = resume_data.get("mergelist")
13307 if not isinstance(mergelist, list):
13310 for x in mergelist:
13311 if not (isinstance(x, list) and len(x) == 4):
13313 pkg_type, pkg_root, pkg_key, pkg_action = x
13314 if pkg_root not in trees:
13315 # Current $ROOT setting differs,
13316 # so the list must be stale.
13322 resume_opts = resume_data.get("myopts")
13323 if not isinstance(resume_opts, (dict, list)):
13326 favorites = resume_data.get("favorites")
13327 if not isinstance(favorites, list):
13332 if "--resume" in myopts and \
13333 ("resume" in mtimedb or
13334 "resume_backup" in mtimedb):
13336 if "resume" not in mtimedb:
13337 mtimedb["resume"] = mtimedb["resume_backup"]
13338 del mtimedb["resume_backup"]
13340 # "myopts" is a list for backward compatibility.
13341 resume_opts = mtimedb["resume"].get("myopts", [])
13342 if isinstance(resume_opts, list):
13343 resume_opts = dict((k,True) for k in resume_opts)
13344 for opt in ("--skipfirst", "--ask", "--tree"):
13345 resume_opts.pop(opt, None)
13346 myopts.update(resume_opts)
13348 if "--debug" in myopts:
13349 writemsg_level("myopts %s\n" % (myopts,))
13351 # Adjust config according to options of the command being resumed.
13352 for myroot in trees:
13353 mysettings = trees[myroot]["vartree"].settings
13354 mysettings.unlock()
13355 adjust_config(myopts, mysettings)
13357 del myroot, mysettings
13359 ldpath_mtimes = mtimedb["ldpath"]
13362 buildpkgonly = "--buildpkgonly" in myopts
13363 pretend = "--pretend" in myopts
13364 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
13365 ask = "--ask" in myopts
13366 nodeps = "--nodeps" in myopts
13367 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
13368 tree = "--tree" in myopts
13369 if nodeps and tree:
13371 del myopts["--tree"]
13372 portage.writemsg(colorize("WARN", " * ") + \
13373 "--tree is broken with --nodeps. Disabling...\n")
13374 debug = "--debug" in myopts
13375 verbose = "--verbose" in myopts
13376 quiet = "--quiet" in myopts
13377 if pretend or fetchonly:
13378 # make the mtimedb readonly
13379 mtimedb.filename = None
13380 if "--digest" in myopts:
13381 msg = "The --digest option can prevent corruption from being" + \
13382 " noticed. The `repoman manifest` command is the preferred" + \
13383 " way to generate manifests and it is capable of doing an" + \
13384 " entire repository or category at once."
13385 prefix = bad(" * ")
13386 writemsg(prefix + "\n")
13387 from textwrap import wrap
13388 for line in wrap(msg, 72):
13389 writemsg("%s%s\n" % (prefix, line))
13390 writemsg(prefix + "\n")
13392 if "--quiet" not in myopts and \
13393 ("--pretend" in myopts or "--ask" in myopts or \
13394 "--tree" in myopts or "--verbose" in myopts):
13396 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
13398 elif "--buildpkgonly" in myopts:
13402 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
13404 print darkgreen("These are the packages that would be %s, in reverse order:") % action
13408 print darkgreen("These are the packages that would be %s, in order:") % action
13411 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
13412 if not show_spinner:
13413 spinner.update = spinner.update_quiet
13416 favorites = mtimedb["resume"].get("favorites")
13417 if not isinstance(favorites, list):
13421 print "Calculating dependencies ",
13422 myparams = create_depgraph_params(myopts, myaction)
13424 resume_data = mtimedb["resume"]
13425 mergelist = resume_data["mergelist"]
13426 if mergelist and "--skipfirst" in myopts:
13427 for i, task in enumerate(mergelist):
13428 if isinstance(task, list) and \
13429 task and task[-1] == "merge":
13433 skip_masked = "--skipfirst" in myopts
13434 skip_unsatisfied = "--skipfirst" in myopts
13438 success, mydepgraph, dropped_tasks = resume_depgraph(
13439 settings, trees, mtimedb, myopts, myparams, spinner,
13440 skip_masked=skip_masked, skip_unsatisfied=skip_unsatisfied)
13441 except (portage.exception.PackageNotFound,
13442 depgraph.UnsatisfiedResumeDep), e:
13443 if isinstance(e, depgraph.UnsatisfiedResumeDep):
13444 mydepgraph = e.depgraph
13447 from textwrap import wrap
13448 from portage.output import EOutput
13451 resume_data = mtimedb["resume"]
13452 mergelist = resume_data.get("mergelist")
13453 if not isinstance(mergelist, list):
13455 if mergelist and debug or (verbose and not quiet):
13456 out.eerror("Invalid resume list:")
13459 for task in mergelist:
13460 if isinstance(task, list):
13461 out.eerror(indent + str(tuple(task)))
13464 if isinstance(e, depgraph.UnsatisfiedResumeDep):
13465 out.eerror("One or more packages are either masked or " + \
13466 "have missing dependencies:")
13469 for dep in e.value:
13470 if dep.atom is None:
13471 out.eerror(indent + "Masked package:")
13472 out.eerror(2 * indent + str(dep.parent))
13475 out.eerror(indent + str(dep.atom) + " pulled in by:")
13476 out.eerror(2 * indent + str(dep.parent))
13478 msg = "The resume list contains packages " + \
13479 "that are either masked or have " + \
13480 "unsatisfied dependencies. " + \
13481 "Please restart/continue " + \
13482 "the operation manually, or use --skipfirst " + \
13483 "to skip the first package in the list and " + \
13484 "any other packages that may be " + \
13485 "masked or have missing dependencies."
13486 for line in wrap(msg, 72):
13488 elif isinstance(e, portage.exception.PackageNotFound):
13489 out.eerror("An expected package is " + \
13490 "not available: %s" % str(e))
13492 msg = "The resume list contains one or more " + \
13493 "packages that are no longer " + \
13494 "available. Please restart/continue " + \
13495 "the operation manually."
13496 for line in wrap(msg, 72):
13500 print "\b\b... done!"
13504 portage.writemsg("!!! One or more packages have been " + \
13505 "dropped due to\n" + \
13506 "!!! masking or unsatisfied dependencies:\n\n",
13508 for task in dropped_tasks:
13509 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
13510 portage.writemsg("\n", noiselevel=-1)
13513 if mydepgraph is not None:
13514 mydepgraph.display_problems()
13515 if not (ask or pretend):
13516 # delete the current list and also the backup
13517 # since it's probably stale too.
13518 for k in ("resume", "resume_backup"):
13519 mtimedb.pop(k, None)
13524 if ("--resume" in myopts):
13525 print darkgreen("emerge: It seems we have nothing to resume...")
13528 myparams = create_depgraph_params(myopts, myaction)
13529 if "--quiet" not in myopts and "--nodeps" not in myopts:
13530 print "Calculating dependencies ",
13532 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
13534 retval, favorites = mydepgraph.select_files(myfiles)
13535 except portage.exception.PackageNotFound, e:
13536 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
13538 except portage.exception.PackageSetNotFound, e:
13539 root_config = trees[settings["ROOT"]]["root_config"]
13540 display_missing_pkg_set(root_config, e.value)
13543 print "\b\b... done!"
13545 mydepgraph.display_problems()
13548 if "--pretend" not in myopts and \
13549 ("--ask" in myopts or "--tree" in myopts or \
13550 "--verbose" in myopts) and \
13551 not ("--quiet" in myopts and "--ask" not in myopts):
13552 if "--resume" in myopts:
13553 mymergelist = mydepgraph.altlist()
13554 if len(mymergelist) == 0:
13555 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
13557 favorites = mtimedb["resume"]["favorites"]
13558 retval = mydepgraph.display(
13559 mydepgraph.altlist(reversed=tree),
13560 favorites=favorites)
13561 mydepgraph.display_problems()
13562 if retval != os.EX_OK:
13564 prompt="Would you like to resume merging these packages?"
13566 retval = mydepgraph.display(
13567 mydepgraph.altlist(reversed=("--tree" in myopts)),
13568 favorites=favorites)
13569 mydepgraph.display_problems()
13570 if retval != os.EX_OK:
13573 for x in mydepgraph.altlist():
13574 if isinstance(x, Package) and x.operation == "merge":
13578 sets = trees[settings["ROOT"]]["root_config"].sets
13579 world_candidates = None
13580 if "--noreplace" in myopts and \
13581 not oneshot and favorites:
13582 # Sets that are not world candidates are filtered
13583 # out here since the favorites list needs to be
13584 # complete for depgraph.loadResumeCommand() to
13585 # operate correctly.
13586 world_candidates = [x for x in favorites \
13587 if not (x.startswith(SETPREFIX) and \
13588 not sets[x[1:]].world_candidate)]
13589 if "--noreplace" in myopts and \
13590 not oneshot and world_candidates:
13592 for x in world_candidates:
13593 print " %s %s" % (good("*"), x)
13594 prompt="Would you like to add these packages to your world favorites?"
13595 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
13596 prompt="Nothing to merge; would you like to auto-clean packages?"
13599 print "Nothing to merge; quitting."
13602 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
13603 prompt="Would you like to fetch the source files for these packages?"
13605 prompt="Would you like to merge these packages?"
13607 if "--ask" in myopts and userquery(prompt) == "No":
13612 # Don't ask again (e.g. when auto-cleaning packages after merge)
13613 myopts.pop("--ask", None)
13615 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
13616 if ("--resume" in myopts):
13617 mymergelist = mydepgraph.altlist()
13618 if len(mymergelist) == 0:
13619 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
13621 favorites = mtimedb["resume"]["favorites"]
13622 retval = mydepgraph.display(
13623 mydepgraph.altlist(reversed=tree),
13624 favorites=favorites)
13625 mydepgraph.display_problems()
13626 if retval != os.EX_OK:
13629 retval = mydepgraph.display(
13630 mydepgraph.altlist(reversed=("--tree" in myopts)),
13631 favorites=favorites)
13632 mydepgraph.display_problems()
13633 if retval != os.EX_OK:
13635 if "--buildpkgonly" in myopts:
13636 graph_copy = mydepgraph.digraph.clone()
13637 for node in list(graph_copy.order):
13638 if not isinstance(node, Package):
13639 graph_copy.remove(node)
13640 if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
13641 print "\n!!! --buildpkgonly requires all dependencies to be merged."
13642 print "!!! You have to merge the dependencies before you can build this package.\n"
13645 if "--buildpkgonly" in myopts:
13646 graph_copy = mydepgraph.digraph.clone()
13647 for node in list(graph_copy.order):
13648 if not isinstance(node, Package):
13649 graph_copy.remove(node)
13650 if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
13651 print "\n!!! --buildpkgonly requires all dependencies to be merged."
13652 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
13655 if ("--resume" in myopts):
13656 favorites=mtimedb["resume"]["favorites"]
13657 mymergelist = mydepgraph.altlist()
13658 mydepgraph.break_refs(mymergelist)
13659 mergetask = Scheduler(settings, trees, mtimedb, myopts,
13660 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
13661 del mydepgraph, mymergelist
13662 clear_caches(trees)
13664 retval = mergetask.merge()
13665 merge_count = mergetask.curval
13667 if "resume" in mtimedb and \
13668 "mergelist" in mtimedb["resume"] and \
13669 len(mtimedb["resume"]["mergelist"]) > 1:
13670 mtimedb["resume_backup"] = mtimedb["resume"]
13671 del mtimedb["resume"]
13673 mtimedb["resume"]={}
13674 # Stored as a dict starting with portage-2.1.6_rc1, and supported
13675 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
13676 # a list type for options.
13677 mtimedb["resume"]["myopts"] = myopts.copy()
13679 # Convert Atom instances to plain str since the mtimedb loader
13680 # sets unpickler.find_global = None which causes unpickler.load()
13681 # to raise the following exception:
13683 # cPickle.UnpicklingError: Global and instance pickles are not supported.
13685 # TODO: Maybe stop setting find_global = None, or find some other
13686 # way to avoid accidental triggering of the above UnpicklingError.
13687 mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
13689 if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
13690 for pkgline in mydepgraph.altlist():
13691 if pkgline[0]=="ebuild" and pkgline[3]=="merge":
13692 y = trees[pkgline[1]]["porttree"].dbapi.findname(pkgline[2])
13693 tmpsettings = portage.config(clone=settings)
13695 if settings.get("PORTAGE_DEBUG", "") == "1":
13697 retval = portage.doebuild(
13698 y, "digest", settings["ROOT"], tmpsettings, edebug,
13699 ("--pretend" in myopts),
13700 mydbapi=trees[pkgline[1]]["porttree"].dbapi,
13703 pkglist = mydepgraph.altlist()
13704 mydepgraph.saveNomergeFavorites()
13705 mydepgraph.break_refs(pkglist)
13706 mergetask = Scheduler(settings, trees, mtimedb, myopts,
13707 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
13708 del mydepgraph, pkglist
13709 clear_caches(trees)
13711 retval = mergetask.merge()
13712 merge_count = mergetask.curval
13714 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
13715 if "yes" == settings.get("AUTOCLEAN"):
13716 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
13717 unmerge(trees[settings["ROOT"]]["root_config"],
13718 myopts, "clean", [],
13719 ldpath_mtimes, autoclean=1)
13721 portage.writemsg_stdout(colorize("WARN", "WARNING:")
13722 + " AUTOCLEAN is disabled. This can cause serious"
13723 + " problems due to overlapping packages.\n")
13724 trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
def multiple_actions(action1, action2):
    """Complain on stderr that two mutually exclusive actions were given."""
    for line in (
        "\n!!! Multiple actions requested... Please choose one only.\n",
        "!!! '%s' or '%s'\n\n" % (action1, action2),
    ):
        sys.stderr.write(line)
# NOTE(review): this dump is missing interleaved source lines here (the
# new_args initialisation, the while-loop header, try/except wrappers,
# else branches and the final return) -- visible lines are reproduced
# as-is and annotated; confirm against a pristine copy of the file.
def insert_optional_args(args):
    """
    Parse optional arguments and insert a value if one has
    not been provided. This is done before feeding the args
    to the optparse parser since that parser does not support
    this feature natively.
    """
    # The option spellings that may take an optional job count.
    jobs_opts = ("-j", "--jobs")
    # Work on a reversed copy so pop() consumes args left-to-right.
    arg_stack = args[:]
    arg_stack.reverse()
    arg = arg_stack.pop()
    # A short-option cluster containing j, e.g. "-vj" or "-j4".
    short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
    if not (short_job_opt or arg in jobs_opts):
        new_args.append(arg)
    # Insert an empty placeholder in order to
    # satisfy the requirements of optparse.
    new_args.append("--jobs")
    if short_job_opt and len(arg) > 2:
        if arg[:2] == "-j":
            # "-jN": the count is fused directly to the option.
            job_count = int(arg[2:])
            saved_opts = arg[2:]
            # Strip "j" out of a cluster like "-vj", keep the rest.
            saved_opts = arg[1:].replace("j", "")
    if job_count is None and arg_stack:
        # The following argument may be the job count, e.g. "-j 4".
        job_count = int(arg_stack[-1])
        # Discard the job count from the stack
        # since we're consuming it here.
    if job_count is None:
        # unlimited number of jobs
        new_args.append("True")
        new_args.append(str(job_count))
    if saved_opts is not None:
        # Re-emit the non-j short options that shared the cluster.
        new_args.append("-" + saved_opts)
def parse_opts(tmpcmdline, silent=False):
    """Parse the emerge command line into (myaction, myopts, myfiles).

    NOTE(review): many interleaved lines are missing from this dump
    (the option-table keys, try/except wrappers, else branches and the
    myopts/myfiles initialisation); visible lines are annotated as-is.
    """
    global actions, options, shortmapping
    # Alternative long-option spellings mapped to canonical names.
    longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
    # Options that take an argument; the dict keys naming each option
    # are missing from this dump -- only per-option kwargs fragments
    # remain visible.
    argument_options = {
        "help":"specify the location for portage configuration files",
        "help":"enable or disable color output",
        "choices":("y", "n")
        "help" : "Specifies the number of packages to build " + \
        "--load-average": {
        "help" :"Specifies that no new builds should be started " + \
            "if there are other builds running and the load average " + \
            "is at least LOAD (a floating-point number).",
        "help":"include unnecessary build time dependencies",
        "choices":("y", "n")
        "help":"specify conditions to trigger package reinstallation",
        "choices":["changed-use"]
    from optparse import OptionParser
    parser = OptionParser()
    # emerge supplies its own help action, so drop optparse's built-in.
    if parser.has_option("--help"):
        parser.remove_option("--help")
    # Register boolean action options (--sync, --depclean, ...).
    for action_opt in actions:
        parser.add_option("--" + action_opt, action="store_true",
            dest=action_opt.replace("-", "_"), default=False)
    # NOTE(review): lstrip("--") strips a *character set*, not a prefix;
    # it is correct here only while no option name starts with "-" after
    # the leading dashes.
    for myopt in options:
        parser.add_option(myopt, action="store_true",
            dest=myopt.lstrip("--").replace("-", "_"), default=False)
    for shortopt, longopt in shortmapping.iteritems():
        parser.add_option("-" + shortopt, action="store_true",
            dest=longopt.lstrip("--").replace("-", "_"), default=False)
    for myalias, myopt in longopt_aliases.iteritems():
        parser.add_option(myalias, action="store_true",
            dest=myopt.lstrip("--").replace("-", "_"), default=False)
    for myopt, kwargs in argument_options.iteritems():
        parser.add_option(myopt,
            dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
    # Expand "-jN" / bare "--jobs" forms before optparse sees them.
    tmpcmdline = insert_optional_args(tmpcmdline)
    myoptions, myargs = parser.parse_args(args=tmpcmdline)
    # "True" is the placeholder meaning --jobs without an explicit count.
    if myoptions.jobs == "True":
    jobs = int(myoptions.jobs)
    if jobs is not True and \
    writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
        (myoptions.jobs,), noiselevel=-1)
    myoptions.jobs = jobs
    if myoptions.load_average:
    load_average = float(myoptions.load_average)
    # A non-positive load limit means "no limit".
    if load_average <= 0.0:
        load_average = None
    writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
        (myoptions.load_average,), noiselevel=-1)
    myoptions.load_average = load_average
    # Copy parsed values back into a dict keyed by option string.
    for myopt in options:
        v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
        myopts[myopt] = True
    for myopt in argument_options:
        v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
    # Exactly one action may be selected; bail out otherwise.
    for action_opt in actions:
        v = getattr(myoptions, action_opt.replace("-", "_"))
        multiple_actions(myaction, action_opt)
        myaction = action_opt
    return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
    """Run config validation on the vartree settings of every root."""
    for myroot in trees:
        trees[myroot]["vartree"].settings.validate()
def clear_caches(trees):
    """Drop cached metadata held by each tree's dbapi objects, then flush
    portage's global directory-listing cache."""
    for root_trees in trees.itervalues():
        port_db = root_trees["porttree"].dbapi
        bin_db = root_trees["bintree"].dbapi
        port_db.melt()
        port_db._aux_cache.clear()
        bin_db._aux_cache.clear()
        bin_db._clear_cache()
        root_trees["vartree"].dbapi.linkmap._clear_cache()
    portage.dircache.clear()
def load_emerge_config(trees=None):
    """Build (settings, trees, mtimedb) from the environment and on-disk
    portage configuration.  NOTE(review): the kwargs initialisation and
    assignment lines are missing from this dump -- v is presumably
    collected into kwargs before create_trees(); confirm."""
    for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
        v = os.environ.get(envvar, None)
        if v and v.strip():
    trees = portage.create_trees(trees=trees, **kwargs)
    # Attach a RootConfig (settings + set configuration) to every root.
    for root, root_trees in trees.iteritems():
        settings = root_trees["vartree"].settings
        setconfig = load_default_config(settings, root_trees)
        root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
    settings = trees["/"]["vartree"].settings
    # Prefer the target ROOT's settings over "/" when applicable
    # (selection condition lines missing from this dump).
    for myroot in trees:
        settings = trees[myroot]["vartree"].settings
    mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
    mtimedb = portage.MtimeDB(mtimedbfile)
    return settings, trees, mtimedb
def adjust_config(myopts, settings):
    """Make emerge specific adjustments to the config."""
    # NOTE(review): several try:/else: header lines are missing from
    # this dump around the int()/float() parses below; visible lines
    # are reproduced as-is.
    # To enhance usability, make some vars case insensitive by forcing them to
    for myvar in ("AUTOCLEAN", "NOCOLOR"):
        if myvar in settings:
            settings[myvar] = settings[myvar].lower()
            settings.backup_changes(myvar)
    # Kill noauto as it will break merges otherwise.
    if "noauto" in settings.features:
        while "noauto" in settings.features:
            settings.features.remove("noauto")
        settings["FEATURES"] = " ".join(settings.features)
        settings.backup_changes("FEATURES")
    # Parse CLEAN_DELAY; fall back to the builtin default on bad input.
    CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
    except ValueError, e:
        portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
        portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
            settings["CLEAN_DELAY"], noiselevel=-1)
    settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
    settings.backup_changes("CLEAN_DELAY")
    EMERGE_WARNING_DELAY = 10
    EMERGE_WARNING_DELAY = int(settings.get(
        "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
    except ValueError, e:
        portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
        portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
            settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
    settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
    settings.backup_changes("EMERGE_WARNING_DELAY")
    if "--quiet" in myopts:
        settings["PORTAGE_QUIET"]="1"
        settings.backup_changes("PORTAGE_QUIET")
    if "--verbose" in myopts:
        settings["PORTAGE_VERBOSE"] = "1"
        settings.backup_changes("PORTAGE_VERBOSE")
    # Set so that configs will be merged regardless of remembered status
    if ("--noconfmem" in myopts):
        settings["NOCONFMEM"]="1"
        settings.backup_changes("NOCONFMEM")
    # Set various debug markers... They should be merged somehow.
    PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
    if PORTAGE_DEBUG not in (0, 1):
        portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
            PORTAGE_DEBUG, noiselevel=-1)
        portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
    except ValueError, e:
        portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
        portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
            settings["PORTAGE_DEBUG"], noiselevel=-1)
    if "--debug" in myopts:
        settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
        settings.backup_changes("PORTAGE_DEBUG")
    if settings.get("NOCOLOR") not in ("yes","true"):
        portage.output.havecolor = 1
    """The explicit --color < y | n > option overrides the NOCOLOR environment
    variable and stdout auto-detection."""
    if "--color" in myopts:
        if "y" == myopts["--color"]:
            portage.output.havecolor = 1
            settings["NOCOLOR"] = "false"
            portage.output.havecolor = 0
            settings["NOCOLOR"] = "true"
        settings.backup_changes("NOCOLOR")
    elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
        # Not a terminal: disable color unless NOCOLOR explicitly says "no".
        portage.output.havecolor = 0
        settings["NOCOLOR"] = "true"
        settings.backup_changes("NOCOLOR")
# NOTE(review): the body of this function is missing from this dump
# (only the def line survives); it presumably applies the nice/ionice
# helpers defined below -- confirm against a pristine copy.
def apply_priorities(settings):
def nice(settings):
    """Renice the current process by PORTAGE_NICENESS, reporting failures."""
    # NOTE(review): the opening "try:" line is missing from this dump.
    os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
    except (OSError, ValueError), e:
        out = portage.output.EOutput()
        out.eerror("Failed to change nice value to '%s'" % \
            settings["PORTAGE_NICENESS"])
        out.eerror("%s\n" % str(e))
def ionice(settings):
    """Run PORTAGE_IONICE_COMMAND (with ${PID} substituted) to adjust this
    process's I/O priority.  NOTE(review): guard lines (empty-command
    early return, the "try:" header and the no-op return in the
    CommandNotFound handler) are missing from this dump."""
    ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
    ionice_cmd = shlex.split(ionice_cmd)
    from portage.util import varexpand
    # Substitute ${PID} in the configured command with our own pid.
    variables = {"PID" : str(os.getpid())}
    cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
    rval = portage.process.spawn(cmd, env=os.environ)
    except portage.exception.CommandNotFound:
        # The OS kernel probably doesn't support ionice,
        # so return silently.
    if rval != os.EX_OK:
        out = portage.output.EOutput()
        out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
        out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
def display_missing_pkg_set(root_config, set_name):
    """Log an error listing the sets that do exist when set_name is unknown."""
    # NOTE(review): the "msg = []" initialisation line is missing from
    # this dump.
    msg.append(("emerge: There are no sets to satisfy '%s'. " + \
        "The following sets exist:") % \
        colorize("INFORM", set_name))
    for s in sorted(root_config.sets):
        msg.append("    %s" % s)
    writemsg_level("".join("%s\n" % l for l in msg),
        level=logging.ERROR, noiselevel=-1)
def expand_set_arguments(myfiles, myaction, root_config):
    """Expand @set arguments (including a(b,c) parameters and the -@/+@
    set-expression operators) into (newargs, retval).

    NOTE(review): this dump is missing many interleaved lines (newargs
    and retval initialisation, several loop/try headers, else branches,
    IS_OPERATOR/ARG_START/ARG_END definitions); visible lines are
    reproduced as-is and annotated.
    """
    setconfig = root_config.setconfig
    sets = setconfig.getSets()
    # In order to know exactly which atoms/sets should be added to the
    # world file, the depgraph performs set expansion later. It will get
    # confused about where the atoms came from if it's not allowed to
    # expand them itself.
    do_not_expand = (None, )
    if a in ("system", "world"):
        newargs.append(SETPREFIX+a)
    # separators for set arguments
    # WARNING: all operators must be of equal length
    DIFF_OPERATOR = "-@"
    UNION_OPERATOR = "+@"
    # Pass 1: handle parameterised sets like @foo(bar=baz,opt).
    for i in range(0, len(myfiles)):
        if myfiles[i].startswith(SETPREFIX):
            x = myfiles[i][len(SETPREFIX):]
            start = x.find(ARG_START)
            end = x.find(ARG_END)
            if start > 0 and start < end:
                namepart = x[:start]
                argpart = x[start+1:end]
                # TODO: implement proper quoting
                args = argpart.split(",")
                k, v = a.split("=", 1)
                # Bare arguments act as boolean flags.
                options[a] = "True"
                setconfig.update(namepart, options)
                newset += (x[:start-len(namepart)]+namepart)
                x = x[end+len(ARG_END):]
            myfiles[i] = SETPREFIX+newset
    # Re-read the sets after any parameter updates above.
    sets = setconfig.getSets()
    # display errors that occured while loading the SetConfig instance
    for e in setconfig.errors:
        print colorize("BAD", "Error during set creation: %s" % e)
    # emerge relies on the existance of sets with names "world" and "system"
    required_sets = ("world", "system")
    for s in required_sets:
        msg = ["emerge: incomplete set configuration, " + \
            "no \"%s\" set defined" % s]
        msg.append("        sets defined: %s" % ", ".join(sets))
        sys.stderr.write(line + "\n")
    unmerge_actions = ("unmerge", "prune", "clean", "depclean")
    # Pass 2: expand each @set argument into atoms (or keep it symbolic).
    if a.startswith(SETPREFIX):
        # support simple set operations (intersection, difference and union)
        # on the commandline. Expressions are evaluated strictly left-to-right
        if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
            expression = a[len(SETPREFIX):]
            # Peel operators off right-to-left to build the operand list.
            while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
                is_pos = expression.rfind(IS_OPERATOR)
                diff_pos = expression.rfind(DIFF_OPERATOR)
                union_pos = expression.rfind(UNION_OPERATOR)
                op_pos = max(is_pos, diff_pos, union_pos)
                s1 = expression[:op_pos]
                s2 = expression[op_pos+len(IS_OPERATOR):]
                op = expression[op_pos:op_pos+len(IS_OPERATOR)]
                display_missing_pkg_set(root_config, s2)
                expr_sets.insert(0, s2)
                expr_ops.insert(0, op)
            if not expression in sets:
                display_missing_pkg_set(root_config, expression)
            expr_sets.insert(0, expression)
            # Evaluate the expression left-to-right over the atom sets.
            result = set(setconfig.getSetAtoms(expression))
            for i in range(0, len(expr_ops)):
                s2 = setconfig.getSetAtoms(expr_sets[i+1])
                if expr_ops[i] == IS_OPERATOR:
                    result.intersection_update(s2)
                elif expr_ops[i] == DIFF_OPERATOR:
                    result.difference_update(s2)
                elif expr_ops[i] == UNION_OPERATOR:
                    raise NotImplementedError("unknown set operator %s" % expr_ops[i])
            newargs.extend(result)
            s = a[len(SETPREFIX):]
            display_missing_pkg_set(root_config, s)
            setconfig.active.append(s)
            set_atoms = setconfig.getSetAtoms(s)
            except portage.exception.PackageSetNotFound, e:
                writemsg_level(("emerge: the given set '%s' " + \
                    "contains a non-existent set named '%s'.\n") % \
                    (s, e), level=logging.ERROR, noiselevel=-1)
            if myaction in unmerge_actions and \
                not sets[s].supportsOperation("unmerge"):
                sys.stderr.write("emerge: the given set '%s' does " % s + \
                    "not support unmerge operations\n")
            elif not set_atoms:
                print "emerge: '%s' is an empty set" % s
            elif myaction not in do_not_expand:
                newargs.extend(set_atoms)
                newargs.append(SETPREFIX+s)
            for e in sets[s].errors:
    return (newargs, retval)
def repo_name_check(trees):
    """Warn about repositories that lack a profiles/repo_name entry.
    Returns True when at least one repository is missing one.
    NOTE(review): the "for r in repos:" header and "msg = []" lines are
    missing from this dump."""
    missing_repo_names = set()
    for root, root_trees in trees.iteritems():
        if "porttree" in root_trees:
            portdb = root_trees["porttree"].dbapi
            # Start with every tree, then discard those with a repo name.
            missing_repo_names.update(portdb.porttrees)
            repos = portdb.getRepositories()
            missing_repo_names.discard(portdb.getRepositoryPath(r))
            if portdb.porttree_root in missing_repo_names and \
                not os.path.exists(os.path.join(
                portdb.porttree_root, "profiles")):
                # This is normal if $PORTDIR happens to be empty,
                # so don't warn about it.
                missing_repo_names.remove(portdb.porttree_root)
    if missing_repo_names:
        msg.append("WARNING: One or more repositories " + \
            "have missing repo_name entries:")
        for p in missing_repo_names:
            msg.append("\t%s/profiles/repo_name" % (p,))
        msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
            "should be a plain text file containing a unique " + \
            "name for the repository on the first line.", 70))
        writemsg_level("".join("%s\n" % l for l in msg),
            level=logging.WARNING, noiselevel=-1)
    return bool(missing_repo_names)
def config_protect_check(trees):
    """Warn when CONFIG_PROTECT is empty for any configured root."""
    # NOTE(review): a guard line (presumably appending the root name only
    # for non-default roots) is missing between the two msg lines below.
    for root, root_trees in trees.iteritems():
        if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
            msg = "!!! CONFIG_PROTECT is empty"
            msg += " for '%s'" % root
            writemsg_level(msg, level=logging.WARN, noiselevel=-1)
def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
    """Report that a short package name matched several category/package
    pairs; in non-quiet mode also run a search to describe each match.
    NOTE(review): several lines (early return after the quiet branch,
    the insert_category_into_atom() arguments, the per-match output
    loop body) are missing from this dump."""
    if "--quiet" in myopts:
        print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
        print "!!! one of the following fully-qualified ebuild names instead:\n"
        for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
            print "    " + colorize("INFORM", cp)
    s = search(root_config, spinner, "--searchdesc" in myopts,
        "--quiet" not in myopts, "--usepkg" in myopts,
        "--usepkgonly" in myopts)
    null_cp = portage.dep_getkey(insert_category_into_atom(
    cat, atom_pn = portage.catsplit(null_cp)
    s.searchkey = atom_pn
    for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
    print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
    print "!!! one of the above fully-qualified ebuild names instead.\n"
def profile_check(trees, myaction, myopts):
    """Verify that every root has a valid profile; allowed-anyway actions
    (info/sync/--version/--help) short-circuit.  NOTE(review): the
    return statements (early os.EX_OK returns and the error return at
    the end) are missing from this dump."""
    if myaction in ("info", "sync"):
    elif "--version" in myopts or "--help" in myopts:
    for root, root_trees in trees.iteritems():
        if root_trees["root_config"].settings.profiles:
        # generate some profile related warning messages
        validate_ebuild_environment(trees)
        msg = "If you have just changed your profile configuration, you " + \
            "should revert back to the previous configuration. Due to " + \
            "your current profile being invalid, allowed actions are " + \
            "limited to --help, --info, --sync, and --version."
        writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
            level=logging.ERROR, noiselevel=-1)
14330 global portage # NFC why this is necessary now - genone
14331 portage._disable_legacy_globals()
14332 # Disable color until we're sure that it should be enabled (after
14333 # EMERGE_DEFAULT_OPTS has been parsed).
14334 portage.output.havecolor = 0
14335 # This first pass is just for options that need to be known as early as
14336 # possible, such as --config-root. They will be parsed again later,
14337 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
14338 # the value of --config-root).
14339 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
14340 if "--debug" in myopts:
14341 os.environ["PORTAGE_DEBUG"] = "1"
14342 if "--config-root" in myopts:
14343 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
14345 # Portage needs to ensure a sane umask for the files it creates.
14347 settings, trees, mtimedb = load_emerge_config()
14348 portdb = trees[settings["ROOT"]]["porttree"].dbapi
14349 rval = profile_check(trees, myaction, myopts)
14350 if rval != os.EX_OK:
14353 if portage._global_updates(trees, mtimedb["updates"]):
14355 # Reload the whole config from scratch.
14356 settings, trees, mtimedb = load_emerge_config(trees=trees)
14357 portdb = trees[settings["ROOT"]]["porttree"].dbapi
14359 xterm_titles = "notitles" not in settings.features
14362 if "--ignore-default-opts" not in myopts:
14363 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
14364 tmpcmdline.extend(sys.argv[1:])
14365 myaction, myopts, myfiles = parse_opts(tmpcmdline)
14367 if "--digest" in myopts:
14368 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
14369 # Reload the whole config from scratch so that the portdbapi internal
14370 # config is updated with new FEATURES.
14371 settings, trees, mtimedb = load_emerge_config(trees=trees)
14372 portdb = trees[settings["ROOT"]]["porttree"].dbapi
14374 for myroot in trees:
14375 mysettings = trees[myroot]["vartree"].settings
14376 mysettings.unlock()
14377 adjust_config(myopts, mysettings)
14378 mysettings["PORTAGE_COUNTER_HASH"] = \
14379 trees[myroot]["vartree"].dbapi._counter_hash()
14380 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
14382 del myroot, mysettings
14384 apply_priorities(settings)
14386 spinner = stdout_spinner()
14387 if "candy" in settings.features:
14388 spinner.update = spinner.update_scroll
14390 if "--quiet" not in myopts:
14391 portage.deprecated_profile_check(settings=settings)
14392 repo_name_check(trees)
14393 config_protect_check(trees)
14395 eclasses_overridden = {}
14396 for mytrees in trees.itervalues():
14397 mydb = mytrees["porttree"].dbapi
14398 # Freeze the portdbapi for performance (memoize all xmatch results).
14400 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
14403 if eclasses_overridden and \
14404 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
14405 prefix = bad(" * ")
14406 if len(eclasses_overridden) == 1:
14407 writemsg(prefix + "Overlay eclass overrides " + \
14408 "eclass from PORTDIR:\n", noiselevel=-1)
14410 writemsg(prefix + "Overlay eclasses override " + \
14411 "eclasses from PORTDIR:\n", noiselevel=-1)
14412 writemsg(prefix + "\n", noiselevel=-1)
14413 for eclass_name in sorted(eclasses_overridden):
14414 writemsg(prefix + " '%s/%s.eclass'\n" % \
14415 (eclasses_overridden[eclass_name], eclass_name),
14417 writemsg(prefix + "\n", noiselevel=-1)
14418 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
14419 "because it will trigger invalidation of cached ebuild metadata " + \
14420 "that is distributed with the portage tree. If you must " + \
14421 "override eclasses from PORTDIR then you are advised to add " + \
14422 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
14423 "`emerge --regen` after each time that you run `emerge --sync`. " + \
14424 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
14425 "you would like to disable this warning."
14426 from textwrap import wrap
14427 for line in wrap(msg, 72):
14428 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
14430 if "moo" in myfiles:
14433 Larry loves Gentoo (""" + platform.system() + """)
14435 _______________________
14436 < Have you mooed today? >
14437 -----------------------
14447 ext = os.path.splitext(x)[1]
14448 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
14449 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
14452 root_config = trees[settings["ROOT"]]["root_config"]
14453 if myaction == "list-sets":
14454 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
14458 # only expand sets for actions taking package arguments
14459 oldargs = myfiles[:]
14460 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
14461 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
14462 if retval != os.EX_OK:
14465 # Need to handle empty sets specially, otherwise emerge will react
14466 # with the help message for empty argument lists
14467 if oldargs and not myfiles:
14468 print "emerge: no targets left after set expansion"
14471 if ("--tree" in myopts) and ("--columns" in myopts):
14472 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
14475 if ("--quiet" in myopts):
14476 spinner.update = spinner.update_quiet
14477 portage.util.noiselimit = -1
14479 # Always create packages if FEATURES=buildpkg
14480 # Imply --buildpkg if --buildpkgonly
14481 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
14482 if "--buildpkg" not in myopts:
14483 myopts["--buildpkg"] = True
14485 # Also allow -S to invoke search action (-sS)
14486 if ("--searchdesc" in myopts):
14487 if myaction and myaction != "search":
14488 myfiles.append(myaction)
14489 if "--search" not in myopts:
14490 myopts["--search"] = True
14491 myaction = "search"
14493 # Always try and fetch binary packages if FEATURES=getbinpkg
14494 if ("getbinpkg" in settings.features):
14495 myopts["--getbinpkg"] = True
14497 if "--buildpkgonly" in myopts:
14498 # --buildpkgonly will not merge anything, so
14499 # it cancels all binary package options.
14500 for opt in ("--getbinpkg", "--getbinpkgonly",
14501 "--usepkg", "--usepkgonly"):
14502 myopts.pop(opt, None)
14504 if "--fetch-all-uri" in myopts:
14505 myopts["--fetchonly"] = True
14507 if "--skipfirst" in myopts and "--resume" not in myopts:
14508 myopts["--resume"] = True
14510 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
14511 myopts["--usepkgonly"] = True
14513 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
14514 myopts["--getbinpkg"] = True
14516 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
14517 myopts["--usepkg"] = True
14519 # Also allow -K to apply --usepkg/-k
14520 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
14521 myopts["--usepkg"] = True
14523 # Allow -p to remove --ask
14524 if ("--pretend" in myopts) and ("--ask" in myopts):
14525 print ">>> --pretend disables --ask... removing --ask from options."
14526 del myopts["--ask"]
14528 # forbid --ask when not in a terminal
14529 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
14530 if ("--ask" in myopts) and (not sys.stdin.isatty()):
14531 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
14535 if settings.get("PORTAGE_DEBUG", "") == "1":
14536 spinner.update = spinner.update_quiet
14538 if "python-trace" in settings.features:
14539 import portage.debug
14540 portage.debug.set_trace(True)
14542 if not ("--quiet" in myopts):
14543 if not sys.stdout.isatty() or ("--nospinner" in myopts):
14544 spinner.update = spinner.update_basic
14546 if "--version" in myopts:
14547 print getportageversion(settings["PORTDIR"], settings["ROOT"],
14548 settings.profile_path, settings["CHOST"],
14549 trees[settings["ROOT"]]["vartree"].dbapi)
14551 elif "--help" in myopts:
14552 _emerge.help.help(myaction, myopts, portage.output.havecolor)
14555 if "--debug" in myopts:
14556 print "myaction", myaction
14557 print "myopts", myopts
14559 if not myaction and not myfiles and "--resume" not in myopts:
14560 _emerge.help.help(myaction, myopts, portage.output.havecolor)
14563 pretend = "--pretend" in myopts
14564 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14565 buildpkgonly = "--buildpkgonly" in myopts
14567 # check if root user is the current user for the actions where emerge needs this
14568 if portage.secpass < 2:
14569 # We've already allowed "--version" and "--help" above.
14570 if "--pretend" not in myopts and myaction not in ("search","info"):
14571 need_superuser = not \
14573 (buildpkgonly and secpass >= 1) or \
14574 myaction in ("metadata", "regen") or \
14575 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
14576 if portage.secpass < 1 or \
14579 access_desc = "superuser"
14581 access_desc = "portage group"
14582 # Always show portage_group_warning() when only portage group
14583 # access is required but the user is not in the portage group.
14584 from portage.data import portage_group_warning
14585 if "--ask" in myopts:
14586 myopts["--pretend"] = True
14587 del myopts["--ask"]
14588 print ("%s access is required... " + \
14589 "adding --pretend to options.\n") % access_desc
14590 if portage.secpass < 1 and not need_superuser:
14591 portage_group_warning()
14593 sys.stderr.write(("emerge: %s access is " + \
14594 "required.\n\n") % access_desc)
14595 if portage.secpass < 1 and not need_superuser:
14596 portage_group_warning()
14599 disable_emergelog = False
14600 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
14602 disable_emergelog = True
14604 if myaction in ("search", "info"):
14605 disable_emergelog = True
14606 if disable_emergelog:
14607 """ Disable emergelog for everything except build or unmerge
14608 operations. This helps minimize parallel emerge.log entries that can
14609 confuse log parsers. We especially want it disabled during
14610 parallel-fetch, which uses --resume --fetchonly."""
14612 def emergelog(*pargs, **kargs):
14615 if not "--pretend" in myopts:
14616 emergelog(xterm_titles, "Started emerge on: "+\
14617 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
14620 myelogstr=" ".join(myopts)
14622 myelogstr+=" "+myaction
14624 myelogstr += " " + " ".join(oldargs)
14625 emergelog(xterm_titles, " *** emerge " + myelogstr)
def emergeexitsig(signum, frame):
	"""Handle a fatal signal (SIGINT/SIGTERM): mask both signals so we
	are not re-entered mid-shutdown, announce the signal, and exit with
	a status of 100 plus the signal number."""
	for sig in (signal.SIGINT, signal.SIGTERM):
		signal.signal(sig, signal.SIG_IGN)
	portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
	sys.exit(signum + 100)
14633 signal.signal(signal.SIGINT, emergeexitsig)
14634 signal.signal(signal.SIGTERM, emergeexitsig)
14637 """This gets our final log message in before we quit."""
14638 if "--pretend" not in myopts:
14639 emergelog(xterm_titles, " *** terminating.")
14640 if "notitles" not in settings.features:
14642 portage.atexit_register(emergeexit)
14644 if myaction in ("config", "metadata", "regen", "sync"):
14645 if "--pretend" in myopts:
14646 sys.stderr.write(("emerge: The '%s' action does " + \
14647 "not support '--pretend'.\n") % myaction)
14650 if "sync" == myaction:
14651 return action_sync(settings, trees, mtimedb, myopts, myaction)
14652 elif "metadata" == myaction:
14653 action_metadata(settings, portdb, myopts)
14654 elif myaction=="regen":
14655 validate_ebuild_environment(trees)
14656 action_regen(settings, portdb, myopts.get("--jobs"),
14657 myopts.get("--load-average"))
14659 elif "config"==myaction:
14660 validate_ebuild_environment(trees)
14661 action_config(settings, trees, myopts, myfiles)
14664 elif "search"==myaction:
14665 validate_ebuild_environment(trees)
14666 action_search(trees[settings["ROOT"]]["root_config"],
14667 myopts, myfiles, spinner)
14668 elif myaction in ("clean", "unmerge") or \
14669 (myaction == "prune" and "--nodeps" in myopts):
14670 validate_ebuild_environment(trees)
14672 # Ensure atoms are valid before calling unmerge().
14673 # For backward compat, leading '=' is not required.
14675 if is_valid_package_atom(x) or \
14676 is_valid_package_atom("=" + x):
14679 msg.append("'%s' is not a valid package atom." % (x,))
14680 msg.append("Please check ebuild(5) for full details.")
14681 writemsg_level("".join("!!! %s\n" % line for line in msg),
14682 level=logging.ERROR, noiselevel=-1)
14685 # When given a list of atoms, unmerge
14686 # them in the order given.
14687 ordered = myaction == "unmerge"
14688 if 1 == unmerge(root_config, myopts, myaction, myfiles,
14689 mtimedb["ldpath"], ordered=ordered):
14690 if not (buildpkgonly or fetchonly or pretend):
14691 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
14693 elif myaction in ("depclean", "info", "prune"):
14695 # Ensure atoms are valid before calling unmerge().
14696 vardb = trees[settings["ROOT"]]["vartree"].dbapi
14699 if is_valid_package_atom(x):
14701 valid_atoms.append(
14702 portage.dep_expand(x, mydb=vardb, settings=settings))
14703 except portage.exception.AmbiguousPackageName, e:
14704 msg = "The short ebuild name \"" + x + \
14705 "\" is ambiguous. Please specify " + \
14706 "one of the following " + \
14707 "fully-qualified ebuild names instead:"
14708 for line in textwrap.wrap(msg, 70):
14709 writemsg_level("!!! %s\n" % (line,),
14710 level=logging.ERROR, noiselevel=-1)
14712 writemsg_level(" %s\n" % colorize("INFORM", i),
14713 level=logging.ERROR, noiselevel=-1)
14714 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
14718 msg.append("'%s' is not a valid package atom." % (x,))
14719 msg.append("Please check ebuild(5) for full details.")
14720 writemsg_level("".join("!!! %s\n" % line for line in msg),
14721 level=logging.ERROR, noiselevel=-1)
14724 if myaction == "info":
14725 return action_info(settings, trees, myopts, valid_atoms)
14727 validate_ebuild_environment(trees)
14728 action_depclean(settings, trees, mtimedb["ldpath"],
14729 myopts, myaction, valid_atoms, spinner)
14730 if not (buildpkgonly or fetchonly or pretend):
14731 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
14732 # "update", "system", or just process files:
14734 validate_ebuild_environment(trees)
14735 if "--pretend" not in myopts:
14736 display_news_notification(root_config, myopts)
14737 retval = action_build(settings, trees, mtimedb,
14738 myopts, myaction, myfiles, spinner)
14739 root_config = trees[settings["ROOT"]]["root_config"]
14740 post_emerge(root_config, myopts, mtimedb, retval)