2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id: emerge 5976 2007-02-17 09:14:53Z genone $
7 # This block ensures that ^C interrupts are handled quietly.
# Quiet ^C handling: once an INT/TERM arrives, ignore any further INT/TERM
# while shutdown proceeds so the user doesn't see a traceback.
# NOTE(review): interior lines are elided in this view (the handler's exit
# call and the enclosing try: are not visible) -- confirm against full file.
11 def exithandler(signum,frame):
12 signal.signal(signal.SIGINT, signal.SIG_IGN)
13 signal.signal(signal.SIGTERM, signal.SIG_IGN)
# Install the handler for INT/TERM; restore default SIGPIPE behavior so
# writes to a closed pipe terminate the process instead of raising IOError.
16 signal.signal(signal.SIGINT, exithandler)
17 signal.signal(signal.SIGTERM, exithandler)
18 signal.signal(signal.SIGPIPE, signal.SIG_DFL)
20 except KeyboardInterrupt:
24 from collections import deque
41 from os import path as osp
42 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
45 from portage import digraph
46 from portage.const import NEWS_LIB_PATH
49 import portage.xpak, commands, errno, re, socket, time, types
50 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
51 nc_len, red, teal, turquoise, xtermTitle, \
52 xtermTitleReset, yellow
53 from portage.output import create_color_func
54 good = create_color_func("GOOD")
55 bad = create_color_func("BAD")
56 # white looks bad on terminals with white background
57 from portage.output import bold as white
61 portage.dep._dep_check_strict = True
64 import portage.exception
65 from portage.data import secpass
66 from portage.elog.messages import eerror
67 from portage.util import normalize_path as normpath
68 from portage.util import writemsg, writemsg_level
69 from portage._sets import load_default_config, SETPREFIX
70 from portage._sets.base import InternalPackageSet
72 from itertools import chain, izip
73 from UserDict import DictMixin
76 import cPickle as pickle
81 import cStringIO as StringIO
# Progress spinner written to stdout while long operations (dependency
# calculation, cache regeneration) run. Three display modes are visible:
# a basic dot printer, a scrolling message, and a twirling cursor; a quiet
# mode does nothing. NOTE(review): interior lines (e.g. __init__ header,
# return statements after the _return_early() checks) are elided in this view.
85 class stdout_spinner(object):
# Messages cycled through by the "scroll" display mode.
87 "Gentoo Rocks ("+platform.system()+")",
88 "Thank you for using Gentoo. :)",
89 "Are you actually trying to read this?",
90 "How many times have you stared at this?",
91 "We are generating the cache right now",
92 "You are paying too much attention.",
93 "A theory is better than its explanation.",
94 "Phasers locked on target, Captain.",
95 "Thrashing is just virtual crashing.",
96 "To be is to program.",
97 "Real Users hate Real Programmers.",
98 "When all else fails, read the instructions.",
99 "Functionality breeds Contempt.",
100 "The future lies ahead.",
101 "3.1415926535897932384626433832795028841971694",
102 "Sometimes insanity is the only alternative.",
103 "Inaccuracy saves a world of explanation.",
# Character sequence stepped through by update_twirl().
106 twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
110 self.update = self.update_twirl
# Pick a scroll message pseudo-randomly, seeded from the current time.
111 self.scroll_sequence = self.scroll_msgs[
112 int(time.time() * 100) % len(self.scroll_msgs)]
# Minimum seconds between visible updates (throttles tty writes).
114 self.min_display_latency = 0.05
116 def _return_early(self):
118 Flushing output to the tty too frequently wastes cpu time. Therefore,
119 each update* method should return without doing any output when this
122 cur_time = time.time()
123 if cur_time - self.last_update < self.min_display_latency:
125 self.last_update = cur_time
# Print a dot for every 100 position ticks; restart marker after 500.
128 def update_basic(self):
129 self.spinpos = (self.spinpos + 1) % 500
130 if self._return_early():
132 if (self.spinpos % 100) == 0:
133 if self.spinpos == 0:
134 sys.stdout.write(". ")
136 sys.stdout.write(".")
# Scroll the chosen message left-to-right then right-to-left
# (spinpos counts through twice the sequence length).
139 def update_scroll(self):
140 if self._return_early():
142 if(self.spinpos >= len(self.scroll_sequence)):
143 sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
144 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
146 sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
148 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
# Classic rotating /-\| cursor.
150 def update_twirl(self):
151 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
152 if self._return_early():
154 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
# No-op spinner used with --quiet / --nospinner.
157 def update_quiet(self):
160 def userquery(prompt, responses=None, colours=None):
161 """Displays a prompt and a set of responses, then waits for a response
162 which is checked against the responses and the first to match is
163 returned. An empty response will match the first value in responses. The
164 input buffer is *not* cleared prior to the prompt!
167 responses: a List of Strings.
168 colours: a List of Functions taking and returning a String, used to
169 process the responses for display. Typically these will be functions
170 like red() but could be e.g. lambda x: "DisplayString".
171 If responses is omitted, defaults to ["Yes", "No"], [green, red].
172 If only colours is omitted, defaults to [bold, ...].
174 Returns a member of the List responses. (If called without optional
175 arguments, returns "Yes" or "No".)
176 KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
# Default to a yes/no prompt with PROMPT_CHOICE_* colour functions.
178 if responses is None:
179 responses = ["Yes", "No"]
181 create_color_func("PROMPT_CHOICE_DEFAULT"),
182 create_color_func("PROMPT_CHOICE_OTHER")
184 elif colours is None:
# Repeat the colour list so there is one colour per response.
186 colours=(colours*len(responses))[:len(responses)]
190 response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
# Case-insensitive prefix match against each allowed response.
191 for key in responses:
192 # An empty response will match the first value in responses.
193 if response.upper()==key[:len(response)].upper():
195 print "Sorry, response '%s' not understood." % response,
196 except (EOFError, KeyboardInterrupt):
# Valid emerge action words (e.g. "emerge search foo").
200 actions = frozenset([
201 "clean", "config", "depclean",
203 "prune", "regen", "search",
# Recognized long command-line options.
207 "--ask", "--alphabetical",
208 "--buildpkg", "--buildpkgonly",
209 "--changelog", "--columns",
214 "--fetchonly", "--fetch-all-uri",
215 "--getbinpkg", "--getbinpkgonly",
216 "--help", "--ignore-default-opts",
219 "--newuse", "--nocolor",
220 "--nodeps", "--noreplace",
221 "--nospinner", "--oneshot",
222 "--onlydeps", "--pretend",
223 "--quiet", "--resume",
224 "--searchdesc", "--selective",
228 "--usepkg", "--usepkgonly",
229 "--verbose", "--version"
# Map of single-letter short options to their long equivalents.
235 "b":"--buildpkg", "B":"--buildpkgonly",
236 "c":"--clean", "C":"--unmerge",
237 "d":"--debug", "D":"--deep",
239 "f":"--fetchonly", "F":"--fetch-all-uri",
240 "g":"--getbinpkg", "G":"--getbinpkgonly",
242 "k":"--usepkg", "K":"--usepkgonly",
244 "n":"--noreplace", "N":"--newuse",
245 "o":"--onlydeps", "O":"--nodeps",
246 "p":"--pretend", "P":"--prune",
248 "s":"--search", "S":"--searchdesc",
251 "v":"--verbose", "V":"--version"
# Append a timestamped entry to /var/log/emerge.log, optionally echoing a
# short form to the xterm title bar. Log-file errors are reported to stderr
# rather than raised (best-effort logging).
254 def emergelog(xterm_titles, mystr, short_msg=None):
255 if xterm_titles and short_msg:
# Prefix the title message with the hostname when available.
256 if "HOSTNAME" in os.environ:
257 short_msg = os.environ["HOSTNAME"]+": "+short_msg
258 xtermTitle(short_msg)
260 file_path = "/var/log/emerge.log"
261 mylogfile = open(file_path, "a")
# Ensure the log is owned/readable per the portage user and group.
262 portage.util.apply_secpass_permissions(file_path,
263 uid=portage.portage_uid, gid=portage.portage_gid,
# Serialize writers via portage's file lock.
267 mylock = portage.locks.lockfile(mylogfile)
268 # seek because we may have gotten held up by the lock.
269 # if so, we may not be positioned at the end of the file.
271 mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
275 portage.locks.unlockfile(mylock)
277 except (IOError,OSError,portage.exception.PortageException), e:
279 print >> sys.stderr, "emergelog():",e
# Print a visible countdown of `secs` seconds before a dangerous action,
# giving the user a chance to hit Control-C.
281 def countdown(secs=5, doing="Starting"):
283 print ">>> Waiting",secs,"seconds before starting..."
284 print ">>> (Control-C to abort)...\n"+doing+" in: ",
288 sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
293 # formats a size given in bytes nicely
# Returns a human-readable kB string with thousands separators; non-integer
# input falls through to the (elided) error branch.
294 def format_size(mysize):
295 if type(mysize) not in [types.IntType,types.LongType]:
297 if 0 != mysize % 1024:
298 # Always round up to the next kB so that it doesn't show 0 kB when
299 # some small file still needs to be fetched.
300 mysize += 1024 - mysize % 1024
301 mystr=str(mysize/1024)
# Insert a comma as the thousands separator.
305 mystr=mystr[:mycount]+","+mystr[mycount:]
# Determine the active gcc version, trying progressively weaker sources:
# gcc-config, then ${CHOST}-gcc, then plain gcc -dumpversion.
309 def getgccversion(chost):
312 return: the current in-use gcc version
315 gcc_ver_command = 'gcc -dumpversion'
316 gcc_ver_prefix = 'gcc-'
318 gcc_not_found_error = red(
319 "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
320 "!!! to update the environment of this terminal and possibly\n" +
321 "!!! other terminals also.\n"
# 1) Ask gcc-config for the current profile (e.g. "x86_64-pc-linux-gnu-4.1.1").
324 mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
325 if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
326 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
# 2) Fall back to the CHOST-prefixed compiler.
328 mystatus, myoutput = commands.getstatusoutput(
329 chost + "-" + gcc_ver_command)
330 if mystatus == os.EX_OK:
331 return gcc_ver_prefix + myoutput
# 3) Last resort: whatever "gcc" is on PATH.
333 mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
334 if mystatus == os.EX_OK:
335 return gcc_ver_prefix + myoutput
# Nothing worked: warn the user and return a placeholder string.
337 portage.writemsg(gcc_not_found_error, noiselevel=-1)
338 return "[unavailable]"
# Build the "Portage X.Y (profile, gcc, libc, kernel arch)" banner string
# shown by emerge --version / --info.
340 def getportageversion(portdir, target_root, profile, chost, vardb):
341 profilever = "unavailable"
# Express the profile as a path relative to ${PORTDIR}/profiles when possible.
343 realpath = os.path.realpath(profile)
344 basepath = os.path.realpath(os.path.join(portdir, "profiles"))
345 if realpath.startswith(basepath):
346 profilever = realpath[1 + len(basepath):]
# Otherwise show the raw symlink target, flagged with "!".
349 profilever = "!" + os.readlink(profile)
352 del realpath, basepath
# Collect installed libc packages (both virtual/libc and legacy virtual/glibc).
355 libclist = vardb.match("virtual/libc")
356 libclist += vardb.match("virtual/glibc")
357 libclist = portage.util.unique_array(libclist)
359 xs=portage.catpkgsplit(x)
# Multiple libcs are joined with commas; version is "pkg-ver[-rev]".
361 libcver+=","+"-".join(xs[1:])
363 libcver="-".join(xs[1:])
365 libcver="unavailable"
367 gccver = getgccversion(chost)
368 unameout=platform.release()+" "+platform.machine()
370 return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
# Translate emerge command-line options into the set of depgraph engine
# parameters. Returns a set of parameter name strings.
372 def create_depgraph_params(myopts, myaction):
373 #configure emerge engine parameters
375 # self: include _this_ package regardless of if it is merged.
376 # selective: exclude the package if it is merged
377 # recurse: go into the dependencies
378 # deep: go into the dependencies of already merged packages
379 # empty: pretend nothing is merged
380 # complete: completely account for all known dependencies
381 # remove: build graph for use in removing packages
382 myparams = set(["recurse"])
384 if myaction == "remove":
385 myparams.add("remove")
386 myparams.add("complete")
# Options that imply only considering packages not already merged.
389 if "--update" in myopts or \
390 "--newuse" in myopts or \
391 "--reinstall" in myopts or \
392 "--noreplace" in myopts:
393 myparams.add("selective")
# --emptytree overrides selective: treat everything as unmerged.
394 if "--emptytree" in myopts:
395 myparams.add("empty")
396 myparams.discard("selective")
397 if "--nodeps" in myopts:
398 myparams.discard("recurse")
399 if "--deep" in myopts:
401 if "--complete-graph" in myopts:
402 myparams.add("complete")
405 # search functionality
# Implements `emerge --search` / `--searchdesc`. Wraps the configured
# package databases (port tree, binary packages, installed packages) behind
# a fake portdb facade so a single search code path can query them all.
# NOTE(review): interior lines are elided throughout this class in the
# current view (returns, try:/except: lines, some method headers); the
# comments below describe only the visible code.
406 class search(object):
417 def __init__(self, root_config, spinner, searchdesc,
418 verbose, usepkg, usepkgonly):
419 """Searches the available and installed packages for the supplied search key.
420 The list of available and installed packages is created at object instantiation.
421 This makes successive searches faster."""
422 self.settings = root_config.settings
423 self.vartree = root_config.trees["vartree"]
424 self.spinner = spinner
425 self.verbose = verbose
426 self.searchdesc = searchdesc
427 self.root_config = root_config
428 self.setconfig = root_config.setconfig
429 self.matches = {"pkg" : []}
# Route the standard portdb API through this object's _-prefixed
# multiplexing wrappers.
434 self.portdb = fake_portdb
435 for attrib in ("aux_get", "cp_all",
436 "xmatch", "findname", "getFetchMap"):
437 setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
441 portdb = root_config.trees["porttree"].dbapi
442 bindb = root_config.trees["bintree"].dbapi
443 vardb = root_config.trees["vartree"].dbapi
# Choose which databases participate, honoring --usepkg/--usepkgonly.
445 if not usepkgonly and portdb._have_root_eclass_dir:
446 self._dbs.append(portdb)
448 if (usepkg or usepkgonly) and bindb.cp_all():
449 self._dbs.append(bindb)
451 self._dbs.append(vardb)
452 self._portdb = portdb
# Union of category/package names across all participating dbs.
457 cp_all.update(db.cp_all())
458 return list(sorted(cp_all))
460 def _aux_get(self, *args, **kwargs):
463 return db.aux_get(*args, **kwargs)
468 def _findname(self, *args, **kwargs):
470 if db is not self._portdb:
471 # We don't want findname to return anything
472 # unless it's an ebuild in a portage tree.
473 # Otherwise, it's already built and we don't
476 func = getattr(db, "findname", None)
478 value = func(*args, **kwargs)
483 def _getFetchMap(self, *args, **kwargs):
485 func = getattr(db, "getFetchMap", None)
487 value = func(*args, **kwargs)
# Visibility check delegated to the module-level visible() helper,
# with type flags derived from which db the cpv came from.
492 def _visible(self, db, cpv, metadata):
493 installed = db is self.vartree.dbapi
494 built = installed or db is not self._portdb
497 pkg_type = "installed"
500 return visible(self.settings,
501 Package(type_name=pkg_type, root_config=self.root_config,
502 cpv=cpv, built=built, installed=installed, metadata=metadata))
504 def _xmatch(self, level, atom):
506 This method does not expand old-style virtuals because it
507 is restricted to returning matches for a single ${CATEGORY}/${PN}
508 and old-style virtual matches unreliable for that when querying
509 multiple package databases. If necessary, old-style virtuals
510 can be performed on atoms prior to calling this method.
512 cp = portage.dep_getkey(atom)
513 if level == "match-all":
# Prefer a db's native xmatch when present, else plain match().
516 if hasattr(db, "xmatch"):
517 matches.update(db.xmatch(level, atom))
519 matches.update(db.match(atom))
# Keep only matches for the requested category/package.
520 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
521 db._cpv_sort_ascending(result)
522 elif level == "match-visible":
525 if hasattr(db, "xmatch"):
526 matches.update(db.xmatch(level, atom))
# Dbs without xmatch: filter plain matches through _visible().
528 db_keys = list(db._aux_cache_keys)
529 for cpv in db.match(atom):
530 metadata = izip(db_keys,
531 db.aux_get(cpv, db_keys))
532 if not self._visible(db, cpv, metadata):
535 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
536 db._cpv_sort_ascending(result)
537 elif level == "bestmatch-visible":
540 if hasattr(db, "xmatch"):
541 cpv = db.xmatch("bestmatch-visible", atom)
542 if not cpv or portage.cpv_getkey(cpv) != cp:
# Keep the overall best candidate across all dbs.
544 if not result or cpv == portage.best([cpv, result]):
547 db_keys = Package.metadata_keys
548 # break out of this loop with highest visible
549 # match, checked in descending order
550 for cpv in reversed(db.match(atom)):
551 if portage.cpv_getkey(cpv) != cp:
553 metadata = izip(db_keys,
554 db.aux_get(cpv, db_keys))
555 if not self._visible(db, cpv, metadata):
557 if not result or cpv == portage.best([cpv, result]):
561 raise NotImplementedError(level)
564 def execute(self,searchkey):
565 """Performs the search for the supplied search key"""
567 self.searchkey=searchkey
568 self.packagematches = []
571 self.matches = {"pkg":[], "desc":[]}
574 self.matches = {"pkg":[]}
575 print "Searching... ",
# Leading '%' selects regex search; leading '@' restricts matching
# to the full category/package string.
578 if self.searchkey.startswith('%'):
580 self.searchkey = self.searchkey[1:]
581 if self.searchkey.startswith('@'):
583 self.searchkey = self.searchkey[1:]
585 self.searchre=re.compile(self.searchkey,re.I)
587 self.searchre=re.compile(re.escape(self.searchkey), re.I)
588 for package in self.portdb.cp_all():
589 self.spinner.update()
592 match_string = package[:]
594 match_string = package.split("/")[-1]
597 if self.searchre.search(match_string):
# No visible version -> record the package as masked.
598 if not self.portdb.xmatch("match-visible", package):
600 self.matches["pkg"].append([package,masked])
601 elif self.searchdesc: # DESCRIPTION searching
602 full_package = self.portdb.xmatch("bestmatch-visible", package)
604 #no match found; we don't want to query description
605 full_package = portage.best(
606 self.portdb.xmatch("match-all", package))
612 full_desc = self.portdb.aux_get(
613 full_package, ["DESCRIPTION"])[0]
615 print "emerge: search: aux_get() failed, skipping"
617 if self.searchre.search(full_desc):
618 self.matches["desc"].append([full_package,masked])
# Sort each match category and tally the total match count.
621 for mtype in self.matches:
622 self.matches[mtype].sort()
623 self.mlen += len(self.matches[mtype])
626 if not self.portdb.xmatch("match-all", cp):
629 if not self.portdb.xmatch("bestmatch-visible", cp):
631 self.matches["pkg"].append([cp, masked])
635 """Outputs the results of the search."""
636 print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
637 print "[ Applications found : "+white(str(self.mlen))+" ]"
639 vardb = self.vartree.dbapi
640 for mtype in self.matches:
641 for match,masked in self.matches[mtype]:
645 full_package = self.portdb.xmatch(
646 "bestmatch-visible", match)
648 #no match found; we don't want to query description
650 full_package = portage.best(
651 self.portdb.xmatch("match-all",match))
652 elif mtype == "desc":
654 match = portage.cpv_getkey(match)
657 desc, homepage, license = self.portdb.aux_get(
658 full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
660 print "emerge: search: aux_get() failed, skipping"
663 print green("*")+" "+white(match)+" "+red("[ Masked ]")
665 print green("*")+" "+white(match)
666 myversion = self.getVersion(full_package, search.VERSION_RELEASE)
670 mycat = match.split("/")[0]
671 mypkg = match.split("/")[1]
672 mycpv = match + "-" + myversion
673 myebuild = self.portdb.findname(mycpv)
# Ebuild found: compute total distfile size from the Manifest.
675 pkgdir = os.path.dirname(myebuild)
676 from portage import manifest
677 mf = manifest.Manifest(
678 pkgdir, self.settings["DISTDIR"])
680 uri_map = self.portdb.getFetchMap(mycpv)
681 except portage.exception.InvalidDependString, e:
682 file_size_str = "Unknown (%s)" % (e,)
686 mysum[0] = mf.getDistfilesSize(uri_map)
688 file_size_str = "Unknown (missing " + \
689 "digest for %s)" % (e,)
# No ebuild: fall back to the binary package file's size.
694 if db is not vardb and \
695 db.cpv_exists(mycpv):
697 if not myebuild and hasattr(db, "bintree"):
698 myebuild = db.bintree.getname(mycpv)
700 mysum[0] = os.stat(myebuild).st_size
# Format the byte count as "N,NNN kB".
705 if myebuild and file_size_str is None:
706 mystr = str(mysum[0] / 1024)
710 mystr = mystr[:mycount] + "," + mystr[mycount:]
711 file_size_str = mystr + " kB"
715 print " ", darkgreen("Latest version available:"),myversion
716 print " ", self.getInstallationStatus(mycat+'/'+mypkg)
719 (darkgreen("Size of files:"), file_size_str)
720 print " ", darkgreen("Homepage:")+" ",homepage
721 print " ", darkgreen("Description:")+" ",desc
722 print " ", darkgreen("License:")+" ",license
# Report the installed version of `package`, or "[ Not Installed ]".
727 def getInstallationStatus(self,package):
728 installed_package = self.vartree.dep_bestmatch(package)
730 version = self.getVersion(installed_package,search.VERSION_RELEASE)
732 result = darkgreen("Latest version installed:")+" "+version
734 result = darkgreen("Latest version installed:")+" [ Not Installed ]"
# Extract the version (plus -rN revision unless it is r0) from a cpv.
737 def getVersion(self,full_package,detail):
738 if len(full_package) > 1:
739 package_parts = portage.catpkgsplit(full_package)
740 if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
741 result = package_parts[2]+ "-" + package_parts[3]
743 result = package_parts[2]
# Per-ROOT bundle of configuration: settings, package trees, and package
# sets, used internally by the depgraph.
748 class RootConfig(object):
749 """This is used internally by depgraph to track information about a
# Maps package type names to the tree that stores them.
753 "ebuild" : "porttree",
754 "binary" : "bintree",
755 "installed" : "vartree"
# Build the reverse (tree -> pkg type) mapping.
759 for k, v in pkg_tree_map.iteritems():
762 def __init__(self, settings, trees, setconfig):
764 self.settings = settings
765 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
766 self.root = self.settings["ROOT"]
767 self.setconfig = setconfig
768 self.sets = self.setconfig.getSets()
# Cache of packages already determined to be visible.
769 self.visible_pkgs = PackageVirtualDbapi(self.settings)
771 def create_world_atom(pkg, args_set, root_config):
772 """Create a new atom for the world file if one does not exist. If the
773 argument atom is precise enough to identify a specific slot then a slot
774 atom will be returned. Atoms that are in the system set may also be stored
775 in world since system atoms can only match one slot while world atoms can
776 be greedy with respect to slots. Unslotted system packages will not be
779 arg_atom = args_set.findAtomForPackage(pkg)
782 cp = portage.dep_getkey(arg_atom)
784 sets = root_config.sets
785 portdb = root_config.trees["porttree"].dbapi
786 vardb = root_config.trees["vartree"].dbapi
# A package is "slotted" if more than one SLOT exists for its cp, or if
# its single SLOT is something other than the default "0".
787 available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
788 for cpv in portdb.match(cp))
789 slotted = len(available_slots) > 1 or \
790 (len(available_slots) == 1 and "0" not in available_slots)
792 # check the vdb in case this is multislot
793 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
794 for cpv in vardb.match(cp))
795 slotted = len(available_slots) > 1 or \
796 (len(available_slots) == 1 and "0" not in available_slots)
797 if slotted and arg_atom != cp:
798 # If the user gave a specific atom, store it as a
799 # slot atom in the world file.
800 slot_atom = pkg.slot_atom
802 # For USE=multislot, there are a couple of cases to
805 # 1) SLOT="0", but the real SLOT spontaneously changed to some
806 # unknown value, so just record an unslotted atom.
808 # 2) SLOT comes from an installed package and there is no
809 # matching SLOT in the portage tree.
811 # Make sure that the slot atom is available in either the
812 # portdb or the vardb, since otherwise the user certainly
813 # doesn't want the SLOT atom recorded in the world file
814 # (case 1 above). If it's only available in the vardb,
815 # the user may be trying to prevent a USE=multislot
816 # package from being removed by --depclean (case 2 above).
819 if not portdb.match(slot_atom):
820 # SLOT seems to come from an installed multislot package
822 # If there is no installed package matching the SLOT atom,
823 # it probably changed SLOT spontaneously due to USE=multislot,
824 # so just record an unslotted atom.
825 if vardb.match(slot_atom):
826 # Now verify that the argument is precise
827 # enough to identify a specific slot.
828 matches = mydb.match(arg_atom)
829 matched_slots = set()
831 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
832 if len(matched_slots) == 1:
833 new_world_atom = slot_atom
835 if new_world_atom == sets["world"].findAtomForPackage(pkg):
836 # Both atoms would be identical, so there's nothing to add.
839 # Unlike world atoms, system atoms are not greedy for slots, so they
840 # can't be safely excluded from world if they are slotted.
841 system_atom = sets["system"].findAtomForPackage(pkg)
843 if not portage.dep_getkey(system_atom).startswith("virtual/"):
845 # System virtuals aren't safe to exclude from world since they can
846 # match multiple old-style virtuals but only one of them will be
847 # pulled in by update or depclean.
848 providers = portdb.mysettings.getvirtuals().get(
849 portage.dep_getkey(system_atom))
850 if providers and len(providers) == 1 and providers[0] == cp:
852 return new_world_atom
# Yield IUSE flags with their +/- default-enable prefixes stripped.
854 def filter_iuse_defaults(iuse):
856 if flag.startswith("+") or flag.startswith("-"):
# Lightweight base class: attributes are declared via __slots__ on each
# subclass, and __init__ accepts them as keyword arguments, walking the
# class hierarchy to collect every declared slot.
861 class SlotObject(object):
862 __slots__ = ("__weakref__",)
864 def __init__(self, **kwargs):
# Walk self's class and all its bases, gathering __slots__.
865 classes = [self.__class__]
870 classes.extend(c.__bases__)
871 slots = getattr(c, "__slots__", None)
# Missing kwargs default the attribute to None.
875 myvalue = kwargs.get(myattr, None)
876 setattr(self, myattr, myvalue)
880 Create a new instance and copy all attributes
881 defined from __slots__ (including those from
884 obj = self.__class__()
886 classes = [self.__class__]
891 classes.extend(c.__bases__)
892 slots = getattr(c, "__slots__", None)
896 setattr(obj, myattr, getattr(self, myattr))
# Base class for dependency priorities. Subclasses supply __int__(); the
# rich comparisons below all compare that integer value against `other`.
900 class AbstractDepPriority(SlotObject):
901 __slots__ = ("buildtime", "runtime", "runtime_post")
903 def __lt__(self, other):
904 return self.__int__() < other
906 def __le__(self, other):
907 return self.__int__() <= other
909 def __eq__(self, other):
910 return self.__int__() == other
912 def __ne__(self, other):
913 return self.__int__() != other
915 def __gt__(self, other):
916 return self.__int__() > other
918 def __ge__(self, other):
919 return self.__int__() >= other
# Shallow copy is sufficient since all slot values are immutable flags.
923 return copy.copy(self)
925 class DepPriority(AbstractDepPriority):
927 This class generates an integer priority level based of various
928 attributes of the dependency relationship. Attributes can be assigned
929 at any time and the new integer value will be generated on calls to the
930 __int__() method. Rich comparison operators are supported.
932 The boolean attributes that affect the integer value are "satisfied",
933 "buildtime", "runtime", and "system". Various combinations of
934 attributes lead to the following priority levels:
936 Combination of properties Priority Category
938 not satisfied and buildtime 0 HARD
939 not satisfied and runtime -1 MEDIUM
940 not satisfied and runtime_post -2 MEDIUM_SOFT
941 satisfied and buildtime and rebuild -3 SOFT
942 satisfied and buildtime -4 SOFT
943 satisfied and runtime -5 SOFT
944 satisfied and runtime_post -6 SOFT
945 (none of the above) -6 SOFT
947 Several integer constants are defined for categorization of priority
950 MEDIUM The upper boundary for medium dependencies.
951 MEDIUM_SOFT The upper boundary for medium-soft dependencies.
952 SOFT The upper boundary for soft dependencies.
953 MIN The lower boundary for soft dependencies.
955 __slots__ = ("satisfied", "rebuild")
# Map the attribute combination to the integer level per the table above.
962 if not self.satisfied:
967 if self.runtime_post:
975 if self.runtime_post:
# String form reports the category name for the current integer level.
980 myvalue = self.__int__()
981 if myvalue > self.MEDIUM:
983 if myvalue > self.MEDIUM_SOFT:
985 if myvalue > self.SOFT:
# Priority subclass used for blocker dependencies; a single shared
# instance is attached to the class below.
989 class BlockerDepPriority(DepPriority):
994 BlockerDepPriority.instance = BlockerDepPriority()
# Priority levels used when ordering package unmerges (see table below).
996 class UnmergeDepPriority(AbstractDepPriority):
997 __slots__ = ("satisfied",)
999 Combination of properties Priority Category
1002 runtime_post -1 HARD
1004 (none of the above) -2 SOFT
1014 if self.runtime_post:
# String form reports HARD vs SOFT based on the integer level.
1021 myvalue = self.__int__()
1022 if myvalue > self.SOFT:
1026 class FakeVartree(portage.vartree):
1027 """This implements an in-memory copy of a vartree instance that provides
1028 all the interfaces required for use by the depgraph. The vardb is locked
1029 during the constructor call just long enough to read a copy of the
1030 installed package information. This allows the depgraph to do its
1031 dependency calculations without holding a lock on the vardb. It also
1032 allows things like vardb global updates to be done in memory so that the
1033 user doesn't necessarily need write access to the vardb in cases where
1034 global updates are necessary (updates are performed when necessary if there
1035 is not a matching ebuild in the tree)."""
1036 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1037 self._root_config = root_config
1038 if pkg_cache is None:
1040 real_vartree = root_config.trees["vartree"]
1041 portdb = root_config.trees["porttree"].dbapi
1042 self.root = real_vartree.root
1043 self.settings = real_vartree.settings
1044 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1045 self._pkg_cache = pkg_cache
# The in-memory db that shadows the real installed-package db.
1046 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1047 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1049 # At least the parent needs to exist for the lock file.
1050 portage.util.ensure_dirs(vdb_path)
1051 except portage.exception.PortageException:
# Only lock when writable; read-only users proceed unlocked.
1055 if acquire_lock and os.access(vdb_path, os.W_OK):
1056 vdb_lock = portage.locks.lockdir(vdb_path)
1057 real_dbapi = real_vartree.dbapi
# Snapshot every installed package into the in-memory db,
# reusing cached Package instances when available.
1059 for cpv in real_dbapi.cpv_all():
1060 cache_key = ("installed", self.root, cpv, "nomerge")
1061 pkg = self._pkg_cache.get(cache_key)
1063 metadata = pkg.metadata
1065 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1066 myslot = metadata["SLOT"]
1067 mycp = portage.dep_getkey(cpv)
1068 myslot_atom = "%s:%s" % (mycp, myslot)
1070 mycounter = long(metadata["COUNTER"])
1073 metadata["COUNTER"] = str(mycounter)
# Track the highest COUNTER seen per slot atom.
1074 other_counter = slot_counters.get(myslot_atom, None)
1075 if other_counter is not None:
1076 if other_counter > mycounter:
1078 slot_counters[myslot_atom] = mycounter
1080 pkg = Package(built=True, cpv=cpv,
1081 installed=True, metadata=metadata,
1082 root_config=root_config, type_name="installed")
1083 self._pkg_cache[pkg] = pkg
1084 self.dbapi.cpv_inject(pkg)
1085 real_dbapi.flush_cache()
1088 portage.locks.unlockdir(vdb_lock)
1089 # Populate the old-style virtuals using the cached values.
1090 if not self.settings.treeVirtuals:
1091 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1092 portage.getCPFromCPV, self.get_all_provides())
1094 # Initialize variables needed for lazy cache pulls of the live ebuild
1095 # metadata. This ensures that the vardb lock is released ASAP, without
1096 # being delayed in case cache generation is triggered.
1097 self._aux_get = self.dbapi.aux_get
1098 self.dbapi.aux_get = self._aux_get_wrapper
1099 self._match = self.dbapi.match
1100 self.dbapi.match = self._match_wrapper
1101 self._aux_get_history = set()
1102 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1103 self._portdb = portdb
1104 self._global_updates = None
1106 def _match_wrapper(self, cpv, use_cache=1):
1108 Make sure the metadata in Package instances gets updated for any
1109 cpv that is returned from a match() call, since the metadata can
1110 be accessed directly from the Package instance instead of via
1113 matches = self._match(cpv, use_cache=use_cache)
1115 if cpv in self._aux_get_history:
1117 self._aux_get_wrapper(cpv, [])
# Lazily refresh a package's dep metadata from the live ebuild the
# first time it is queried; fall back to applying global updates when
# no supported ebuild is available.
1120 def _aux_get_wrapper(self, pkg, wants):
1121 if pkg in self._aux_get_history:
1122 return self._aux_get(pkg, wants)
1123 self._aux_get_history.add(pkg)
1125 # Use the live ebuild metadata if possible.
1126 live_metadata = dict(izip(self._portdb_keys,
1127 self._portdb.aux_get(pkg, self._portdb_keys)))
1128 if not portage.eapi_is_supported(live_metadata["EAPI"]):
1130 self.dbapi.aux_update(pkg, live_metadata)
1131 except (KeyError, portage.exception.PortageException):
1132 if self._global_updates is None:
1133 self._global_updates = \
1134 grab_global_updates(self._portdb.porttree_root)
1135 perform_global_updates(
1136 pkg, self.dbapi, self._global_updates)
1137 return self._aux_get(pkg, wants)
1139 def sync(self, acquire_lock=1):
1141 Call this method to synchronize state with the real vardb
1142 after one or more packages may have been installed or
1145 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1147 # At least the parent needs to exist for the lock file.
1148 portage.util.ensure_dirs(vdb_path)
1149 except portage.exception.PortageException:
1153 if acquire_lock and os.access(vdb_path, os.W_OK):
1154 vdb_lock = portage.locks.lockdir(vdb_path)
1158 portage.locks.unlockdir(vdb_lock)
1162 real_vardb = self._root_config.trees["vartree"].dbapi
1163 current_cpv_set = frozenset(real_vardb.cpv_all())
1164 pkg_vardb = self.dbapi
1165 aux_get_history = self._aux_get_history
1167 # Remove any packages that have been uninstalled.
1168 for pkg in list(pkg_vardb):
1169 if pkg.cpv not in current_cpv_set:
1170 pkg_vardb.cpv_remove(pkg)
1171 aux_get_history.discard(pkg.cpv)
1173 # Validate counters and timestamps.
1176 validation_keys = ["COUNTER", "_mtime_"]
1177 for cpv in current_cpv_set:
1179 pkg_hash_key = ("installed", root, cpv, "nomerge")
1180 pkg = pkg_vardb.get(pkg_hash_key)
1182 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
# Stale entry (counter or mtime changed on disk): drop and reload.
1184 if counter != pkg.metadata["COUNTER"] or \
1186 pkg_vardb.cpv_remove(pkg)
1187 aux_get_history.discard(pkg.cpv)
1191 pkg = self._pkg(cpv)
1193 other_counter = slot_counters.get(pkg.slot_atom)
1194 if other_counter is not None:
1195 if other_counter > pkg.counter:
1198 slot_counters[pkg.slot_atom] = pkg.counter
1199 pkg_vardb.cpv_inject(pkg)
1201 real_vardb.flush_cache()
# Build a fresh installed-type Package from the real vardb's metadata.
1203 def _pkg(self, cpv):
1204 root_config = self._root_config
1205 real_vardb = root_config.trees["vartree"].dbapi
1206 db_keys = list(real_vardb._aux_cache_keys)
1207 pkg = Package(cpv=cpv, installed=True,
1208 metadata=izip(db_keys, real_vardb.aux_get(cpv, db_keys)),
1209 root_config=root_config,
1210 type_name="installed")
# Read and parse all package-move/slot-move commands from
# ${PORTDIR}/profiles/updates into a flat command list.
1213 def grab_global_updates(portdir):
1214 from portage.update import grab_updates, parse_updates
1215 updpath = os.path.join(portdir, "profiles", "updates")
1217 rawupdates = grab_updates(updpath)
1218 except portage.exception.DirectoryNotFound:
1221 for mykey, mystat, mycontent in rawupdates:
1222 commands, errors = parse_updates(mycontent)
1223 upd_commands.extend(commands)
# Apply global update commands to one package's dependency metadata
# (DEPEND/RDEPEND/PDEPEND) in the given db, in place.
1226 def perform_global_updates(mycpv, mydb, mycommands):
1227 from portage.update import update_dbentries
1228 aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1229 aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1230 updates = update_dbentries(mycommands, aux_dict)
1232 mydb.aux_update(mycpv, updates)
1234 def visible(pkgsettings, pkg):
1236 Check if a package is visible. This can raise an InvalidDependString
1237 exception if LICENSE is invalid.
1238 TODO: optionally generate a list of masking reasons
1240 @returns: True if the package is visible, False otherwise.
# A missing SLOT makes the package unusable.
1242 if not pkg.metadata["SLOT"]:
# Binary packages must have been built for an accepted CHOST.
1244 if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1245 if not pkgsettings._accept_chost(pkg):
1247 eapi = pkg.metadata["EAPI"]
1248 if not portage.eapi_is_supported(eapi):
1250 if not pkg.installed:
1251 if portage._eapi_is_deprecated(eapi):
# Keyword, package.mask, profile, and license checks in turn.
1253 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1255 if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1257 if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1260 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1262 except portage.exception.InvalidDependString:
1266 def get_masking_status(pkg, pkgsettings, root_config):
1268 mreasons = portage.getmaskingstatus(
1269 pkg, settings=pkgsettings,
1270 portdb=root_config.trees["porttree"].dbapi)
1272 if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1273 if not pkgsettings._accept_chost(pkg):
1274 mreasons.append("CHOST: %s" % \
1275 pkg.metadata["CHOST"])
1277 if not pkg.metadata["SLOT"]:
1278 mreasons.append("invalid: SLOT is undefined")
1282 def get_mask_info(root_config, cpv, pkgsettings,
1283 db, pkg_type, built, installed, db_keys):
1286 metadata = dict(izip(db_keys,
1287 db.aux_get(cpv, db_keys)))
1290 if metadata and not built:
1291 pkgsettings.setcpv(cpv, mydb=metadata)
1292 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1293 if metadata is None:
1294 mreasons = ["corruption"]
1296 pkg = Package(type_name=pkg_type, root_config=root_config,
1297 cpv=cpv, built=built, installed=installed, metadata=metadata)
1298 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1299 return metadata, mreasons
1301 def show_masked_packages(masked_packages):
1302 shown_licenses = set()
1303 shown_comments = set()
1304 # Maybe there is both an ebuild and a binary. Only
1305 # show one of them to avoid redundant appearance.
1307 have_eapi_mask = False
1308 for (root_config, pkgsettings, cpv,
1309 metadata, mreasons) in masked_packages:
1310 if cpv in shown_cpvs:
1313 comment, filename = None, None
1314 if "package.mask" in mreasons:
1315 comment, filename = \
1316 portage.getmaskingreason(
1317 cpv, metadata=metadata,
1318 settings=pkgsettings,
1319 portdb=root_config.trees["porttree"].dbapi,
1320 return_location=True)
1321 missing_licenses = []
1323 if not portage.eapi_is_supported(metadata["EAPI"]):
1324 have_eapi_mask = True
1326 missing_licenses = \
1327 pkgsettings._getMissingLicenses(
1329 except portage.exception.InvalidDependString:
1330 # This will have already been reported
1331 # above via mreasons.
1334 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1335 if comment and comment not in shown_comments:
1338 shown_comments.add(comment)
1339 portdb = root_config.trees["porttree"].dbapi
1340 for l in missing_licenses:
1341 l_path = portdb.findLicensePath(l)
1342 if l in shown_licenses:
1344 msg = ("A copy of the '%s' license" + \
1345 " is located at '%s'.") % (l, l_path)
1348 shown_licenses.add(l)
1349 return have_eapi_mask
1351 class Task(SlotObject):
1352 __slots__ = ("_hash_key", "_hash_value")
1354 def _get_hash_key(self):
1355 hash_key = getattr(self, "_hash_key", None)
1356 if hash_key is None:
1357 raise NotImplementedError(self)
def __eq__(self, other):
	"""Equality is delegated to the hash key, so a Task compares
	equal to the plain tuple form of its key as well."""
	key = self._get_hash_key()
	return key == other
def __ne__(self, other):
	"""Inverse of __eq__, likewise delegated to the hash key."""
	key = self._get_hash_key()
	return key != other
1367 hash_value = getattr(self, "_hash_value", None)
1368 if hash_value is None:
1369 self._hash_value = hash(self._get_hash_key())
1370 return self._hash_value
1373 return len(self._get_hash_key())
def __getitem__(self, key):
	"""Index into the task's hash-key tuple."""
	hash_key = self._get_hash_key()
	return hash_key[key]
1379 return iter(self._get_hash_key())
def __contains__(self, key):
	"""Membership test performed against the hash-key tuple."""
	hash_key = self._get_hash_key()
	return key in hash_key
1385 return str(self._get_hash_key())
1387 class Blocker(Task):
1389 __hash__ = Task.__hash__
1390 __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
def __init__(self, **kwargs):
	"""Initialize a blocker task and cache the category/package
	name derived from the blocking atom."""
	Task.__init__(self, **kwargs)
	# dep_getkey() reduces the atom to its category/package (cp) part.
	self.cp = portage.dep_getkey(self.atom)
1396 def _get_hash_key(self):
1397 hash_key = getattr(self, "_hash_key", None)
1398 if hash_key is None:
1400 ("blocks", self.root, self.atom, self.eapi)
1401 return self._hash_key
1403 class Package(Task):
1405 __hash__ = Task.__hash__
1406 __slots__ = ("built", "cpv", "depth",
1407 "installed", "metadata", "onlydeps", "operation",
1408 "root_config", "type_name",
1409 "category", "counter", "cp", "cpv_split",
1410 "inherited", "iuse", "mtime",
1411 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1414 "CHOST", "COUNTER", "DEPEND", "EAPI",
1415 "INHERITED", "IUSE", "KEYWORDS",
1416 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1417 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
def __init__(self, **kwargs):
	"""Initialize a Package and derive convenience attributes
	(cp, slot_atom, category, pf, cpv_split, pv_split) from cpv."""
	Task.__init__(self, **kwargs)
	self.root = self.root_config.root
	# Wrap metadata so later updates to wrapped keys (COUNTER,
	# INHERITED, IUSE, SLOT, USE, _mtime_) are mirrored onto this
	# instance's attributes via the _set_* handlers.
	self.metadata = _PackageMetadataWrapper(self, self.metadata)
	self.cp = portage.cpv_getkey(self.cpv)
	# self.slot is populated by the metadata wrapper above, so the
	# wrapper assignment must precede this line.
	self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, self.slot))
	self.category, self.pf = portage.catsplit(self.cpv)
	self.cpv_split = portage.catpkgsplit(self.cpv)
	# Version portion only: catpkgsplit() yields (cat, pkg, ver, rev).
	self.pv_split = self.cpv_split[1:]
1431 __slots__ = ("__weakref__", "enabled")
def __init__(self, use):
	# Freeze the enabled USE flags into an immutable set.
	enabled_flags = frozenset(use)
	self.enabled = enabled_flags
1436 class _iuse(object):
1438 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1440 def __init__(self, tokens, iuse_implicit):
1441 self.tokens = tuple(tokens)
1442 self.iuse_implicit = iuse_implicit
1449 enabled.append(x[1:])
1451 disabled.append(x[1:])
1454 self.enabled = frozenset(enabled)
1455 self.disabled = frozenset(disabled)
1456 self.all = frozenset(chain(enabled, disabled, other))
1458 def __getattribute__(self, name):
1461 return object.__getattribute__(self, "regex")
1462 except AttributeError:
1463 all = object.__getattribute__(self, "all")
1464 iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1465 # Escape anything except ".*" which is supposed
1466 # to pass through from _get_implicit_iuse()
1467 regex = (re.escape(x) for x in chain(all, iuse_implicit))
1468 regex = "^(%s)$" % "|".join(regex)
1469 regex = regex.replace("\\.\\*", ".*")
1470 self.regex = re.compile(regex)
1471 return object.__getattribute__(self, name)
1473 def _get_hash_key(self):
1474 hash_key = getattr(self, "_hash_key", None)
1475 if hash_key is None:
1476 if self.operation is None:
1477 self.operation = "merge"
1478 if self.onlydeps or self.installed:
1479 self.operation = "nomerge"
1481 (self.type_name, self.root, self.cpv, self.operation)
1482 return self._hash_key
1484 def __cmp__(self, other):
1491 def __lt__(self, other):
1492 if other.cp != self.cp:
1494 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1498 def __le__(self, other):
1499 if other.cp != self.cp:
1501 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1505 def __gt__(self, other):
1506 if other.cp != self.cp:
1508 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1512 def __ge__(self, other):
1513 if other.cp != self.cp:
1515 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1519 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1520 if not x.startswith("UNUSED_"))
1521 _all_metadata_keys.discard("CDEPEND")
1522 _all_metadata_keys.update(Package.metadata_keys)
1524 from portage.cache.mappings import slot_dict_class
1525 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1527 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1529 Detect metadata updates and synchronize Package attributes.
1532 __slots__ = ("_pkg",)
1533 _wrapped_keys = frozenset(
1534 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1536 def __init__(self, pkg, metadata):
1537 _PackageMetadataWrapperBase.__init__(self)
1539 self.update(metadata)
def __setitem__(self, k, v):
	"""Store the value, then synchronize the owning Package's
	attribute when the key is one of the wrapped keys."""
	_PackageMetadataWrapperBase.__setitem__(self, k, v)
	if k not in self._wrapped_keys:
		return
	# Dispatch to the matching _set_<key>() handler.
	handler = getattr(self, "_set_" + k.lower())
	handler(k, v)
def _set_inherited(self, k, v):
	# INHERITED may arrive as a whitespace-separated string;
	# normalize it to a frozenset of eclass names before storing.
	value = v
	if isinstance(value, basestring):
		value = frozenset(value.split())
	self._pkg.inherited = value
def _set_iuse(self, k, v):
	# Rebuild the package's _iuse helper from the raw IUSE string.
	pkg = self._pkg
	tokens = v.split()
	pkg.iuse = pkg._iuse(tokens, pkg.root_config.iuse_implicit)
1555 def _set_slot(self, k, v):
def _set_use(self, k, v):
	# Wrap the space-separated USE string in the package's _use helper.
	pkg = self._pkg
	pkg.use = pkg._use(v.split())
1561 def _set_counter(self, k, v):
1562 if isinstance(v, basestring):
1567 self._pkg.counter = v
1569 def _set__mtime_(self, k, v):
1570 if isinstance(v, basestring):
1572 v = float(v.strip())
1577 class EbuildFetchonly(SlotObject):
1579 __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1582 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1583 # ensuring sane $PWD (bug #239560) and storing elog
1584 # messages. Use a private temp directory, in order
1585 # to avoid locking the main one.
1586 settings = self.settings
1587 global_tmpdir = settings["PORTAGE_TMPDIR"]
1588 from tempfile import mkdtemp
1590 private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1592 if e.errno != portage.exception.PermissionDenied.errno:
1594 raise portage.exception.PermissionDenied(global_tmpdir)
1595 settings["PORTAGE_TMPDIR"] = private_tmpdir
1596 settings.backup_changes("PORTAGE_TMPDIR")
1598 retval = self._execute()
1600 settings["PORTAGE_TMPDIR"] = global_tmpdir
1601 settings.backup_changes("PORTAGE_TMPDIR")
1602 shutil.rmtree(private_tmpdir)
1606 settings = self.settings
1608 root_config = pkg.root_config
1609 portdb = root_config.trees["porttree"].dbapi
1610 ebuild_path = portdb.findname(pkg.cpv)
1611 settings.setcpv(pkg)
1612 debug = settings.get("PORTAGE_DEBUG") == "1"
1613 use_cache = 1 # always true
1614 portage.doebuild_environment(ebuild_path, "fetch",
1615 root_config.root, settings, debug, use_cache, portdb)
1616 portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
1618 retval = portage.doebuild(ebuild_path, "fetch",
1619 self.settings["ROOT"], self.settings, debug=debug,
1620 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1621 mydbapi=portdb, tree="porttree")
1623 if retval != os.EX_OK:
1624 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1625 eerror(msg, phase="unpack", key=pkg.cpv)
1627 portage.elog.elog_process(self.pkg.cpv, self.settings)
1630 class AsynchronousTask(SlotObject):
1632 Subclasses override _wait() and _poll() so that calls
1633 to public methods can be wrapped for implementing
1634 hooks such as exit listener notification.
1636 Sublasses should call self.wait() to notify exit listeners after
1637 the task is complete and self.returncode has been set.
1640 __slots__ = ("background", "cancelled", "returncode") + \
1641 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1645 Start an asynchronous task and then return as soon as possible.
1651 raise NotImplementedError(self)
1654 return self.returncode is None
1661 return self.returncode
1664 if self.returncode is None:
1667 return self.returncode
1670 return self.returncode
1673 self.cancelled = True
1676 def addStartListener(self, f):
1678 The function will be called with one argument, a reference to self.
1680 if self._start_listeners is None:
1681 self._start_listeners = []
1682 self._start_listeners.append(f)
1684 def removeStartListener(self, f):
1685 if self._start_listeners is None:
1687 self._start_listeners.remove(f)
1689 def _start_hook(self):
1690 if self._start_listeners is not None:
1691 start_listeners = self._start_listeners
1692 self._start_listeners = None
1694 for f in start_listeners:
1697 def addExitListener(self, f):
1699 The function will be called with one argument, a reference to self.
1701 if self._exit_listeners is None:
1702 self._exit_listeners = []
1703 self._exit_listeners.append(f)
1705 def removeExitListener(self, f):
1706 if self._exit_listeners is None:
1707 if self._exit_listener_stack is not None:
1708 self._exit_listener_stack.remove(f)
1710 self._exit_listeners.remove(f)
1712 def _wait_hook(self):
1714 Call this method after the task completes, just before returning
1715 the returncode from wait() or poll(). This hook is
1716 used to trigger exit listeners when the returncode first
1719 if self.returncode is not None and \
1720 self._exit_listeners is not None:
1722 # This prevents recursion, in case one of the
1723 # exit handlers triggers this method again by
1724 # calling wait(). Use a stack that gives
1725 # removeExitListener() an opportunity to consume
1726 # listeners from the stack, before they can get
1727 # called below. This is necessary because a call
1728 # to one exit listener may result in a call to
1729 # removeExitListener() for another listener on
1730 # the stack. That listener needs to be removed
1731 # from the stack since it would be inconsistent
1732 # to call it after it has been been passed into
1733 # removeExitListener().
1734 self._exit_listener_stack = self._exit_listeners
1735 self._exit_listeners = None
1737 self._exit_listener_stack.reverse()
1738 while self._exit_listener_stack:
1739 self._exit_listener_stack.pop()(self)
1741 class PipeReader(AsynchronousTask):
1744 Reads output from one or more files and saves it in memory,
1745 for retrieval via the getvalue() method. This is driven by
1746 the scheduler's poll() loop, so it runs entirely within the
1750 __slots__ = ("input_files", "scheduler",) + \
1751 ("pid", "_read_data", "_registered", "_reg_ids")
1756 self._reg_ids = set()
1757 self._read_data = []
1758 for k, f in self.input_files.iteritems():
1759 fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1760 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1761 self._reg_ids.add(self.scheduler.register(f.fileno(),
1762 PollConstants.POLLIN, self._output_handler))
1763 self._registered = True
1766 return self._registered
1769 if self.returncode is not None:
1770 return self.returncode
1772 if self._registered:
1773 self.scheduler.schedule(self._reg_ids)
1776 self.returncode = os.EX_OK
1777 return self.returncode
1780 """Retrieve the entire contents"""
1781 return "".join(self._read_data)
1784 """Free the memory buffer."""
1785 self._read_data = None
1787 def _output_handler(self, fd, event):
1788 files = self.input_files
1789 for f in files.itervalues():
1790 if fd == f.fileno():
1793 buf = array.array('B')
1795 buf.fromfile(f, self._bufsize)
1800 self._read_data.append(buf.tostring())
1805 return self._registered
1807 def _unregister(self):
1809 Unregister from the scheduler and close open files.
1812 self._registered = False
1814 if self._reg_ids is not None:
1815 for reg_id in self._reg_ids:
1816 self.scheduler.unregister(reg_id)
1817 self._reg_ids = None
1819 if self.input_files is not None:
1820 for f in self.input_files.itervalues():
1822 self.input_files = None
1824 class CompositeTask(AsynchronousTask):
1826 __slots__ = ("scheduler",) + ("_current_task",)
1829 return self._current_task is not None
1832 self.cancelled = True
1833 if self._current_task is not None:
1834 self._current_task.cancel()
1838 This does a loop calling self._current_task.poll()
1839 repeatedly as long as the value of self._current_task
1840 keeps changing. It calls poll() a maximum of one time
1841 for a given self._current_task instance. This is useful
1842 since calling poll() on a task can trigger advance to
1843 the next task could eventually lead to the returncode
1844 being set in cases when polling only a single task would
1845 not have the same effect.
1850 task = self._current_task
1851 if task is None or task is prev:
1852 # don't poll the same task more than once
1857 return self.returncode
1863 task = self._current_task
1865 # don't wait for the same task more than once
1868 # Before the task.wait() method returned, an exit
1869 # listener should have set self._current_task to either
1870 # a different task or None. Something is wrong.
1871 raise AssertionError("self._current_task has not " + \
1872 "changed since calling wait", self, task)
1876 return self.returncode
1878 def _assert_current(self, task):
1880 Raises an AssertionError if the given task is not the
1881 same one as self._current_task. This can be useful
1884 if task is not self._current_task:
1885 raise AssertionError("Unrecognized task: %s" % (task,))
1887 def _default_exit(self, task):
1889 Calls _assert_current() on the given task and then sets the
1890 composite returncode attribute if task.returncode != os.EX_OK.
1891 If the task failed then self._current_task will be set to None.
1892 Subclasses can use this as a generic task exit callback.
1895 @returns: The task.returncode attribute.
1897 self._assert_current(task)
1898 if task.returncode != os.EX_OK:
1899 self.returncode = task.returncode
1900 self._current_task = None
1901 return task.returncode
1903 def _final_exit(self, task):
1905 Assumes that task is the final task of this composite task.
1906 Calls _default_exit() and sets self.returncode to the task's
1907 returncode and sets self._current_task to None.
1909 self._default_exit(task)
1910 self._current_task = None
1911 self.returncode = task.returncode
1912 return self.returncode
1914 def _default_final_exit(self, task):
1916 This calls _final_exit() and then wait().
1918 Subclasses can use this as a generic final task exit callback.
1921 self._final_exit(task)
1924 def _start_task(self, task, exit_handler):
1926 Register exit handler for the given task, set it
1927 as self._current_task, and call task.start().
1929 Subclasses can use this as a generic way to start
1933 task.addExitListener(exit_handler)
1934 self._current_task = task
1937 class TaskSequence(CompositeTask):
1939 A collection of tasks that executes sequentially. Each task
1940 must have a addExitListener() method that can be used as
1941 a means to trigger movement from one task to the next.
1944 __slots__ = ("_task_queue",)
def __init__(self, **kwargs):
	AsynchronousTask.__init__(self, **kwargs)
	# Tasks execute in FIFO order; deque gives O(1) popleft().
	self._task_queue = deque()
def add(self, task):
	"""Append a task to the end of the sequence."""
	queue = self._task_queue
	queue.append(task)
1954 self._start_next_task()
1957 self._task_queue.clear()
1958 CompositeTask.cancel(self)
def _start_next_task(self):
	# Pop the next queued task and start it, chaining back into
	# _task_exit_handler when it completes.
	next_task = self._task_queue.popleft()
	self._start_task(next_task, self._task_exit_handler)
1964 def _task_exit_handler(self, task):
1965 if self._default_exit(task) != os.EX_OK:
1967 elif self._task_queue:
1968 self._start_next_task()
1970 self._final_exit(task)
1973 class SubProcess(AsynchronousTask):
1975 __slots__ = ("scheduler",) + ("pid", "_files", "_registered", "_reg_id")
1977 # A file descriptor is required for the scheduler to monitor changes from
1978 # inside a poll() loop. When logging is not enabled, create a pipe just to
1979 # serve this purpose alone.
1983 if self.returncode is not None:
1984 return self.returncode
1985 if self.pid is None:
1986 return self.returncode
1987 if self._registered:
1988 return self.returncode
1991 retval = os.waitpid(self.pid, os.WNOHANG)
1993 if e.errno != errno.ECHILD:
1996 retval = (self.pid, 1)
1998 if retval == (0, 0):
2000 self._set_returncode(retval)
2001 return self.returncode
2006 os.kill(self.pid, signal.SIGTERM)
2008 if e.errno != errno.ESRCH:
2012 self.cancelled = True
2013 if self.pid is not None:
2015 return self.returncode
2018 return self.pid is not None and \
2019 self.returncode is None
2023 if self.returncode is not None:
2024 return self.returncode
2026 if self._registered:
2027 self.scheduler.schedule(self._reg_id)
2029 if self.returncode is not None:
2030 return self.returncode
2033 wait_retval = os.waitpid(self.pid, 0)
2035 if e.errno != errno.ECHILD:
2038 self._set_returncode((self.pid, 1))
2040 self._set_returncode(wait_retval)
2042 return self.returncode
2044 def _unregister(self):
2046 Unregister from the scheduler and close open files.
2049 self._registered = False
2051 if self._reg_id is not None:
2052 self.scheduler.unregister(self._reg_id)
2055 if self._files is not None:
2056 for f in self._files.itervalues():
2060 def _set_returncode(self, wait_retval):
2062 retval = wait_retval[1]
2064 if retval != os.EX_OK:
2066 retval = (retval & 0xff) << 8
2068 retval = retval >> 8
2070 self.returncode = retval
2072 class SpawnProcess(SubProcess):
2075 Constructor keyword args are passed into portage.process.spawn().
2076 The required "args" keyword argument will be passed as the first
2080 _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2081 "uid", "gid", "groups", "umask", "logfile",
2082 "path_lookup", "pre_exec")
2084 __slots__ = ("args",) + \
2087 _file_names = ("log", "process", "stdout")
2088 _files_dict = slot_dict_class(_file_names, prefix="")
2096 if self.fd_pipes is None:
2098 fd_pipes = self.fd_pipes
2099 fd_pipes.setdefault(0, sys.stdin.fileno())
2100 fd_pipes.setdefault(1, sys.stdout.fileno())
2101 fd_pipes.setdefault(2, sys.stderr.fileno())
2103 # flush any pending output
2104 for fd in fd_pipes.itervalues():
2105 if fd == sys.stdout.fileno():
2107 if fd == sys.stderr.fileno():
2110 logfile = self.logfile
2111 self._files = self._files_dict()
2114 master_fd, slave_fd = self._pipe(fd_pipes)
2115 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2116 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2119 fd_pipes_orig = fd_pipes.copy()
2121 # TODO: Use job control functions like tcsetpgrp() to control
2122 # access to stdin. Until then, use /dev/null so that any
2123 # attempts to read from stdin will immediately return EOF
2124 # instead of blocking indefinitely.
2125 null_input = open('/dev/null', 'rb')
2126 fd_pipes[0] = null_input.fileno()
2128 fd_pipes[0] = fd_pipes_orig[0]
2130 files.process = os.fdopen(master_fd, 'r')
2131 if logfile is not None:
2133 fd_pipes[1] = slave_fd
2134 fd_pipes[2] = slave_fd
2136 files.log = open(logfile, "a")
2137 portage.util.apply_secpass_permissions(logfile,
2138 uid=portage.portage_uid, gid=portage.portage_gid,
2141 if not self.background:
2142 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'w')
2144 output_handler = self._output_handler
2148 # Create a dummy pipe so the scheduler can monitor
2149 # the process from inside a poll() loop.
2150 fd_pipes[self._dummy_pipe_fd] = slave_fd
2152 fd_pipes[1] = slave_fd
2153 fd_pipes[2] = slave_fd
2154 output_handler = self._dummy_handler
2157 for k in self._spawn_kwarg_names:
2158 v = getattr(self, k)
2162 kwargs["fd_pipes"] = fd_pipes
2163 kwargs["returnpid"] = True
2164 kwargs.pop("logfile", None)
2166 retval = self._spawn(self.args, **kwargs)
2169 if null_input is not None:
2172 if isinstance(retval, int):
2174 for f in files.values():
2176 self.returncode = retval
2180 self.pid = retval[0]
2181 portage.process.spawned_pids.remove(self.pid)
2183 self._reg_id = self.scheduler.register(files.process.fileno(),
2184 PollConstants.POLLIN, output_handler)
2185 self._registered = True
2187 def _pipe(self, fd_pipes):
2189 @type fd_pipes: dict
2190 @param fd_pipes: pipes from which to copy terminal size if desired.
def _spawn(self, args, **kwargs):
	"""Process-creation hook; subclasses override this to customize
	how the child process is spawned."""
	return portage.process.spawn(args, **kwargs)
2197 def _output_handler(self, fd, event):
2199 buf = array.array('B')
2201 buf.fromfile(files.process, self._bufsize)
2205 if not self.background:
2206 buf.tofile(files.stdout)
2207 files.stdout.flush()
2208 buf.tofile(files.log)
2213 return self._registered
2215 def _dummy_handler(self, fd, event):
2217 This method is mainly interested in detecting EOF, since
2218 the only purpose of the pipe is to allow the scheduler to
2219 monitor the process from inside a poll() loop.
2222 buf = array.array('B')
2224 buf.fromfile(files.process, self._bufsize)
2232 return self._registered
2234 class MiscFunctionsProcess(SpawnProcess):
2236 Spawns misc-functions.sh with an existing ebuild environment.
2239 __slots__ = ("commands", "phase", "pkg", "settings")
2242 settings = self.settings
2243 settings.pop("EBUILD_PHASE", None)
2244 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2245 misc_sh_binary = os.path.join(portage_bin_path,
2246 os.path.basename(portage.const.MISC_SH_BINARY))
2248 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2249 self.logfile = settings.get("PORTAGE_LOG_FILE")
2251 portage._doebuild_exit_status_unlink(
2252 settings.get("EBUILD_EXIT_STATUS_FILE"))
2254 SpawnProcess._start(self)
def _spawn(self, args, **kwargs):
	"""Run the joined command line through portage.spawn() using the
	ebuild environment held in self.settings."""
	settings = self.settings
	is_debug = settings.get("PORTAGE_DEBUG") == "1"
	cmd = " ".join(args)
	return portage.spawn(cmd, settings, debug=is_debug, **kwargs)
def _set_returncode(self, wait_retval):
	# Translate the waitpid() status first, then fold in the ebuild
	# exit-status file check, which may log and override the result.
	SpawnProcess._set_returncode(self, wait_retval)
	self.returncode = portage._doebuild_exit_status_check_and_log(
		self.settings, self.phase, self.returncode)
2267 class EbuildFetcher(SpawnProcess):
2269 __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2274 root_config = self.pkg.root_config
2275 portdb = root_config.trees["porttree"].dbapi
2276 ebuild_path = portdb.findname(self.pkg.cpv)
2277 settings = self.config_pool.allocate()
2278 self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2279 self._build_dir.lock()
2280 self._build_dir.clean()
2281 portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2282 if self.logfile is None:
2283 self.logfile = settings.get("PORTAGE_LOG_FILE")
2289 # If any incremental variables have been overridden
2290 # via the environment, those values need to be passed
2291 # along here so that they are correctly considered by
2292 # the config instance in the subproccess.
2293 fetch_env = os.environ.copy()
2295 fetch_env["PORTAGE_NICENESS"] = "0"
2297 fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2299 ebuild_binary = os.path.join(
2300 settings["PORTAGE_BIN_PATH"], "ebuild")
2302 fetch_args = [ebuild_binary, ebuild_path, phase]
2303 debug = settings.get("PORTAGE_DEBUG") == "1"
2305 fetch_args.append("--debug")
2307 self.args = fetch_args
2308 self.env = fetch_env
2309 SpawnProcess._start(self)
2311 def _pipe(self, fd_pipes):
2312 """When appropriate, use a pty so that fetcher progress bars,
2313 like wget has, will work properly."""
2314 if self.background or not sys.stdout.isatty():
2315 # When the output only goes to a log file,
2316 # there's no point in creating a pty.
2318 stdout_pipe = fd_pipes.get(1)
2319 got_pty, master_fd, slave_fd = \
2320 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2321 return (master_fd, slave_fd)
2323 def _set_returncode(self, wait_retval):
2324 SpawnProcess._set_returncode(self, wait_retval)
2325 # Collect elog messages that might have been
2326 # created by the pkg_nofetch phase.
2327 if self._build_dir is not None:
2328 # Skip elog messages for prefetch, in order to avoid duplicates.
2329 if not self.prefetch and self.returncode != os.EX_OK:
2331 if self.logfile is not None:
2333 elog_out = open(self.logfile, 'a')
2334 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2335 if self.logfile is not None:
2336 msg += ", Log file:"
2337 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2338 if self.logfile is not None:
2339 eerror(" '%s'" % (self.logfile,),
2340 phase="unpack", key=self.pkg.cpv, out=elog_out)
2341 if elog_out is not None:
2343 if not self.prefetch:
2344 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2345 features = self._build_dir.settings.features
2346 if self.returncode == os.EX_OK:
2347 self._build_dir.clean()
2348 self._build_dir.unlock()
2349 self.config_pool.deallocate(self._build_dir.settings)
2350 self._build_dir = None
2352 class EbuildBuildDir(SlotObject):
2354 __slots__ = ("dir_path", "pkg", "settings",
2355 "locked", "_catdir", "_lock_obj")
2357 def __init__(self, **kwargs):
2358 SlotObject.__init__(self, **kwargs)
2363 This raises an AlreadyLocked exception if lock() is called
2364 while a lock is already held. In order to avoid this, call
2365 unlock() or check whether the "locked" attribute is True
2366 or False before calling lock().
2368 if self._lock_obj is not None:
2369 raise self.AlreadyLocked((self._lock_obj,))
2371 dir_path = self.dir_path
2372 if dir_path is None:
2373 root_config = self.pkg.root_config
2374 portdb = root_config.trees["porttree"].dbapi
2375 ebuild_path = portdb.findname(self.pkg.cpv)
2376 settings = self.settings
2377 settings.setcpv(self.pkg)
2378 debug = settings.get("PORTAGE_DEBUG") == "1"
2379 use_cache = 1 # always true
2380 portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2381 self.settings, debug, use_cache, portdb)
2382 dir_path = self.settings["PORTAGE_BUILDDIR"]
2384 catdir = os.path.dirname(dir_path)
2385 self._catdir = catdir
2387 portage.util.ensure_dirs(os.path.dirname(catdir),
2388 gid=portage.portage_gid,
2392 catdir_lock = portage.locks.lockdir(catdir)
2393 portage.util.ensure_dirs(catdir,
2394 gid=portage.portage_gid,
2396 self._lock_obj = portage.locks.lockdir(dir_path)
2398 self.locked = self._lock_obj is not None
2399 if catdir_lock is not None:
2400 portage.locks.unlockdir(catdir_lock)
2403 """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
2404 by keepwork or keeptemp in FEATURES."""
2405 settings = self.settings
2406 features = settings.features
2407 if not ("keepwork" in features or "keeptemp" in features):
2409 shutil.rmtree(settings["PORTAGE_BUILDDIR"])
2410 except EnvironmentError, e:
2411 if e.errno != errno.ENOENT:
2416 if self._lock_obj is None:
2419 portage.locks.unlockdir(self._lock_obj)
2420 self._lock_obj = None
2423 catdir = self._catdir
2426 catdir_lock = portage.locks.lockdir(catdir)
2432 if e.errno not in (errno.ENOENT,
2433 errno.ENOTEMPTY, errno.EEXIST):
2436 portage.locks.unlockdir(catdir_lock)
2438 class AlreadyLocked(portage.exception.PortageException):
2441 class EbuildBuild(CompositeTask):
2443 __slots__ = ("args_set", "config_pool", "find_blockers",
2444 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2445 "prefetcher", "settings", "world_atom") + \
2446 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2450 logger = self.logger
2453 settings = self.settings
2454 world_atom = self.world_atom
2455 root_config = pkg.root_config
2458 portdb = root_config.trees[tree].dbapi
2459 settings["EMERGE_FROM"] = pkg.type_name
2460 settings.backup_changes("EMERGE_FROM")
2462 ebuild_path = portdb.findname(self.pkg.cpv)
2463 self._ebuild_path = ebuild_path
2465 prefetcher = self.prefetcher
2466 if prefetcher is None:
2468 elif not prefetcher.isAlive():
2470 elif prefetcher.poll() is None:
2472 waiting_msg = "Fetching files " + \
2473 "in the background. " + \
2474 "To view fetch progress, run `tail -f " + \
2475 "/var/log/emerge-fetch.log` in another " + \
2477 msg_prefix = colorize("GOOD", " * ")
2478 from textwrap import wrap
2479 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2480 for line in wrap(waiting_msg, 65))
2481 if not self.background:
2482 writemsg(waiting_msg, noiselevel=-1)
2484 self._current_task = prefetcher
2485 prefetcher.addExitListener(self._prefetch_exit)
2488 self._prefetch_exit(prefetcher)
2490 def _prefetch_exit(self, prefetcher):
2494 settings = self.settings
2497 fetcher = EbuildFetchonly(
2498 fetch_all=opts.fetch_all_uri,
2499 pkg=pkg, pretend=opts.pretend,
2501 retval = fetcher.execute()
2502 self.returncode = retval
2506 fetcher = EbuildFetcher(config_pool=self.config_pool,
2507 fetchall=opts.fetch_all_uri,
2508 fetchonly=opts.fetchonly,
2509 background=self.background,
2510 pkg=pkg, scheduler=self.scheduler)
2512 self._start_task(fetcher, self._fetch_exit)
# After fetching: record/cleanup the fetch log, lock the build dir, log the
# Clean/Compile messages, decide whether a binary package must be built
# (--buildpkg or FEATURES=buildsyspkg for system packages), then start the
# EbuildExecuter.  NOTE(review): several interior lines are missing from
# this excerpt (conditions and early returns are not all visible).
2514 def _fetch_exit(self, fetcher):
2518 fetch_failed = False
2520 fetch_failed = self._final_exit(fetcher) != os.EX_OK
2522 fetch_failed = self._default_exit(fetcher) != os.EX_OK
# On failure, point PORTAGE_LOG_FILE at the fetch log so later error
# reporting can show what went wrong.
2524 if fetch_failed and fetcher.logfile is not None and \
2525 os.path.exists(fetcher.logfile):
2526 self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2528 if not fetch_failed and fetcher.logfile is not None:
2529 # Fetch was successful, so remove the fetch log.
2531 os.unlink(fetcher.logfile)
2535 if fetch_failed or opts.fetchonly:
2539 logger = self.logger
2541 pkg_count = self.pkg_count
2542 scheduler = self.scheduler
2543 settings = self.settings
2544 features = settings.features
2545 ebuild_path = self._ebuild_path
2546 system_set = pkg.root_config.sets["system"]
# Hold the build-dir lock for the remainder of the build; released via
# _unlock_builddir() on failure or after merge.
2548 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2549 self._build_dir.lock()
2551 # Cleaning is triggered before the setup
2552 # phase, in portage.doebuild().
2553 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2554 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2555 short_msg = "emerge: (%s of %s) %s Clean" % \
2556 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2557 logger.log(msg, short_msg=short_msg)
2559 #buildsyspkg: Check if we need to _force_ binary package creation
2560 self._issyspkg = "buildsyspkg" in features and \
2561 system_set.findAtomForPackage(pkg) and \
2564 if opts.buildpkg or self._issyspkg:
2566 self._buildpkg = True
2568 msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2569 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2570 short_msg = "emerge: (%s of %s) %s Compile" % \
2571 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2572 logger.log(msg, short_msg=short_msg)
2575 msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2576 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2577 short_msg = "emerge: (%s of %s) %s Compile" % \
2578 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2579 logger.log(msg, short_msg=short_msg)
2581 build = EbuildExecuter(background=self.background, pkg=pkg,
2582 scheduler=scheduler, settings=settings)
2583 self._start_task(build, self._build_exit)
def _unlock_builddir(self):
	"""Flush queued elog messages for this package and release the
	build-directory lock."""
	# Process elog output before dropping the lock so messages are
	# emitted while we still own the build directory.
	pkg = self.pkg
	portage.elog.elog_process(pkg.cpv, self.settings)
	self._build_dir.unlock()
# After the EbuildExecuter finishes: on failure unlock the build dir and
# stop; otherwise either start the binary packager (EbuildBinpkg) or, for
# syspkg rescue tarballs, announce the packaging step first.
# NOTE(review): interior lines are missing from this excerpt.
2589 def _build_exit(self, build):
2590 if self._default_exit(build) != os.EX_OK:
2591 self._unlock_builddir()
2596 buildpkg = self._buildpkg
2599 self._final_exit(build)
2604 msg = ">>> This is a system package, " + \
2605 "let's pack a rescue tarball.\n"
2607 log_path = self.settings.get("PORTAGE_LOG_FILE")
2608 if log_path is not None:
2609 log_file = open(log_path, 'a')
2615 if not self.background:
2616 portage.writemsg_stdout(msg, noiselevel=-1)
2618 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2619 scheduler=self.scheduler, settings=self.settings)
2621 self._start_task(packager, self._buildpkg_exit)
2623 def _buildpkg_exit(self, packager):
# Docstring fragment (triple quotes not visible in this excerpt):
2625 Released build dir lock when there is a failure or
2626 when in buildpkgonly mode. Otherwise, the lock will
2627 be released when merge() is called.
# In buildpkgonly mode there is no merge, so run the "clean" phase here
# and unlock in _clean_exit().  NOTE(review): `phase` is assigned on a
# line missing from this excerpt.
2630 if self._default_exit(packager) == os.EX_OK and \
2631 self.opts.buildpkgonly:
2632 # Need to call "clean" phase for buildpkgonly mode
2633 portage.elog.elog_process(self.pkg.cpv, self.settings)
2635 clean_phase = EbuildPhase(background=self.background,
2636 pkg=self.pkg, phase=phase,
2637 scheduler=self.scheduler, settings=self.settings,
2639 self._start_task(clean_phase, self._clean_exit)
2642 if self._final_exit(packager) != os.EX_OK or \
2643 self.opts.buildpkgonly:
2644 self._unlock_builddir()
# Final step of the buildpkgonly path: release the build-dir lock when the
# clean phase failed or no merge will follow.  NOTE(review): trailing lines
# of this method are missing from this excerpt.
2647 def _clean_exit(self, clean_phase):
2648 if self._final_exit(clean_phase) != os.EX_OK or \
2649 self.opts.buildpkgonly:
2650 self._unlock_builddir()
# NOTE(review): body of EbuildBuild.install — the `def` line and the
# surrounding docstring quotes are missing from this excerpt.
2655 Install the package and then clean up and release locks.
2656 Only call this after the build has completed successfully
2657 and neither fetchonly nor buildpkgonly mode are enabled.
2660 find_blockers = self.find_blockers
2661 ldpath_mtimes = self.ldpath_mtimes
2662 logger = self.logger
2664 pkg_count = self.pkg_count
2665 settings = self.settings
2666 world_atom = self.world_atom
2667 ebuild_path = self._ebuild_path
# Synchronous merge; the build-dir lock taken in _fetch_exit() is
# released in the cleanup below regardless of outcome.
2670 merge = EbuildMerge(find_blockers=self.find_blockers,
2671 ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2672 pkg_count=pkg_count, pkg_path=ebuild_path,
2673 scheduler=self.scheduler,
2674 settings=settings, tree=tree, world_atom=world_atom)
2676 msg = " === (%s of %s) Merging (%s::%s)" % \
2677 (pkg_count.curval, pkg_count.maxval,
2678 pkg.cpv, ebuild_path)
2679 short_msg = "emerge: (%s of %s) %s Merge" % \
2680 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2681 logger.log(msg, short_msg=short_msg)
2684 rval = merge.execute()
2686 self._unlock_builddir()
# Runs the sequence of ebuild phases (clean, setup, unpack, then
# prepare/configure/compile/test/install as permitted by EAPI) as chained
# asynchronous tasks.  NOTE(review): many interior lines are missing from
# this excerpt; bodies below are incomplete.
2690 class EbuildExecuter(CompositeTask):
2692 __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2694 _phases = ("prepare", "configure", "compile", "test", "install")
# Eclasses whose ebuilds need serialized $DISTDIR access (see
# _setup_exit below); the set contents are not visible in this excerpt.
2696 _live_eclasses = frozenset([
2706 self._tree = "porttree"
2709 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2710 scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2711 self._start_task(clean_phase, self._clean_phase_exit)
2713 def _clean_phase_exit(self, clean_phase):
2715 if self._default_exit(clean_phase) != os.EX_OK:
2720 scheduler = self.scheduler
2721 settings = self.settings
2724 # This initializes PORTAGE_LOG_FILE.
2725 portage.prepare_build_dirs(pkg.root, settings, cleanup)
# Setup phases are rate-limited through the scheduler rather than
# started directly (scheduleSetup).
2727 setup_phase = EbuildPhase(background=self.background,
2728 pkg=pkg, phase="setup", scheduler=scheduler,
2729 settings=settings, tree=self._tree)
2731 setup_phase.addExitListener(self._setup_exit)
2732 self._current_task = setup_phase
2733 self.scheduler.scheduleSetup(setup_phase)
2735 def _setup_exit(self, setup_phase):
2737 if self._default_exit(setup_phase) != os.EX_OK:
2741 unpack_phase = EbuildPhase(background=self.background,
2742 pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2743 settings=self.settings, tree=self._tree)
2745 if self._live_eclasses.intersection(self.pkg.inherited):
2746 # Serialize $DISTDIR access for live ebuilds since
2747 # otherwise they can interfere with eachother.
2749 unpack_phase.addExitListener(self._unpack_exit)
2750 self._current_task = unpack_phase
2751 self.scheduler.scheduleUnpack(unpack_phase)
2754 self._start_task(unpack_phase, self._unpack_exit)
2756 def _unpack_exit(self, unpack_phase):
2758 if self._default_exit(unpack_phase) != os.EX_OK:
2762 ebuild_phases = TaskSequence(scheduler=self.scheduler)
# Trim the phase list for older EAPIs that predate src_prepare /
# src_configure.  NOTE(review): the slicing lines are missing here.
2765 phases = self._phases
2766 eapi = pkg.metadata["EAPI"]
2767 if eapi in ("0", "1", "2_pre1"):
2768 # skip src_prepare and src_configure
2770 elif eapi in ("2_pre2",):
2774 for phase in phases:
2775 ebuild_phases.add(EbuildPhase(background=self.background,
2776 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
2777 settings=self.settings, tree=self._tree))
2779 self._start_task(ebuild_phases, self._default_final_exit)
2781 class EbuildMetadataPhase(SubProcess):
# Docstring fragment (quotes not visible in this excerpt):
2784 Asynchronous interface for the ebuild "depend" phase which is
2785 used to extract metadata from the ebuild.
2788 __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
2789 "ebuild_mtime", "portdb", "repo_path", "settings") + \
2792 _file_names = ("ebuild",)
2793 _files_dict = slot_dict_class(_file_names, prefix="")
2794 _bufsize = SpawnProcess._bufsize
# _start: spawn `ebuild ... depend` with its output wired to a
# non-blocking pipe, and register the read end with the poll scheduler.
# NOTE(review): interior lines are missing from this excerpt.
2798 settings = self.settings
2800 ebuild_path = self.ebuild_path
2801 debug = settings.get("PORTAGE_DEBUG") == "1"
2805 if self.fd_pipes is not None:
2806 fd_pipes = self.fd_pipes.copy()
2810 fd_pipes.setdefault(0, sys.stdin.fileno())
2811 fd_pipes.setdefault(1, sys.stdout.fileno())
2812 fd_pipes.setdefault(2, sys.stderr.fileno())
2814 # flush any pending output
2815 for fd in fd_pipes.itervalues():
2816 if fd == sys.stdout.fileno():
2818 if fd == sys.stderr.fileno():
2821 fd_pipes_orig = fd_pipes.copy()
2822 self._files = self._files_dict()
# Non-blocking read end so the poll loop never stalls on this pipe.
2825 master_fd, slave_fd = os.pipe()
2826 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2827 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2829 fd_pipes[self._metadata_fd] = slave_fd
2831 retval = portage.doebuild(ebuild_path, "depend",
2832 settings["ROOT"], settings, debug,
2833 mydbapi=self.portdb, tree="porttree",
2834 fd_pipes=fd_pipes, returnpid=True)
2838 if isinstance(retval, int):
2839 # doebuild failed before spawning
2841 self.returncode = retval
2845 self.pid = retval[0]
# Take over child reaping from portage's global pid list.
2846 portage.process.spawned_pids.remove(self.pid)
2848 self._raw_metadata = []
2849 files.ebuild = os.fdopen(master_fd, 'r')
2850 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
2851 PollConstants.POLLIN, self._output_handler)
2852 self._registered = True
# Poll callback: accumulate raw metadata; on EOF (empty read) pair the
# collected lines with portage.auxdbkeys and invoke metadata_callback.
2854 def _output_handler(self, fd, event):
2856 self._raw_metadata.append(files.ebuild.read())
2857 if not self._raw_metadata[-1]:
2861 if self.returncode == os.EX_OK:
2862 metadata = izip(portage.auxdbkeys,
2863 "".join(self._raw_metadata).splitlines())
2864 self.metadata_callback(self.cpv, self.ebuild_path,
2865 self.repo_path, metadata, self.ebuild_mtime)
2867 return self._registered
# SpawnProcess subclass that runs a single ebuild phase via
# portage.doebuild().  NOTE(review): several interior lines are missing
# from this excerpt (e.g. the _start/_spawn def lines).
2869 class EbuildProcess(SpawnProcess):
2871 __slots__ = ("phase", "pkg", "settings", "tree")
2874 # Don't open the log file during the clean phase since the
2875 # open file can result in an nfs lock on $T/build.log which
2876 # prevents the clean phase from removing $T.
2877 if self.phase not in ("clean", "cleanrm"):
2878 self.logfile = self.settings.get("PORTAGE_LOG_FILE")
2879 SpawnProcess._start(self)
# Prefer a pty (falling back to a plain pipe) so child output keeps
# terminal semantics; term size is copied from the stdout target.
2881 def _pipe(self, fd_pipes):
2882 stdout_pipe = fd_pipes.get(1)
2883 got_pty, master_fd, slave_fd = \
2884 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2885 return (master_fd, slave_fd)
# _spawn: delegate to portage.doebuild() for the configured phase.
2887 def _spawn(self, args, **kwargs):
2889 root_config = self.pkg.root_config
2891 mydbapi = root_config.trees[tree].dbapi
2892 settings = self.settings
2893 ebuild_path = settings["EBUILD"]
2894 debug = settings.get("PORTAGE_DEBUG") == "1"
2896 rval = portage.doebuild(ebuild_path, self.phase,
2897 root_config.root, settings, debug,
2898 mydbapi=mydbapi, tree=tree, **kwargs)
# Post-exit: reconcile the exit-status file with the process status
# (except for clean phases) and restore userpriv permissions.
2902 def _set_returncode(self, wait_retval):
2903 SpawnProcess._set_returncode(self, wait_retval)
2905 if self.phase not in ("clean", "cleanrm"):
2906 self.returncode = portage._doebuild_exit_status_check_and_log(
2907 self.settings, self.phase, self.returncode)
2909 portage._post_phase_userpriv_perms(self.settings)
# Wraps one EbuildProcess and then runs any configured post-phase
# commands (MiscFunctionsProcess).  NOTE(review): interior lines are
# missing from this excerpt.
2911 class EbuildPhase(CompositeTask):
2913 __slots__ = ("background", "pkg", "phase",
2914 "scheduler", "settings", "tree")
2916 _post_phase_cmds = portage._post_phase_cmds
2920 ebuild_process = EbuildProcess(background=self.background,
2921 pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
2922 settings=self.settings, tree=self.tree)
2924 self._start_task(ebuild_process, self._ebuild_exit)
2926 def _ebuild_exit(self, ebuild_process):
# After src_install, scan the build log for QA problems
# (portage._check_build_log), writing output to the log when running
# in the background.
2928 if self.phase == "install":
2930 log_path = self.settings.get("PORTAGE_LOG_FILE")
2932 if self.background and log_path is not None:
2933 log_file = open(log_path, 'a')
2936 portage._check_build_log(self.settings, out=out)
2938 if log_file is not None:
2941 if self._default_exit(ebuild_process) != os.EX_OK:
2945 settings = self.settings
2947 if self.phase == "install":
2948 portage._post_src_install_uid_fix(settings)
2950 post_phase_cmds = self._post_phase_cmds.get(self.phase)
2951 if post_phase_cmds is not None:
2952 post_phase = MiscFunctionsProcess(background=self.background,
2953 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
2954 scheduler=self.scheduler, settings=settings)
2955 self._start_task(post_phase, self._post_phase_exit)
# No post-phase commands: propagate the ebuild process result.
2958 self.returncode = ebuild_process.returncode
2959 self._current_task = None
2962 def _post_phase_exit(self, post_phase):
2963 if self._final_exit(post_phase) != os.EX_OK:
2964 writemsg("!!! post %s failed; exiting.\n" % self.phase,
2966 self._current_task = None
# Runs the "package" phase, writing the tbz2 to a pid-suffixed temp file
# and injecting it into the binary tree on success.
2970 class EbuildBinpkg(EbuildProcess):
# Docstring fragment (quotes not visible in this excerpt):
2972 This assumes that src_install() has successfully completed.
2974 __slots__ = ("_binpkg_tmpfile",)
2977 self.phase = "package"
2978 self.tree = "porttree"
# _start fragment (def line missing from this excerpt): stage the
# package into a unique temp file so a failed/killed packaging run
# never leaves a truncated tbz2 at its final name.
2980 root_config = pkg.root_config
2981 portdb = root_config.trees["porttree"].dbapi
2982 bintree = root_config.trees["bintree"]
2983 ebuild_path = portdb.findname(self.pkg.cpv)
2984 settings = self.settings
2985 debug = settings.get("PORTAGE_DEBUG") == "1"
2987 bintree.prevent_collision(pkg.cpv)
2988 binpkg_tmpfile = os.path.join(bintree.pkgdir,
2989 pkg.cpv + ".tbz2." + str(os.getpid()))
2990 self._binpkg_tmpfile = binpkg_tmpfile
2991 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
2992 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
2995 EbuildProcess._start(self)
2997 settings.pop("PORTAGE_BINPKG_TMPFILE", None)
2999 def _set_returncode(self, wait_retval):
3000 EbuildProcess._set_returncode(self, wait_retval)
3003 bintree = pkg.root_config.trees["bintree"]
3004 binpkg_tmpfile = self._binpkg_tmpfile
3005 if self.returncode == os.EX_OK:
3006 bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
# Synchronous merge of a built package image into the live filesystem via
# portage.merge(); on success updates the world file and logs completion.
# NOTE(review): interior lines (e.g. the execute() def line) are missing
# from this excerpt.
3008 class EbuildMerge(SlotObject):
3010 __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3011 "pkg", "pkg_count", "pkg_path", "pretend",
3012 "scheduler", "settings", "tree", "world_atom")
3015 root_config = self.pkg.root_config
3016 settings = self.settings
3017 retval = portage.merge(settings["CATEGORY"],
3018 settings["PF"], settings["D"],
3019 os.path.join(settings["PORTAGE_BUILDDIR"],
3020 "build-info"), root_config.root, settings,
3021 myebuild=settings["EBUILD"],
3022 mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3023 vartree=root_config.trees["vartree"],
3024 prev_mtimes=self.ldpath_mtimes,
3025 scheduler=self.scheduler,
3026 blockers=self.find_blockers)
3028 if retval == os.EX_OK:
# world_atom is a callback that records the package in the world set.
3029 self.world_atom(self.pkg)
3034 def _log_success(self):
3036 pkg_count = self.pkg_count
3037 pkg_path = self.pkg_path
3038 logger = self.logger
3039 if "noclean" not in self.settings.features:
3040 short_msg = "emerge: (%s of %s) %s Clean Post" % \
3041 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3042 logger.log((" === (%s of %s) " + \
3043 "Post-Build Cleaning (%s::%s)") % \
3044 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3045 short_msg=short_msg)
3046 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3047 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# Synchronous wrapper around unmerge(); UninstallFailure is translated into
# the task's returncode.  NOTE(review): interior lines are missing.
3049 class PackageUninstall(AsynchronousTask):
3051 __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3055 unmerge(self.pkg.root_config, self.opts, "unmerge",
3056 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3057 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3058 writemsg_level=self._writemsg_level)
3059 except UninstallFailure, e:
3060 self.returncode = e.status
3062 self.returncode = os.EX_OK
# Route unmerge output either to the console or (in background mode
# below WARNING) only to PORTAGE_LOG_FILE.
3065 def _writemsg_level(self, msg, level=0, noiselevel=0):
3067 log_path = self.settings.get("PORTAGE_LOG_FILE")
3068 background = self.background
3070 if log_path is None:
3071 if not (background and level < logging.WARNING):
3072 portage.util.writemsg_level(msg,
3073 level=level, noiselevel=noiselevel)
3076 portage.util.writemsg_level(msg,
3077 level=level, noiselevel=noiselevel)
3079 f = open(log_path, 'a')
# Install a binary package: optionally fetch it (--getbinpkg), verify its
# digests, run clean/setup phases, extract the image, then merge.  Mirrors
# EbuildBuild's prefetcher synchronization.  NOTE(review): many interior
# lines are missing from this excerpt; method bodies below are incomplete.
3085 class Binpkg(CompositeTask):
3087 __slots__ = ("find_blockers",
3088 "ldpath_mtimes", "logger", "opts",
3089 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3090 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3091 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3093 def _writemsg_level(self, msg, level=0, noiselevel=0):
3095 if not self.background:
3096 portage.util.writemsg_level(msg,
3097 level=level, noiselevel=noiselevel)
3099 log_path = self.settings.get("PORTAGE_LOG_FILE")
3100 if log_path is not None:
3101 f = open(log_path, 'a')
# _start fragment (def line missing): compute build-dir paths and decide
# whether digest verification is needed ("strict" and not --pretend).
3110 settings = self.settings
3111 settings.setcpv(pkg)
3112 self._tree = "bintree"
3113 self._bintree = self.pkg.root_config.trees[self._tree]
3114 self._verify = "strict" in self.settings.features and \
3115 not self.opts.pretend
3117 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3118 "portage", pkg.category, pkg.pf)
3119 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3120 pkg=pkg, settings=settings)
3121 self._image_dir = os.path.join(dir_path, "image")
3122 self._infloc = os.path.join(dir_path, "build-info")
3123 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3125 # The prefetcher has already completed or it
3126 # could be running now. If it's running now,
3127 # wait for it to complete since it holds
3128 # a lock on the file being fetched. The
3129 # portage.locks functions are only designed
3130 # to work between separate processes. Since
3131 # the lock is held by the current process,
3132 # use the scheduler and fetcher methods to
3133 # synchronize with the fetcher.
3134 prefetcher = self.prefetcher
3135 if prefetcher is None:
3137 elif not prefetcher.isAlive():
3139 elif prefetcher.poll() is None:
3141 waiting_msg = ("Fetching '%s' " + \
3142 "in the background. " + \
3143 "To view fetch progress, run `tail -f " + \
3144 "/var/log/emerge-fetch.log` in another " + \
3145 "terminal.") % prefetcher.pkg_path
3146 msg_prefix = colorize("GOOD", " * ")
3147 from textwrap import wrap
3148 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3149 for line in wrap(waiting_msg, 65))
3150 if not self.background:
3151 writemsg(waiting_msg, noiselevel=-1)
3153 self._current_task = prefetcher
3154 prefetcher.addExitListener(self._prefetch_exit)
3157 self._prefetch_exit(prefetcher)
# Fetch the tbz2 from the binhost when it is remote; otherwise skip
# straight to verification.
3159 def _prefetch_exit(self, prefetcher):
3162 pkg_count = self.pkg_count
3163 fetcher = BinpkgFetcher(background=self.background,
3164 logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3165 scheduler=self.scheduler)
3166 pkg_path = fetcher.pkg_path
3167 self._pkg_path = pkg_path
3169 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3171 msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3172 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3173 short_msg = "emerge: (%s of %s) %s Fetch" % \
3174 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3175 self.logger.log(msg, short_msg=short_msg)
3178 fetcher.addExitListener(self._fetcher_exit)
3179 self._current_task = fetcher
3180 self.scheduler.fetch.schedule(fetcher)
3182 self._start_task(fetcher, self._fetcher_exit)
3185 self._fetcher_exit(fetcher)
3187 def _fetcher_exit(self, fetcher):
3189 # The fetcher only has a returncode when
3190 # --getbinpkg is enabled.
3191 if fetcher.returncode is not None:
3192 self._fetched_pkg = True
3193 if self.opts.fetchonly:
3194 self._final_exit(fetcher)
3197 elif self._default_exit(fetcher) != os.EX_OK:
# Digest verification is scheduled through the fetch queue so it shares
# the fetch log file.
3203 verifier = BinpkgVerifier(background=self.background,
3204 logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3207 verifier.addExitListener(self._verifier_exit)
3208 self._current_task = verifier
3209 self.scheduler.fetch.schedule(verifier)
3211 self._start_task(verifier, self._verifier_exit)
3214 self._verifier_exit(verifier)
3216 def _verifier_exit(self, verifier):
3217 if verifier is not None and \
3218 self._default_exit(verifier) != os.EX_OK:
3222 logger = self.logger
3224 pkg_count = self.pkg_count
3225 pkg_path = self._pkg_path
# A freshly fetched package is injected into the local bintree so its
# metadata/index are updated.
3227 if self._fetched_pkg:
3228 self._bintree.inject(pkg.cpv, filename=pkg_path)
3230 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3231 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3232 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3233 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3234 logger.log(msg, short_msg=short_msg)
3236 self._build_dir.lock()
3239 settings = self.settings
3240 settings.setcpv(pkg)
3241 settings["EBUILD"] = self._ebuild_path
3242 ebuild_phase = EbuildPhase(background=self.background,
3243 pkg=pkg, phase=phase, scheduler=self.scheduler,
3244 settings=settings, tree=self._tree)
3246 self._start_task(ebuild_phase, self._clean_exit)
# After "clean": recreate the build dirs, unpack build-info/xpak
# metadata, record BINPKGMD5, then schedule the "setup" phase.
3248 def _clean_exit(self, clean_phase):
3249 if self._default_exit(clean_phase) != os.EX_OK:
3250 self._unlock_builddir()
3254 dir_path = self._build_dir.dir_path
3257 shutil.rmtree(dir_path)
3258 except (IOError, OSError), e:
3259 if e.errno != errno.ENOENT:
3263 infloc = self._infloc
3265 pkg_path = self._pkg_path
3268 for mydir in (dir_path, self._image_dir, infloc):
3269 portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3270 gid=portage.data.portage_gid, mode=dir_mode)
3272 # This initializes PORTAGE_LOG_FILE.
3273 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3274 self._writemsg_level(">>> Extracting info\n")
3276 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3277 check_missing_metadata = ("CATEGORY", "PF")
3278 missing_metadata = set()
3279 for k in check_missing_metadata:
3280 v = pkg_xpak.getfile(k)
3282 missing_metadata.add(k)
3284 pkg_xpak.unpackinfo(infloc)
3285 for k in missing_metadata:
3293 f = open(os.path.join(infloc, k), 'wb')
3299 # Store the md5sum in the vdb.
3300 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3302 f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3306 # This gives bashrc users an opportunity to do various things
3307 # such as remove binary packages after they're installed.
3308 settings = self.settings
3309 settings.setcpv(self.pkg)
3310 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3311 settings.backup_changes("PORTAGE_BINPKG_FILE")
3314 setup_phase = EbuildPhase(background=self.background,
3315 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3316 settings=settings, tree=self._tree)
3318 setup_phase.addExitListener(self._setup_exit)
3319 self._current_task = setup_phase
3320 self.scheduler.scheduleSetup(setup_phase)
3322 def _setup_exit(self, setup_phase):
3323 if self._default_exit(setup_phase) != os.EX_OK:
3324 self._unlock_builddir()
3328 extractor = BinpkgExtractorAsync(background=self.background,
3329 image_dir=self._image_dir,
3330 pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3331 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3332 self._start_task(extractor, self._extractor_exit)
3334 def _extractor_exit(self, extractor):
3335 if self._final_exit(extractor) != os.EX_OK:
3336 self._unlock_builddir()
3337 writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
# Flush elog messages before releasing the build-dir lock.
3341 def _unlock_builddir(self):
3342 portage.elog.elog_process(self.pkg.cpv, self.settings)
3343 self._build_dir.unlock()
# install fragment (def line missing): merge the extracted image, then
# drop PORTAGE_BINPKG_FILE and unlock.
3347 # This gives bashrc users an opportunity to do various things
3348 # such as remove binary packages after they're installed.
3349 settings = self.settings
3350 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3351 settings.backup_changes("PORTAGE_BINPKG_FILE")
3353 merge = EbuildMerge(find_blockers=self.find_blockers,
3354 ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3355 pkg=self.pkg, pkg_count=self.pkg_count,
3356 pkg_path=self._pkg_path, scheduler=self.scheduler,
3357 settings=settings, tree=self._tree, world_atom=self.world_atom)
3360 retval = merge.execute()
3362 settings.pop("PORTAGE_BINPKG_FILE", None)
3363 self._unlock_builddir()
# Downloads a tbz2 from the binhost using the configured FETCHCOMMAND /
# RESUMECOMMAND, with an optional distlock on the destination file.
# NOTE(review): interior lines are missing from this excerpt.
3366 class BinpkgFetcher(SpawnProcess):
3369 "locked", "pkg_path", "_lock_obj")
3371 def __init__(self, **kwargs):
3372 SpawnProcess.__init__(self, **kwargs)
3374 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
# _start fragment (def line missing): build the fetch URI and command.
3382 bintree = pkg.root_config.trees["bintree"]
3383 settings = bintree.settings
3384 use_locks = "distlocks" in settings.features
3385 pkg_path = self.pkg_path
# A partially-downloaded file triggers RESUMECOMMAND instead of
# FETCHCOMMAND (see fcmd_prefix below).
3386 resume = os.path.exists(pkg_path)
3388 # urljoin doesn't work correctly with
3389 # unrecognized protocols like sftp
3390 if bintree._remote_has_index:
3391 rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3393 rel_uri = pkg.cpv + ".tbz2"
3394 uri = bintree._remote_base_uri.rstrip("/") + \
3395 "/" + rel_uri.lstrip("/")
3397 uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3398 "/" + pkg.pf + ".tbz2"
# Prefer a protocol-specific command (e.g. FETCHCOMMAND_HTTP) and fall
# back to the generic one.
3400 protocol = urlparse.urlparse(uri)[0]
3401 fcmd_prefix = "FETCHCOMMAND"
3403 fcmd_prefix = "RESUMECOMMAND"
3404 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3406 fcmd = settings.get(fcmd_prefix)
3409 "DISTDIR" : os.path.dirname(pkg_path),
3411 "FILE" : os.path.basename(pkg_path)
3414 fetch_env = dict(settings.iteritems())
3415 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3416 for x in shlex.split(fcmd)]
3418 portage.util.ensure_dirs(os.path.dirname(pkg_path))
3422 if self.fd_pipes is None:
3424 fd_pipes = self.fd_pipes
3426 # Redirect all output to stdout since some fetchers like
3427 # wget pollute stderr (if portage detects a problem then it
3428 # can send it's own message to stderr).
3429 fd_pipes.setdefault(0, sys.stdin.fileno())
3430 fd_pipes.setdefault(1, sys.stdout.fileno())
3431 fd_pipes.setdefault(2, sys.stdout.fileno())
3433 self.args = fetch_args
3434 self.env = fetch_env
3435 SpawnProcess._start(self)
3437 def _set_returncode(self, wait_retval):
3438 SpawnProcess._set_returncode(self, wait_retval)
# lock(): single-use file lock on the download target; see docstring
# fragment below (surrounding def/quotes missing from this excerpt).
3444 This raises an AlreadyLocked exception if lock() is called
3445 while a lock is already held. In order to avoid this, call
3446 unlock() or check whether the "locked" attribute is True
3447 or False before calling lock().
3449 if self._lock_obj is not None:
3450 raise self.AlreadyLocked((self._lock_obj,))
3452 self._lock_obj = portage.locks.lockfile(
3453 self.pkg_path, wantnewlockfile=1)
3456 class AlreadyLocked(portage.exception.PortageException):
# unlock fragment: releasing is a no-op when no lock is held.
3460 if self._lock_obj is None:
3462 portage.locks.unlockfile(self._lock_obj)
3463 self._lock_obj = None
# Verifies a binary package's digests via bintree.digestCheck(), optionally
# redirecting stdout/stderr to a log file while doing so.
3466 class BinpkgVerifier(AsynchronousTask):
3467 __slots__ = ("logfile", "pkg",)
# Docstring fragment (quotes not visible in this excerpt):
3471 Note: Unlike a normal AsynchronousTask.start() method,
3472 this one does all work is synchronously. The returncode
3473 attribute will be set before it returns.
3477 root_config = pkg.root_config
3478 bintree = root_config.trees["bintree"]
# Temporarily swap sys.stdout/stderr so digestCheck() output lands in
# the log file when running in the background; restored below.
3480 stdout_orig = sys.stdout
3481 stderr_orig = sys.stderr
3483 if self.background and self.logfile is not None:
3484 log_file = open(self.logfile, 'a')
3486 if log_file is not None:
3487 sys.stdout = log_file
3488 sys.stderr = log_file
3490 bintree.digestCheck(pkg)
3491 except portage.exception.FileNotFound:
3492 writemsg("!!! Fetching Binary failed " + \
3493 "for '%s'\n" % pkg.cpv, noiselevel=-1)
3495 except portage.exception.DigestException, e:
3496 writemsg("\n!!! Digest verification failed:\n",
3498 writemsg("!!! %s\n" % e.value[0],
3500 writemsg("!!! Reason: %s\n" % e.value[1],
3502 writemsg("!!! Got: %s\n" % e.value[2],
3504 writemsg("!!! Expected: %s\n" % e.value[3],
3508 sys.stdout = stdout_orig
3509 sys.stderr = stderr_orig
3510 if log_file is not None:
3513 self.returncode = rval
# Extracts a tbz2 image by spawning `bzip2 -dqc | tar -xp` under bash;
# paths are shell-quoted via portage._shell_quote.
3516 class BinpkgExtractorAsync(SpawnProcess):
3518 __slots__ = ("image_dir", "pkg", "pkg_path")
3520 _shell_binary = portage.const.BASH_BINARY
# _start fragment (def line missing from this excerpt):
3523 self.args = [self._shell_binary, "-c",
3524 "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3525 (portage._shell_quote(self.pkg_path),
3526 portage._shell_quote(self.image_dir))]
3528 self.env = self.pkg.root_config.settings.environ()
3529 SpawnProcess._start(self)
# One entry of the merge list: dispatches to EbuildBuild, Binpkg, or (for
# uninstalls) PackageUninstall, and forwards poll/wait/install to the
# underlying task.  NOTE(review): many interior lines are missing from
# this excerpt; def lines for _start/poll/wait/merge are not all visible.
3531 class MergeListItem(CompositeTask):
# Docstring fragment (quotes not visible in this excerpt):
3534 TODO: For parallel scheduling, everything here needs asynchronous
3535 execution support (start, poll, and wait methods).
3538 __slots__ = ("args_set",
3539 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3540 "find_blockers", "logger", "mtimedb", "pkg",
3541 "pkg_count", "pkg_to_replace", "prefetcher",
3542 "settings", "statusMessage", "world_atom") + \
3548 build_opts = self.build_opts
3551 # uninstall, executed by self.merge()
3552 self.returncode = os.EX_OK
3556 args_set = self.args_set
3557 find_blockers = self.find_blockers
3558 logger = self.logger
3559 mtimedb = self.mtimedb
3560 pkg_count = self.pkg_count
3561 scheduler = self.scheduler
3562 settings = self.settings
3563 world_atom = self.world_atom
3564 ldpath_mtimes = mtimedb["ldpath"]
# Build the one-line status message shown for this merge-list entry.
3566 action_desc = "Emerging"
3568 if pkg.type_name == "binary":
3569 action_desc += " binary"
3571 if build_opts.fetchonly:
3572 action_desc = "Fetching"
3574 msg = "%s (%s of %s) %s" % \
3576 colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3577 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3578 colorize("GOOD", pkg.cpv))
3581 msg += " %s %s" % (preposition, pkg.root)
3583 if not build_opts.pretend:
3584 self.statusMessage(msg)
3585 logger.log(" >>> emerge (%s of %s) %s to %s" % \
3586 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# Source package: delegate to EbuildBuild.
3588 if pkg.type_name == "ebuild":
3590 build = EbuildBuild(args_set=args_set,
3591 background=self.background,
3592 config_pool=self.config_pool,
3593 find_blockers=find_blockers,
3594 ldpath_mtimes=ldpath_mtimes, logger=logger,
3595 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3596 prefetcher=self.prefetcher, scheduler=scheduler,
3597 settings=settings, world_atom=world_atom)
3599 self._install_task = build
3600 self._start_task(build, self._default_final_exit)
# Binary package: delegate to Binpkg.
3603 elif pkg.type_name == "binary":
3605 binpkg = Binpkg(background=self.background,
3606 find_blockers=find_blockers,
3607 ldpath_mtimes=ldpath_mtimes, logger=logger,
3608 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3609 prefetcher=self.prefetcher, settings=settings,
3610 scheduler=scheduler, world_atom=world_atom)
3612 self._install_task = binpkg
3613 self._start_task(binpkg, self._default_final_exit)
# poll/wait fragments: forward to the delegated install task.
3617 self._install_task.poll()
3618 return self.returncode
3621 self._install_task.wait()
3622 return self.returncode
# merge fragment (def line missing): run PackageUninstall synchronously
# for uninstall entries, then delegate install() to the inner task.
3627 build_opts = self.build_opts
3628 find_blockers = self.find_blockers
3629 logger = self.logger
3630 mtimedb = self.mtimedb
3631 pkg_count = self.pkg_count
3632 prefetcher = self.prefetcher
3633 scheduler = self.scheduler
3634 settings = self.settings
3635 world_atom = self.world_atom
3636 ldpath_mtimes = mtimedb["ldpath"]
3639 if not (build_opts.buildpkgonly or \
3640 build_opts.fetchonly or build_opts.pretend):
3642 uninstall = PackageUninstall(background=self.background,
3643 ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3644 pkg=pkg, scheduler=scheduler, settings=settings)
3647 retval = uninstall.wait()
3648 if retval != os.EX_OK:
3652 if build_opts.fetchonly or \
3653 build_opts.buildpkgonly:
3654 return self.returncode
3656 retval = self._install_task.install()
# Thin task around MergeListItem.merge(): prints the install/uninstall
# status line and records the merge result as this task's returncode.
3659 class PackageMerge(AsynchronousTask):
# Docstring fragment (quotes not visible in this excerpt):
3661 TODO: Implement asynchronous merge so that the scheduler can
3662 run while a merge is executing.
3665 __slots__ = ("merge",)
3669 pkg = self.merge.pkg
3670 pkg_count = self.merge.pkg_count
# NOTE(review): the condition choosing Uninstalling vs Installing is on
# a line missing from this excerpt.
3673 action_desc = "Uninstalling"
3674 preposition = "from"
3676 action_desc = "Installing"
3679 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
3682 msg += " %s %s" % (preposition, pkg.root)
3684 if not self.merge.build_opts.fetchonly and \
3685 not self.merge.build_opts.pretend and \
3686 not self.merge.build_opts.buildpkgonly:
3687 self.merge.statusMessage(msg)
3689 self.returncode = self.merge.merge()
# Base class for command-line dependency arguments (atoms, packages, sets).
# NOTE(review): an interior __init__ line is missing from this excerpt
# (presumably `self.arg = arg`; subclasses read self.arg).
3692 class DependencyArg(object):
3693 def __init__(self, arg=None, root_config=None):
3695 self.root_config = root_config
# Dependency argument holding a single dependency atom; coerced to a
# portage.dep.Atom and exposed as a one-element `set` tuple.
# NOTE(review): the line assigning self.atom from the `atom` parameter is
# missing from this excerpt.
3700 class AtomArg(DependencyArg):
3701 def __init__(self, atom=None, **kwargs):
3702 DependencyArg.__init__(self, **kwargs)
3704 if not isinstance(self.atom, portage.dep.Atom):
3705 self.atom = portage.dep.Atom(self.atom)
3706 self.set = (self.atom, )
class PackageArg(DependencyArg):
	"""Dependency argument wrapping an already-resolved Package.

	Builds an exact-version atom ("=" + cpv) for the package and
	exposes it as the one-element `set` tuple, matching the interface
	of the other DependencyArg subclasses.
	"""
	def __init__(self, package=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		self.package = package
		exact_atom = portage.dep.Atom("=" + package.cpv)
		self.atom = exact_atom
		self.set = (exact_atom, )
# Dependency argument for a package set (@world, @system, ...); strips the
# SETPREFIX from self.arg to get the set name.  NOTE(review): interior
# lines (e.g. assigning self.set) are missing from this excerpt.
3715 class SetArg(DependencyArg):
3716 def __init__(self, set=None, **kwargs):
3717 DependencyArg.__init__(self, **kwargs)
3719 self.name = self.arg[len(SETPREFIX):]
# A single edge in the dependency graph; priority defaults to DepPriority()
# when not supplied.  NOTE(review): the body of the depth-default branch is
# missing from this excerpt.
3724 class Dependency(SlotObject):
3725 __slots__ = ("atom", "blocker", "depth",
3726 "parent", "onlydeps", "priority", "root")
3727 def __init__(self, **kwargs):
3728 SlotObject.__init__(self, **kwargs)
3729 if self.priority is None:
3730 self.priority = DepPriority()
3731 if self.depth is None:
class BlockerCache(DictMixin):
    """This caches blockers of installed packages so that dep_check does not
    have to be done for every single installed package on every invocation of
    emerge. The cache is invalidated whenever it is detected that something
    has changed that might alter the results of dep_check() calls:
    1) the set of installed packages (including COUNTER) has changed
    2) the old-style virtuals have changed

    NOTE(review): this listing is an incomplete extraction — several
    original lines (try: statements, some def lines, branch bodies) are
    missing; gaps are marked below. Confirm against the full source.
    """

    # Number of uncached packages to trigger cache update, since
    # it's wasteful to update it for every vdb change.
    _cache_threshold = 5

    class BlockerData(object):
        # Value record stored per installed cpv: the package's vdb
        # COUNTER plus its blocker atoms.
        __slots__ = ("__weakref__", "atoms", "counter")

        def __init__(self, counter, atoms):
            self.counter = counter
            # NOTE(review): the assignment of 'atoms' to self.atoms is
            # missing from this listing.

    def __init__(self, myroot, vardb):
        self._virtuals = vardb.settings.getvirtuals()
        # On-disk pickle lives under ROOT + CACHE_PATH.
        self._cache_filename = os.path.join(myroot,
            portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
        self._cache_version = "1"
        self._cache_data = None
        # cpvs whose cached data changed since the last flush().
        self._modified = set()
        # Load the pickled cache (the enclosing 'try:' line is missing
        # from this listing).
        f = open(self._cache_filename)
        mypickle = pickle.Unpickler(f)
        # Forbid resolution of arbitrary globals while unpickling, as a
        # hardening measure against malicious cache files.
        mypickle.find_global = None
        self._cache_data = mypickle.load()
        except (IOError, OSError, EOFError, pickle.UnpicklingError), e:
            if isinstance(e, pickle.UnpicklingError):
                writemsg("!!! Error loading '%s': %s\n" % \
                    (self._cache_filename, str(e)), noiselevel=-1)

        # A usable cache must match the current version and be a dict
        # containing a dict of blockers.
        cache_valid = self._cache_data and \
            isinstance(self._cache_data, dict) and \
            self._cache_data.get("version") == self._cache_version and \
            isinstance(self._cache_data.get("blockers"), dict)
        # Validate all the atoms and counters so that
        # corruption is detected as soon as possible.
        invalid_items = set()
        for k, v in self._cache_data["blockers"].iteritems():
            if not isinstance(k, basestring):
                invalid_items.add(k)
            # Keys must parse as valid cat/pkg-version strings.
            if portage.catpkgsplit(k) is None:
                invalid_items.add(k)
            except portage.exception.InvalidData:
                invalid_items.add(k)
            # Each value must be a (counter, atoms) pair; the second half
            # of this condition is missing from this listing.
            if not isinstance(v, tuple) or \
                invalid_items.add(k)
            if not isinstance(counter, (int, long)):
                invalid_items.add(k)
            if not isinstance(atoms, (list, tuple)):
                invalid_items.add(k)
            invalid_atom = False
            if not isinstance(atom, basestring):
            # Blocker atoms must start with "!" and parse as valid atoms.
            if atom[:1] != "!" or \
                not portage.isvalidatom(
                atom, allow_blockers=True):
                invalid_items.add(k)
        # Drop every corrupt entry detected above.
        for k in invalid_items:
            del self._cache_data["blockers"][k]
        if not self._cache_data["blockers"]:
        # (Re)initialize an empty cache structure when none was usable.
        self._cache_data = {"version":self._cache_version}
        self._cache_data["blockers"] = {}
        self._cache_data["virtuals"] = self._virtuals
        self._modified.clear()

    # NOTE(review): the 'def flush(self):' line is missing from this
    # listing; the docstring and body below belong to flush().
        """If the current user has permission and the internal blocker cache
        has been updated, save it to disk and mark it unmodified. This is
        called by emerge after it has processed blockers for all installed
        packages. Currently, the cache is only written if the user has
        superuser privileges (since that's required to obtain a lock), but
        all users have read access and benefit from faster blocker lookups
        (as long as the entire cache is still valid). The cache is stored as
        a pickled dict object with the following format:

            "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
            "virtuals" : vardb.settings.getvirtuals()
        """
        # Only write once enough entries have changed to justify the I/O;
        # the second half of this condition is missing from this listing.
        if len(self._modified) >= self._cache_threshold and \
            f = portage.util.atomic_ofstream(self._cache_filename)
            pickle.dump(self._cache_data, f, -1)
            # World-readable so non-root emerge invocations benefit too.
            portage.util.apply_secpass_permissions(
                self._cache_filename, gid=portage.portage_gid, mode=0644)
            except (IOError, OSError), e:
            self._modified.clear()

    def __setitem__(self, cpv, blocker_data):
        """
        Update the cache and mark it as modified for a future call to
        flush().

        @param cpv: Package for which to cache blockers.
        @param blocker_data: An object with counter and atoms attributes.
        @type blocker_data: BlockerData
        """
        # Store atoms as plain strings so the pickle stays simple.
        self._cache_data["blockers"][cpv] = \
            (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
        self._modified.add(cpv)

    # NOTE(review): the 'def __iter__(self):' line is missing from this
    # listing.
        return iter(self._cache_data["blockers"])

    def __delitem__(self, cpv):
        del self._cache_data["blockers"][cpv]

    def __getitem__(self, cpv):
        """
        @returns: An object with counter and atoms attributes.
        """
        return self.BlockerData(*self._cache_data["blockers"][cpv])

    # NOTE(review): a 'def keys(self):' (or similar DictMixin hook) line
    # is missing from this listing; the docstring below belongs to it.
        """This needs to be implemented so that self.__repr__() doesn't raise
        an AttributeError."""
class BlockerDB(object):
    """Finds blocker conflicts between a to-be-installed package and the
    currently installed package set, caching per-package blocker atoms via
    BlockerCache to avoid repeated dep_check() work.

    NOTE(review): this listing is an incomplete extraction — some lines
    (try: statements, dict closings, returns) are missing; gaps are
    marked below. Confirm against the full source.
    """

    def __init__(self, root_config):
        self._root_config = root_config
        self._vartree = root_config.trees["vartree"]
        self._portdb = root_config.trees["porttree"].dbapi
        # Lazily built on first _get_fake_vartree() call.
        self._dep_check_trees = None
        self._fake_vartree = None

    def _get_fake_vartree(self, acquire_lock=0):
        # Build the FakeVartree on first use; afterwards just sync it.
        fake_vartree = self._fake_vartree
        if fake_vartree is None:
            fake_vartree = FakeVartree(self._root_config,
                acquire_lock=acquire_lock)
            self._fake_vartree = fake_vartree
            # dep_check() resolves both port and var lookups against the
            # fake vartree so results reflect the cached installed state.
            self._dep_check_trees = { self._vartree.root : {
                "porttree" : fake_vartree,
                "vartree" : fake_vartree,
        # NOTE(review): dict-closing lines and presumably a
        # 'return fake_vartree' are missing from this listing.
        fake_vartree.sync(acquire_lock=acquire_lock)

    def findInstalledBlockers(self, new_pkg, acquire_lock=0):
        """Return the set of installed packages involved in a blocker
        relationship with new_pkg (checked in both directions)."""
        blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
        dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
        settings = self._vartree.settings
        # Entries left in stale_cache at the end belong to packages that
        # are no longer installed; they get purged from the cache.
        stale_cache = set(blocker_cache)
        fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
        dep_check_trees = self._dep_check_trees
        vardb = fake_vartree.dbapi
        installed_pkgs = list(vardb)

        for inst_pkg in installed_pkgs:
            stale_cache.discard(inst_pkg.cpv)
            cached_blockers = blocker_cache.get(inst_pkg.cpv)
            # A changed vdb COUNTER invalidates the cached entry.
            if cached_blockers is not None and \
                cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
                cached_blockers = None
            if cached_blockers is not None:
                blocker_atoms = cached_blockers.atoms
            # Use aux_get() to trigger FakeVartree global
            # updates on *DEPEND when appropriate.
            depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
            # Installed packages may predate strict depstring rules.
            portage.dep._dep_check_strict = False
            success, atoms = portage.dep_check(depstr,
                vardb, settings, myuse=inst_pkg.use.enabled,
                trees=dep_check_trees, myroot=inst_pkg.root)
            portage.dep._dep_check_strict = True
            # On failure, point the user at the offending vdb entry.
            pkg_location = os.path.join(inst_pkg.root,
                portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
            portage.writemsg("!!! %s/*DEPEND: %s\n" % \
                (pkg_location, atoms), noiselevel=-1)
            # Only the "!..." atoms are blockers.
            blocker_atoms = [atom for atom in atoms \
                if atom.startswith("!")]
            blocker_atoms.sort()
            counter = long(inst_pkg.metadata["COUNTER"])
            blocker_cache[inst_pkg.cpv] = \
                blocker_cache.BlockerData(counter, blocker_atoms)
        # Purge entries for no-longer-installed packages, then persist.
        for cpv in stale_cache:
            del blocker_cache[cpv]
        blocker_cache.flush()

        # Maps each blocker atom to the installed packages declaring it.
        blocker_parents = digraph()
        for pkg in installed_pkgs:
            for blocker_atom in blocker_cache[pkg.cpv].atoms:
                blocker_atom = blocker_atom.lstrip("!")
                blocker_atoms.append(blocker_atom)
                blocker_parents.add(blocker_atom, pkg)

        # Installed packages whose blockers match new_pkg.
        blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
        blocking_pkgs = set()
        for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
            blocking_pkgs.update(blocker_parents.parent_nodes(atom))

        # Check for blockers in the other direction.
        depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
        portage.dep._dep_check_strict = False
        success, atoms = portage.dep_check(depstr,
            vardb, settings, myuse=new_pkg.use.enabled,
            trees=dep_check_trees, myroot=new_pkg.root)
        portage.dep._dep_check_strict = True
        # We should never get this far with invalid deps.
        show_invalid_depstring_notice(new_pkg, depstr, atoms)

        # The continuation filtering "!" atoms is missing from this
        # listing.
        blocker_atoms = [atom.lstrip("!") for atom in atoms \
        blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
        for inst_pkg in installed_pkgs:
            # EAFP: StopIteration means no atom matches this installed
            # package (the enclosing 'try:' is missing from this listing).
            blocker_atoms.iterAtomsForPackage(inst_pkg).next()
            except (portage.exception.InvalidDependString, StopIteration):
            blocking_pkgs.add(inst_pkg)

        return blocking_pkgs
def show_invalid_depstring_notice(parent_node, depstring, error_msg):
    """Print a detailed error explaining an invalid/corrupt *DEPEND string.

    parent_node identifies the package (unpacked below as a
    (type, root, key, status) tuple), depstring is the offending
    dependency string, and error_msg the parse error. Output goes to the
    error log via writemsg_level().

    NOTE(review): 'msg = []' initialization and the 'else:' line appear
    to be missing from this listing — confirm against the full source.
    """
    msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
        "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
    p_type, p_root, p_key, p_status = parent_node
    if p_status == "nomerge":
        # Installed package: advise reinstall and point at the vdb files.
        category, pf = portage.catsplit(p_key)
        pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
        msg.append("Portage is unable to process the dependencies of the ")
        msg.append("'%s' package. " % p_key)
        msg.append("In order to correct this problem, the package ")
        msg.append("should be uninstalled, reinstalled, or upgraded. ")
        msg.append("As a temporary workaround, the --nodeps option can ")
        msg.append("be used to ignore all dependencies. For reference, ")
        msg.append("the problematic dependencies can be found in the ")
        msg.append("*DEPEND files located in '%s/'." % pkg_location)
    # Not-yet-installed package: ask the user to notify the maintainer.
        msg.append("This package can not be installed. ")
        msg.append("Please notify the '%s' package maintainer " % p_key)
        msg.append("about this problem.")

    # Wrap the advice at 72 columns and emit both parts as one error.
    msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
    writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
class PackageVirtualDbapi(portage.dbapi):
    """
    A dbapi-like interface class that represents the state of the installed
    package database as new packages are installed, replacing any packages
    that previously existed in the same slot. The main difference between
    this class and fakedbapi is that this one uses Package instances
    internally (passed in via cpv_inject() and cpv_remove() calls).

    NOTE(review): this listing is an incomplete extraction — several
    'def'/'return' lines and branch bodies are missing; gaps are marked
    below. Confirm against the full source.
    """
    def __init__(self, settings):
        portage.dbapi.__init__(self)
        self.settings = settings
        # Memoizes match()/cp_list() results; invalidated on change.
        self._match_cache = {}
        # NOTE(review): initialization of _cp_map/_cpv_map is missing
        # from this listing.

    # NOTE(review): the 'def clear(self):' line is missing here.
        """
        Remove all packages.
        """
        self._cp_map.clear()
        self._cpv_map.clear()

    # NOTE(review): the 'def copy(self):' line is missing here.
        obj = PackageVirtualDbapi(self.settings)
        obj._match_cache = self._match_cache.copy()
        obj._cp_map = self._cp_map.copy()
        # Copy the per-cp lists so mutations don't leak across copies.
        for k, v in obj._cp_map.iteritems():
            obj._cp_map[k] = v[:]
        obj._cpv_map = self._cpv_map.copy()

    # NOTE(review): the 'def __iter__(self):' line is missing here.
        return self._cpv_map.itervalues()

    def __contains__(self, item):
        # Membership requires the same Package instance, not merely one
        # with an equal cpv; the remainder of this method is missing
        # from this listing.
        existing = self._cpv_map.get(item.cpv)
        if existing is not None and \

    def get(self, item, default=None):
        cpv = getattr(item, "cpv", None)
        # Fall back to treating item as a (type, root, cpv, operation)
        # tuple when it isn't a Package-like object.
        type_name, root, cpv, operation = item

        existing = self._cpv_map.get(cpv)
        # NOTE(review): the remainder of this method is missing from this
        # listing.
        if existing is not None and \

    def match_pkgs(self, atom):
        # Resolve an atom to the actual Package instances.
        return [self._cpv_map[cpv] for cpv in self.match(atom)]

    def _clear_cache(self):
        # Invalidate derived caches after contents change.
        if self._categories is not None:
            self._categories = None
        if self._match_cache:
            self._match_cache = {}

    def match(self, origdep, use_cache=1):
        # Memoized wrapper around the base class match(); return lines
        # are missing from this listing.
        result = self._match_cache.get(origdep)
        if result is not None:
        result = portage.dbapi.match(self, origdep, use_cache=use_cache)
        self._match_cache[origdep] = result

    def cpv_exists(self, cpv):
        return cpv in self._cpv_map

    def cp_list(self, mycp, use_cache=1):
        cachelist = self._match_cache.get(mycp)
        # cp_list() doesn't expand old-style virtuals
        if cachelist and cachelist[0].startswith(mycp):
        cpv_list = self._cp_map.get(mycp)
        if cpv_list is None:
        cpv_list = [pkg.cpv for pkg in cpv_list]
        self._cpv_sort_ascending(cpv_list)
        # Don't cache results for unexpanded virtual/ misses.
        if not (not cpv_list and mycp.startswith("virtual/")):
            self._match_cache[mycp] = cpv_list

    # NOTE(review): the 'def cp_all(self):' line is missing here.
        return list(self._cp_map)

    # NOTE(review): the 'def cpv_all(self):' line is missing here.
        return list(self._cpv_map)

    def cpv_inject(self, pkg):
        cp_list = self._cp_map.get(pkg.cp)
        self._cp_map[pkg.cp] = cp_list
        # Replace any existing instance with the same cpv...
        e_pkg = self._cpv_map.get(pkg.cpv)
        if e_pkg is not None:
            self.cpv_remove(e_pkg)
        # ...and any package occupying the same slot.
        for e_pkg in cp_list:
            if e_pkg.slot_atom == pkg.slot_atom:
                self.cpv_remove(e_pkg)
        self._cpv_map[pkg.cpv] = pkg

    def cpv_remove(self, pkg):
        old_pkg = self._cpv_map.get(pkg.cpv)
        self._cp_map[pkg.cp].remove(pkg)
        del self._cpv_map[pkg.cpv]

    def aux_get(self, cpv, wants):
        # Missing metadata keys yield empty strings, per dbapi
        # convention.
        metadata = self._cpv_map[cpv].metadata
        return [metadata.get(x, "") for x in wants]

    def aux_update(self, cpv, values):
        self._cpv_map[cpv].metadata.update(values)
class depgraph(object):
    """Builds and manages the package dependency graph for emerge.

    NOTE(review): this class continues beyond the end of this listing.
    """
    # Shared mapping of package type names to tree names.
    pkg_tree_map = RootConfig.pkg_tree_map

    # Metadata keys whose values are parsed for dependencies.
    _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
    def __init__(self, settings, trees, myopts, myparams, spinner):
        """Build per-root fake trees and resolver state.

        @param settings: live portage config for the target root
        @param trees: per-root dict of porttree/bintree/vartree
        @param myopts: parsed emerge command-line options
        @param myparams: resolver parameters ("deep", "empty", ...)
        @param spinner: progress indicator

        NOTE(review): this listing is an incomplete extraction — several
        lines (e.g. self.trees/self.roots/self.mydbapi init, the
        'for pkg in vardb:' loop header, 'def graph_tree():',
        'self._sets = {}') are missing; confirm against the full source.
        """
        self.settings = settings
        self.target_root = settings["ROOT"]
        self.myopts = myopts
        self.myparams = myparams
        # Debug branch body is missing from this listing.
        if settings.get("PORTAGE_DEBUG", "") == "1":
        self.spinner = spinner
        self._running_root = trees["/"]["root_config"]
        self._opts_no_restart = Scheduler._opts_no_restart
        self.pkgsettings = {}
        # Maps slot atom to package for each Package added to the graph.
        self._slot_pkg_map = {}
        # Maps nodes to the reasons they were selected for reinstallation.
        self._reinstall_nodes = {}
        self._trees_orig = trees
        # Contains a filtered view of preferred packages that are selected
        # from available repositories.
        self._filtered_trees = {}
        # Contains installed packages and new packages that have been added
        # to the graph.
        self._graph_trees = {}
        # All Package instances
        self._pkg_cache = self._package_cache(self)
        for myroot in trees:
            self.trees[myroot] = {}
            # Create a RootConfig instance that references
            # the FakeVartree instead of the real one.
            self.roots[myroot] = RootConfig(
                trees[myroot]["vartree"].settings,
                trees[myroot]["root_config"].setconfig)
            for tree in ("porttree", "bintree"):
                self.trees[myroot][tree] = trees[myroot][tree]
            # The vartree is replaced by a FakeVartree so the graph can
            # model future installed state without touching the real vdb.
            self.trees[myroot]["vartree"] = \
                FakeVartree(trees[myroot]["root_config"],
                    pkg_cache=self._pkg_cache)
            self.pkgsettings[myroot] = portage.config(
                clone=self.trees[myroot]["vartree"].settings)
            self._slot_pkg_map[myroot] = {}
            vardb = self.trees[myroot]["vartree"].dbapi
            preload_installed_pkgs = "--nodeps" not in self.myopts and \
                "--buildpkgonly" not in self.myopts
            # This fakedbapi instance will model the state that the vdb will
            # have after new packages have been installed.
            fakedb = PackageVirtualDbapi(vardb.settings)
            if preload_installed_pkgs:
                self.spinner.update()
                # This triggers metadata updates via FakeVartree.
                vardb.aux_get(pkg.cpv, [])
                fakedb.cpv_inject(pkg)
            # Now that the vardb state is cached in our FakeVartree,
            # we won't be needing the real vartree cache for awhile.
            # To make some room on the heap, clear the vardbapi
            # caches.
            trees[myroot]["vartree"].dbapi._clear_cache()
            self.mydbapi[myroot] = fakedb
            graph_tree.dbapi = fakedb
            self._graph_trees[myroot] = {}
            self._filtered_trees[myroot] = {}
            # Substitute the graph tree for the vartree in dep_check() since we
            # want atom selections to be consistent with package selections
            # have already been made.
            self._graph_trees[myroot]["porttree"] = graph_tree
            self._graph_trees[myroot]["vartree"] = graph_tree
            def filtered_tree():
            filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
            self._filtered_trees[myroot]["porttree"] = filtered_tree
            # Passing in graph_tree as the vartree here could lead to better
            # atom selections in some cases by causing atoms for packages that
            # have been added to the graph to be preferred over other choices.
            # However, it can trigger atom selections that result in
            # unresolvable direct circular dependencies. For example, this
            # happens with gwydion-dylan which depends on either itself or
            # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
            # gwydion-dylan-bin needs to be selected in order to avoid a
            # an unresolvable direct circular dependency.
            #
            # To solve the problem described above, pass in "graph_db" so that
            # packages that have been added to the graph are distinguishable
            # from other available packages and installed packages. Also, pass
            # the parent package into self._select_atoms() calls so that
            # unresolvable direct circular dependencies can be detected and
            # avoided when possible.
            self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
            self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
            portdb = self.trees[myroot]["porttree"].dbapi
            bindb = self.trees[myroot]["bintree"].dbapi
            vardb = self.trees[myroot]["vartree"].dbapi
            # (db, pkg_type, built, installed, db_keys)
            if "--usepkgonly" not in self.myopts:
                db_keys = list(portdb._aux_cache_keys)
                dbs.append((portdb, "ebuild", False, False, db_keys))
            if "--usepkg" in self.myopts:
                db_keys = list(bindb._aux_cache_keys)
                dbs.append((bindb, "binary", True, False, db_keys))
            db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
            dbs.append((vardb, "installed", True, True, db_keys))
            self._filtered_trees[myroot]["dbs"] = dbs
            if "--usepkg" in self.myopts:
                self.trees[myroot]["bintree"].populate(
                    "--getbinpkg" in self.myopts,
                    "--getbinpkgonly" in self.myopts)
        self.digraph=portage.digraph()
        # contains all sets added to the graph
        # contains atoms given as arguments
        self._sets["args"] = InternalPackageSet()
        # contains all atoms from all sets added to the graph, including
        # atoms given as arguments
        self._set_atoms = InternalPackageSet()
        self._atom_arg_map = {}
        # contains all nodes pulled in by self._set_atoms
        self._set_nodes = set()
        # Contains only Blocker -> Uninstall edges
        self._blocker_uninstalls = digraph()
        # Contains only Package -> Blocker edges
        self._blocker_parents = digraph()
        # Contains only irrelevant Package -> Blocker edges
        self._irrelevant_blockers = digraph()
        # Contains only unsolvable Package -> Blocker edges
        self._unsolvable_blockers = digraph()
        self._slot_collision_info = {}
        # Slot collision nodes are not allowed to block other packages since
        # blocker validation is only able to account for one package per slot.
        self._slot_collision_nodes = set()
        self._parent_atoms = {}
        self._slot_conflict_parent_atoms = set()
        self._serialized_tasks_cache = None
        self._scheduler_graph = None
        self._displayed_list = None
        self._pprovided_args = []
        self._missing_args = []
        # Installed packages masked by the current settings.
        self._masked_installed = set()
        self._unsatisfied_deps_for_display = []
        self._unsatisfied_blockers_for_display = None
        self._circular_deps_for_display = None
        self._dep_stack = []
        self._unsatisfied_deps = []
        self._initially_unsatisfied_deps = []
        self._ignored_deps = []
        self._required_set_names = set(["system", "world"])
        # Strategy hooks: highest-available selection by default.
        self._select_atoms = self._select_atoms_highest_available
        self._select_package = self._select_pkg_highest_available
        self._highest_pkg_cache = {}
    def _show_slot_collision_notice(self):
        """Show an informational message advising the user to mask one of the
        the packages. In some cases it may be possible to resolve this
        automatically, but support for backtracking (removal nodes that have
        already been selected) will be required in order to handle all possible
        cases.

        NOTE(review): this listing is an incomplete extraction — several
        lines ('return', 'msg = []', 'indent'/'max_parents' setup,
        'else:'/'break' lines) are missing; confirm against the full
        source.
        """
        # Nothing to report when no slot collisions were recorded.
        if not self._slot_collision_info:

        self._show_merge_list()

        msg.append("\n!!! Multiple package instances within a single " + \
            "package slot have been pulled\n")
        msg.append("!!! into the dependency graph, resulting" + \
            " in a slot conflict:\n\n")

        # Max number of parents shown, to avoid flooding the display.
        explanation_columns = 70
        for (slot_atom, root), slot_nodes \
            in self._slot_collision_info.iteritems():
            msg.append(str(slot_atom))
            for node in slot_nodes:
                msg.append(str(node))
                parent_atoms = self._parent_atoms.get(node)
                # Prefer conflict atoms over others.
                for parent_atom in parent_atoms:
                    if len(pruned_list) >= max_parents:
                    if parent_atom in self._slot_conflict_parent_atoms:
                        pruned_list.add(parent_atom)
                # If this package was pulled in by conflict atoms then
                # show those alone since those are the most interesting.
                # When generating the pruned list, prefer instances
                # of DependencyArg over instances of Package.
                for parent_atom in parent_atoms:
                    if len(pruned_list) >= max_parents:
                    parent, atom = parent_atom
                    if isinstance(parent, DependencyArg):
                        pruned_list.add(parent_atom)
                # Prefer Packages instances that themselves have been
                # pulled into collision slots.
                for parent_atom in parent_atoms:
                    if len(pruned_list) >= max_parents:
                    parent, atom = parent_atom
                    if isinstance(parent, Package) and \
                        (parent.slot_atom, parent.root) \
                        in self._slot_collision_info:
                        pruned_list.add(parent_atom)
                # Finally, fill remaining display slots with any parents.
                for parent_atom in parent_atoms:
                    if len(pruned_list) >= max_parents:
                    pruned_list.add(parent_atom)
                omitted_parents = len(parent_atoms) - len(pruned_list)
                parent_atoms = pruned_list
                msg.append(" pulled in by\n")
                for parent_atom in parent_atoms:
                    parent, atom = parent_atom
                    msg.append(2*indent)
                    if isinstance(parent,
                        (PackageArg, AtomArg)):
                        # For PackageArg and AtomArg types, it's
                        # redundant to display the atom attribute.
                        msg.append(str(parent))
                    # Display the specific atom from SetArg or
                    # Package parents (else-branch line missing here).
                    msg.append("%s required by %s" % (atom, parent))
                msg.append(2*indent)
                msg.append("(and %d more)\n" % omitted_parents)
                msg.append(" (no parents)\n")
                explanation = self._slot_conflict_explanation(slot_nodes)
                msg.append(indent + "Explanation:\n\n")
                for line in textwrap.wrap(explanation, explanation_columns):
                    msg.append(2*indent + line + "\n")
        sys.stderr.write("".join(msg))

        # Skip the generic advice when every conflict got an explanation
        # or the user asked for quiet output.
        explanations_for_all = explanations == len(self._slot_collision_info)
        if explanations_for_all or "--quiet" in self.myopts:

        msg.append("It may be possible to solve this problem ")
        msg.append("by using package.mask to prevent one of ")
        msg.append("those packages from being selected. ")
        msg.append("However, it is also possible that conflicting ")
        msg.append("dependencies exist such that they are impossible to ")
        msg.append("satisfy simultaneously. If such a conflict exists in ")
        msg.append("the dependencies of two different packages, then those ")
        msg.append("packages can not be installed simultaneously.")

        # Wrap the advice to 72 columns on stderr via a dumb formatter.
        from formatter import AbstractFormatter, DumbWriter
        f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
        f.add_flowing_data(x)

        msg.append("For more information, see MASKED PACKAGES ")
        msg.append("section in the emerge man page or refer ")
        msg.append("to the Gentoo Handbook.")
        f.add_flowing_data(x)
    def _slot_conflict_explanation(self, slot_nodes):
        """
        When a slot conflict occurs due to USE deps, there are a few
        different cases to consider:

        1) New USE are correctly set but --newuse wasn't requested so an
           installed package with incorrect USE happened to get pulled
           into graph before the new one.

        2) New USE are incorrectly set but an installed package has correct
           USE so it got pulled into the graph, and a new instance also got
           pulled in due to --newuse or an upgrade.

        3) Multiple USE deps exist that can't be satisfied simultaneously,
           and multiple package instances got pulled into the same slot to
           satisfy the conflicting deps.

        Currently, explanations and suggested courses of action are generated
        for cases 1 and 2. Case 3 is too complex to give a useful suggestion.

        NOTE(review): this listing is an incomplete extraction — several
        'return'/'continue'/'break' lines and the 'matched_node = None'
        initialization are missing; confirm against the full source.
        """
        if len(slot_nodes) != 2:
            # Suggestions are only implemented for
            # conflicts between two packages.

        all_conflict_atoms = self._slot_conflict_parent_atoms

        matched_atoms = None
        unmatched_node = None
        for node in slot_nodes:
            parent_atoms = self._parent_atoms.get(node)
            if not parent_atoms:
                # Normally, there are always parent atoms. If there are
                # none then something unexpected is happening and there's
                # currently no suggestion for this case.
            conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
            for parent_atom in conflict_atoms:
                parent, atom = parent_atom
                # Suggestions are currently only implemented for cases
                # in which all conflict atoms have USE deps.
            if matched_node is not None:
                # If conflict atoms match multiple nodes
                # then there's no suggestion.
            matched_atoms = conflict_atoms
            if unmatched_node is not None:
                # Neither node is matched by conflict atoms, and
                # there is no suggestion for this case.
            unmatched_node = node

        if matched_node is None or unmatched_node is None:
            # This shouldn't happen.

        if unmatched_node.installed and not matched_node.installed:
            # Case 1: the stale installed instance is the one the
            # conflict atoms did NOT match.
            return "New USE are correctly set, but --newuse wasn't" + \
                " requested, so an installed package with incorrect USE " + \
                "happened to get pulled into the dependency graph. " + \
                "In order to solve " + \
                "this, either specify the --newuse option or explicitly " + \
                " reinstall '%s'." % matched_node.slot_atom

        if matched_node.installed and not unmatched_node.installed:
            # Case 2: the new instance's USE is wrong; list every
            # conflicting atom in the suggestion.
            atoms = sorted(set(atom for parent, atom in matched_atoms))
            explanation = ("New USE for '%s' are incorrectly set. " + \
                "In order to solve this, adjust USE to satisfy '%s'") % \
                (matched_node.slot_atom, atoms[0])
            # Join additional atoms grammatically: ", 'a'" ... " and 'z'".
            for atom in atoms[1:-1]:
                explanation += ", '%s'" % (atom,)
            explanation += " and '%s'" % (atoms[-1],)
    def _process_slot_conflicts(self):
        """
        Process slot conflict data to identify specific atoms which
        lead to conflict. These atoms only match a subset of the
        packages that have been pulled into a given slot.

        NOTE(review): this listing is an incomplete extraction — some
        'continue'/'else:' lines are missing; confirm against the full
        source.
        """
        for (slot_atom, root), slot_nodes \
            in self._slot_collision_info.iteritems():

            # Union of parent atoms across all conflicting packages.
            all_parent_atoms = set()
            for pkg in slot_nodes:
                parent_atoms = self._parent_atoms.get(pkg)
                if not parent_atoms:
                all_parent_atoms.update(parent_atoms)

            for pkg in slot_nodes:
                parent_atoms = self._parent_atoms.get(pkg)
                if parent_atoms is None:
                    parent_atoms = set()
                    self._parent_atoms[pkg] = parent_atoms
                for parent_atom in all_parent_atoms:
                    # Skip atoms already attributed to this package.
                    if parent_atom in parent_atoms:
                    # Use package set for matching since it will match via
                    # PROVIDE when necessary, while match_from_list does not.
                    parent, atom = parent_atom
                    atom_set = InternalPackageSet(
                        initial_atoms=(atom,))
                    if atom_set.findAtomForPackage(pkg):
                        parent_atoms.add(parent_atom)
                    # An atom that fails to match some package in the
                    # slot is a conflict atom ('else:' line missing here).
                        self._slot_conflict_parent_atoms.add(parent_atom)
    def _reinstall_for_flags(self, forced_flags,
        orig_use, orig_iuse, cur_use, cur_iuse):
        """Return a set of flags that trigger reinstallation, or None if there
        are no such flags.

        NOTE(review): the return statements appear to be missing from this
        listing — confirm against the full source.
        """
        if "--newuse" in self.myopts:
            # --newuse: any IUSE change (minus forced flags), plus any
            # change in the effectively-enabled flag set.
            flags = set(orig_iuse.symmetric_difference(
                cur_iuse).difference(forced_flags))
            flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
                cur_iuse.intersection(cur_use)))
        elif "changed-use" == self.myopts.get("--reinstall"):
            # --reinstall=changed-use: only changes to effectively
            # enabled flags matter.
            flags = orig_iuse.intersection(orig_use).symmetric_difference(
                cur_iuse.intersection(cur_use))
    def _create_graph(self, allow_unsatisfied=False):
        """Drain the dependency stack, adding each entry to the graph.

        Package entries have their deps expanded via _add_pkg_deps();
        Dependency entries are resolved via _add_dep().

        NOTE(review): the stack-draining loop header and the return
        statements are missing from this listing — confirm against the
        full source.
        """
        dep_stack = self._dep_stack
        self.spinner.update()
        dep = dep_stack.pop()
        if isinstance(dep, Package):
            # Failure here aborts graph creation.
            if not self._add_pkg_deps(dep,
                allow_unsatisfied=allow_unsatisfied):
        if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
    def _add_dep(self, dep, allow_unsatisfied=False):
        """Resolve a single Dependency and queue the selected package.

        Blocker atoms are recorded (not resolved) here; unsatisfied deps
        are either collected for later (allow_unsatisfied) or reported
        for display.

        NOTE(review): this listing is an incomplete extraction — several
        'return'/'try:'/blocker-branch lines are missing; confirm
        against the full source.
        """
        debug = "--debug" in self.myopts
        buildpkgonly = "--buildpkgonly" in self.myopts
        nodeps = "--nodeps" in self.myopts
        empty = "empty" in self.myparams
        deep = "deep" in self.myparams
        # --update only applies to near-argument deps (depth <= 1).
        update = "--update" in self.myopts and dep.depth <= 1
        if not buildpkgonly and \
            dep.parent not in self._slot_collision_nodes:
            if dep.parent.onlydeps:
                # It's safe to ignore blockers if the
                # parent is an --onlydeps node.
            # The blocker applies to the root where
            # the parent is or will be installed.
            blocker = Blocker(atom=dep.atom,
                eapi=dep.parent.metadata["EAPI"],
                root=dep.parent.root)
            self._blocker_parents.add(blocker, dep.parent)
        dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
            onlydeps=dep.onlydeps)
        if allow_unsatisfied:
            self._unsatisfied_deps.append(dep)
        self._unsatisfied_deps_for_display.append(
            ((dep.root, dep.atom), {"myparent":dep.parent}))
        # In some cases, dep_check will return deps that shouldn't
        # be proccessed any further, so they are identified and
        # discarded here. Try to discard as few as possible since
        # discarded dependencies reduce the amount of information
        # available for optimization of merge order.
        if dep.priority.satisfied and \
            not (existing_node or empty or deep or update):
            if dep.root == self.target_root:
                # EAFP: StopIteration means the package matches no
                # argument atom (the enclosing 'try:' is missing here).
                myarg = self._iter_atoms_for_pkg(dep_pkg).next()
                except StopIteration:
                except portage.exception.InvalidDependString:
                    if not dep_pkg.installed:
                        # This shouldn't happen since the package
                        # should have been masked.
            self._ignored_deps.append(dep)
        if not self._add_pkg(dep_pkg, dep):
4668 def _add_pkg(self, pkg, dep):
4675 myparent = dep.parent
4676 priority = dep.priority
4678 if priority is None:
4679 priority = DepPriority()
4681 Fills the digraph with nodes comprised of packages to merge.
4682 mybigkey is the package spec of the package to merge.
4683 myparent is the package depending on mybigkey ( or None )
4684 addme = Should we add this package to the digraph or are we just looking at it's deps?
4685 Think --onlydeps, we need to ignore packages in that case.
4688 #IUSE-aware emerge -> USE DEP aware depgraph
4689 #"no downgrade" emerge
4691 # Ensure that the dependencies of the same package
4692 # are never processed more than once.
4693 previously_added = pkg in self.digraph
4695 # select the correct /var database that we'll be checking against
4696 vardbapi = self.trees[pkg.root]["vartree"].dbapi
4697 pkgsettings = self.pkgsettings[pkg.root]
4702 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
4703 except portage.exception.InvalidDependString, e:
4704 if not pkg.installed:
4705 show_invalid_depstring_notice(
4706 pkg, pkg.metadata["PROVIDE"], str(e))
4710 if not pkg.onlydeps:
4711 if not pkg.installed and \
4712 "empty" not in self.myparams and \
4713 vardbapi.match(pkg.slot_atom):
4714 # Increase the priority of dependencies on packages that
4715 # are being rebuilt. This optimizes merge order so that
4716 # dependencies are rebuilt/updated as soon as possible,
4717 # which is needed especially when emerge is called by
4718 # revdep-rebuild since dependencies may be affected by ABI
4719 # breakage that has rendered them useless. Don't adjust
4720 # priority here when in "empty" mode since all packages
4721 # are being merged in that case.
4722 priority.rebuild = True
4724 existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
4725 slot_collision = False
4727 existing_node_matches = pkg.cpv == existing_node.cpv
4728 if existing_node_matches and \
4729 pkg != existing_node and \
4730 dep.atom is not None:
4731 # Use package set for matching since it will match via
4732 # PROVIDE when necessary, while match_from_list does not.
4733 atom_set = InternalPackageSet(initial_atoms=[dep.atom])
4734 if not atom_set.findAtomForPackage(existing_node):
4735 existing_node_matches = False
4736 if existing_node_matches:
4737 # The existing node can be reused.
4739 for parent_atom in arg_atoms:
4740 parent, atom = parent_atom
4741 self.digraph.add(existing_node, parent,
4743 self._add_parent_atom(existing_node, parent_atom)
4744 # If a direct circular dependency is not an unsatisfied
4745 # buildtime dependency then drop it here since otherwise
4746 # it can skew the merge order calculation in an unwanted
4748 if existing_node != myparent or \
4749 (priority.buildtime and not priority.satisfied):
4750 self.digraph.addnode(existing_node, myparent,
4752 if dep.atom is not None and dep.parent is not None:
4753 self._add_parent_atom(existing_node,
4754 (dep.parent, dep.atom))
4758 # A slot collision has occurred. Sometimes this coincides
4759 # with unresolvable blockers, so the slot collision will be
4760 # shown later if there are no unresolvable blockers.
4761 self._add_slot_conflict(pkg)
4762 slot_collision = True
4765 # Now add this node to the graph so that self.display()
4766 # can show use flags and --tree portage.output. This node is
4767 # only being partially added to the graph. It must not be
4768 # allowed to interfere with the other nodes that have been
4769 # added. Do not overwrite data for existing nodes in
4770 # self.mydbapi since that data will be used for blocker
4772 # Even though the graph is now invalid, continue to process
4773 # dependencies so that things like --fetchonly can still
4774 # function despite collisions.
4777 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
4778 self.mydbapi[pkg.root].cpv_inject(pkg)
4780 if not pkg.installed:
4781 # Allow this package to satisfy old-style virtuals in case it
4782 # doesn't already. Any pre-existing providers will be preferred
4785 pkgsettings.setinst(pkg.cpv, pkg.metadata)
4786 # For consistency, also update the global virtuals.
4787 settings = self.roots[pkg.root].settings
4789 settings.setinst(pkg.cpv, pkg.metadata)
4791 except portage.exception.InvalidDependString, e:
4792 show_invalid_depstring_notice(
4793 pkg, pkg.metadata["PROVIDE"], str(e))
4798 self._set_nodes.add(pkg)
4800 # Do this even when addme is False (--onlydeps) so that the
4801 # parent/child relationship is always known in case
4802 # self._show_slot_collision_notice() needs to be called later.
4803 self.digraph.add(pkg, myparent, priority=priority)
4804 if dep.atom is not None and dep.parent is not None:
4805 self._add_parent_atom(pkg, (dep.parent, dep.atom))
4808 for parent_atom in arg_atoms:
4809 parent, atom = parent_atom
4810 self.digraph.add(pkg, parent, priority=priority)
4811 self._add_parent_atom(pkg, parent_atom)
4813 """ This section determines whether we go deeper into dependencies or not.
4814 We want to go deeper on a few occasions:
4815 Installing package A, we need to make sure package A's deps are met.
4816 emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
4817 If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
4819 dep_stack = self._dep_stack
4820 if "recurse" not in self.myparams:
4822 elif pkg.installed and \
4823 "deep" not in self.myparams:
4824 dep_stack = self._ignored_deps
4826 self.spinner.update()
4831 if not previously_added:
4832 dep_stack.append(pkg)
4835 def _add_parent_atom(self, pkg, parent_atom):
4836 parent_atoms = self._parent_atoms.get(pkg)
4837 if parent_atoms is None:
4838 parent_atoms = set()
4839 self._parent_atoms[pkg] = parent_atoms
4840 parent_atoms.add(parent_atom)
4842 def _add_slot_conflict(self, pkg):
4843 self._slot_collision_nodes.add(pkg)
4844 slot_key = (pkg.slot_atom, pkg.root)
4845 slot_nodes = self._slot_collision_info.get(slot_key)
4846 if slot_nodes is None:
4848 slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
4849 self._slot_collision_info[slot_key] = slot_nodes
def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
    # Expand pkg's DEPEND/RDEPEND/PDEPEND strings into Dependency
    # objects and feed each one to self._add_dep().
    # NOTE(review): several lines of this method appear to have been
    # elided in this copy (e.g. the initialization of "edepend",
    # "deps", "jbigkey", the loop filling edepend from depkeys, and
    # the try: statements matched by the except clauses below) --
    # confirm structure against the full source before editing.
    mytype = pkg.type_name
    metadata = pkg.metadata
    myuse = pkg.use.enabled
    # Children of pkg sit one level deeper in the graph.
    depth = pkg.depth + 1
    removal_action = "remove" in self.myparams

    depkeys = ["DEPEND","RDEPEND","PDEPEND"]
    edepend[k] = metadata[k]

    if not pkg.built and \
        "--buildpkgonly" in self.myopts and \
        "deep" not in self.myparams and \
        "empty" not in self.myparams:
        # --buildpkgonly without --deep/--empty: only build-time deps
        # are needed, so drop run-time and post deps.
        edepend["RDEPEND"] = ""
        edepend["PDEPEND"] = ""
    bdeps_satisfied = False

    if pkg.built and not removal_action:
        if self.myopts.get("--with-bdeps", "n") == "y":
            # Pull in build time deps as requested, but mark them as
            # "satisfied" since they are not strictly required. This allows
            # more freedom in the merge order calculation for solving
            # circular dependencies. Don't convert to PDEPEND since that
            # could make --with-bdeps=y less effective if it is used to
            # adjust merge order to prevent built_with_use() calls from
            bdeps_satisfied = True
            # built packages do not have build time dependencies.
            edepend["DEPEND"] = ""

    if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
        edepend["DEPEND"] = ""

    # (root, depstring, priority) triples; build deps resolve against
    # "/", run-time deps against the target root.
    ("/", edepend["DEPEND"],
        self._priority(buildtime=True, satisfied=bdeps_satisfied)),
    (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
    (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))

    debug = "--debug" in self.myopts
    # Installed packages get lenient depstring parsing.
    strict = mytype != "installed"
    for dep_root, dep_string, dep_priority in deps:
        # Decrease priority so that --buildpkgonly
        # hasallzeros() works correctly.
        dep_priority = DepPriority()
        print "Parent:    ", jbigkey
        print "Depstring:", dep_string
        print "Priority:", dep_priority
        vardb = self.roots[dep_root].trees["vartree"].dbapi
        selected_atoms = self._select_atoms(dep_root,
            dep_string, myuse=myuse, parent=pkg, strict=strict)
        except portage.exception.InvalidDependString, e:
            show_invalid_depstring_notice(jbigkey, dep_string, str(e))
        print "Candidates:", selected_atoms
        for atom in selected_atoms:
            atom = portage.dep.Atom(atom)
            mypriority = dep_priority.copy()
            if not atom.blocker and vardb.match(atom):
                # Already satisfied by an installed package.
                mypriority.satisfied = True
            if not self._add_dep(Dependency(atom=atom,
                blocker=atom.blocker, depth=depth, parent=pkg,
                priority=mypriority, root=dep_root),
                allow_unsatisfied=allow_unsatisfied):
        except portage.exception.InvalidAtom, e:
            show_invalid_depstring_notice(
                pkg, dep_string, str(e))
        if not pkg.installed:
        print "Exiting...", jbigkey
    except portage.exception.AmbiguousPackageName, e:
        # A dependency atom without a category matched several
        # category/name pairs; report and bail.
        portage.writemsg("\n\n!!! An atom in the dependencies " + \
            "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
        portage.writemsg("    %s\n" % cpv, noiselevel=-1)
        portage.writemsg("\n", noiselevel=-1)
        if mytype == "binary":
            "!!! This binary package cannot be installed: '%s'\n" % \
                mykey, noiselevel=-1)
        elif mytype == "ebuild":
            portdb = self.roots[myroot].trees["porttree"].dbapi
            myebuild, mylocation = portdb.findname2(mykey)
            portage.writemsg("!!! This ebuild cannot be installed: " + \
                "'%s'\n" % myebuild, noiselevel=-1)
        portage.writemsg("!!! Please notify the package maintainer " + \
            "that atoms must be fully-qualified.\n", noiselevel=-1)
def _priority(self, **kwargs):
    """Construct a dependency priority object of the class appropriate
    for the current operation: UnmergeDepPriority when "remove" is in
    self.myparams, DepPriority otherwise.

    Keyword arguments are forwarded unchanged to the constructor.
    """
    if "remove" in self.myparams:
        priority_constructor = UnmergeDepPriority
    else:
        # In the reviewed copy this assignment was unconditional,
        # clobbering the UnmergeDepPriority choice above.
        priority_constructor = DepPriority
    return priority_constructor(**kwargs)
def _dep_expand(self, root_config, atom_without_category):
    """
    Expand an atom that lacks a category component into candidate
    atoms with categories, based on package names known to the
    configured databases.

    @param root_config: a root config instance
    @type root_config: RootConfig
    @param atom_without_category: an atom without a category component
    @type atom_without_category: String
    @returns: a list of atoms containing categories (possibly empty)
    """
    # NOTE(review): this copy is missing lines (initialization of
    # cp_set and deps, the matching logic inside the loops, and the
    # final return) -- confirm against the full source.
    # Insert the dummy "null" category just to parse out the
    # package-name part of the atom.
    null_cp = portage.dep_getkey(insert_category_into_atom(
        atom_without_category, "null"))
    cat, atom_pn = portage.catsplit(null_cp)
    # Gather all known category/package names from every db.
    for db, pkg_type, built, installed, db_keys in \
        self._filtered_trees[root_config.root]["dbs"]:
        cp_set.update(db.cp_all())
    for cp in list(cp_set):
        cat, pn = portage.catsplit(cp)
    cat, pn = portage.catsplit(cp)
    # Re-insert each candidate category into the original atom text.
    deps.append(insert_category_into_atom(
        atom_without_category, cat))
5005 def _have_new_virt(self, root, atom_cp):
5007 for db, pkg_type, built, installed, db_keys in \
5008 self._filtered_trees[root]["dbs"]:
5009 if db.cp_list(atom_cp):
def _iter_atoms_for_pkg(self, pkg):
    # Iterate over the argument atoms that match pkg, filtering out
    # atoms superseded by a new-style virtual or by a higher-slot
    # visible package.
    # NOTE(review): this copy is missing lines (the bodies of several
    # guards, the initialization of higher_slot, the yield/continue
    # statements, and the end of the final condition) -- confirm
    # against the full source before editing.
    # TODO: add multiple $ROOT support
    if pkg.root != self.target_root:
    atom_arg_map = self._atom_arg_map
    root_config = self.roots[pkg.root]
    for atom in self._set_atoms.iterAtomsForPackage(pkg):
        atom_cp = portage.dep_getkey(atom)
        # Skip atoms whose category/name now has a new-style virtual.
        if atom_cp != pkg.cp and \
            self._have_new_virt(pkg.root, atom_cp):
        visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
        visible_pkgs.reverse() # descending order
        for visible_pkg in visible_pkgs:
            if visible_pkg.cp != atom_cp:
            if pkg >= visible_pkg:
                # This is descending order, and we're not
                # interested in any versions <= pkg given.
            if pkg.slot_atom != visible_pkg.slot_atom:
                # A newer version in a different slot exists.
                higher_slot = visible_pkg
        if higher_slot is not None:
        for arg in atom_arg_map[(atom, pkg.root)]:
            if isinstance(arg, PackageArg) and \
def select_files(self, myfiles):
    """Given a list of .tbz2s, .ebuilds sets, and deps, create the
    appropriate depgraph and return a favorite list."""
    # NOTE(review): this copy of the method is missing many lines
    # (e.g. the "for x in myfiles:" loop header over the argument
    # classification below, several try:/if: statements matched by
    # clauses further down, and the initialization of args,
    # myfavorites, lookup_owners, owners, relative_paths, greedy_atoms
    # and missing). The comments added here describe only what the
    # visible code shows; confirm structure against the full source.
    debug = "--debug" in self.myopts
    root_config = self.roots[self.target_root]
    sets = root_config.sets
    getSetAtoms = root_config.setconfig.getSetAtoms
    myroot = self.target_root
    # Database handles for the target root.
    dbs = self._filtered_trees[myroot]["dbs"]
    vardb = self.trees[myroot]["vartree"].dbapi
    real_vardb = self._trees_orig[myroot]["vartree"].dbapi
    portdb = self.trees[myroot]["porttree"].dbapi
    bindb = self.trees[myroot]["bintree"].dbapi
    pkgsettings = self.pkgsettings[myroot]
    onlydeps = "--onlydeps" in self.myopts
    # --- classify each argument: .tbz2 / .ebuild / path / set / atom ---
    ext = os.path.splitext(x)[1]
    # .tbz2 arguments: locate the binary package under PKGDIR.
    if not os.path.exists(x):
        os.path.join(pkgsettings["PKGDIR"], "All", x)):
        x = os.path.join(pkgsettings["PKGDIR"], "All", x)
    elif os.path.exists(
        os.path.join(pkgsettings["PKGDIR"], x)):
        x = os.path.join(pkgsettings["PKGDIR"], x)
    print "\n\n!!! Binary package '"+str(x)+"' does not exist."
    print "!!! Please ensure the tbz2 exists as specified.\n"
    return 0, myfavorites
    # Derive the cpv key from the tbz2's embedded CATEGORY.
    mytbz2=portage.xpak.tbz2(x)
    mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
    if os.path.realpath(x) != \
        os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
        print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
        return 0, myfavorites
    db_keys = list(bindb._aux_cache_keys)
    metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
    pkg = Package(type_name="binary", root_config=root_config,
        cpv=mykey, built=True, metadata=metadata,
    self._pkg_cache[pkg] = pkg
    args.append(PackageArg(arg=x, package=pkg,
        root_config=root_config))
    elif ext==".ebuild":
        # Resolve the ebuild's category/package from its location in
        # the tree: .../<cat>/<pn>/<pn-ver>.ebuild
        ebuild_path = portage.util.normalize_path(os.path.abspath(x))
        pkgdir = os.path.dirname(ebuild_path)
        tree_root = os.path.dirname(os.path.dirname(pkgdir))
        cp = pkgdir[len(tree_root)+1:]
        e = portage.exception.PackageNotFound(
            ("%s is not in a valid portage tree " + \
            "hierarchy or does not exist") % x)
        if not portage.isvalidatom(cp):
        cat = portage.catsplit(cp)[0]
        # Strip the ".ebuild" suffix (7 chars) to get the cpv.
        mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
        if not portage.isvalidatom("="+mykey):
        ebuild_path = portdb.findname(mykey)
        if ebuild_path != os.path.join(os.path.realpath(tree_root),
            cp, os.path.basename(ebuild_path)):
            print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
            return 0, myfavorites
        if mykey not in portdb.xmatch(
            "match-visible", portage.dep_getkey(mykey)):
            print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
            print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
            print colorize("BAD", "*** page for details.")
            countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
        raise portage.exception.PackageNotFound(
            "%s is not in a valid portage tree hierarchy or does not exist" % x)
        db_keys = list(portdb._aux_cache_keys)
        metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
        pkg = Package(type_name="ebuild", root_config=root_config,
            cpv=mykey, metadata=metadata, onlydeps=onlydeps)
        # Compute USE for this specific ebuild.
        pkgsettings.setcpv(pkg)
        pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
        self._pkg_cache[pkg] = pkg
        args.append(PackageArg(arg=x, package=pkg,
            root_config=root_config))
    elif x.startswith(os.path.sep):
        # Absolute filesystem path: resolve to the owning package.
        if not x.startswith(myroot):
            portage.writemsg(("\n\n!!! '%s' does not start with" + \
                " $ROOT.\n") % x, noiselevel=-1)
        # Queue these up since it's most efficient to handle
        # multiple files in a single iter_owners() call.
        lookup_owners.append(x)
    if x in ("system", "world"):
    if x.startswith(SETPREFIX):
        # Named package set argument.
        s = x[len(SETPREFIX):]
        raise portage.exception.PackageSetNotFound(s)
        # Recursively expand sets so that containment tests in
        # self._get_parent_sets() properly match atoms in nested
        # sets (like if world contains system).
        expanded_set = InternalPackageSet(
            initial_atoms=getSetAtoms(s))
        self._sets[s] = expanded_set
        args.append(SetArg(arg=x, set=expanded_set,
            root_config=root_config))
        myfavorites.append(x)
    if not is_valid_package_atom(x):
        portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
        portage.writemsg("!!! Please check ebuild(5) for full details.\n")
        portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
    # Don't expand categories or old-style virtuals here unless
    # necessary. Expansion of old-style virtuals here causes at
    # least the following problems:
    #   1) It's more difficult to determine which set(s) an atom
    #      came from, if any.
    #   2) It takes away freedom from the resolver to choose other
    #      possible expansions when necessary.
    args.append(AtomArg(arg=x, atom=x,
        root_config=root_config))
    expanded_atoms = self._dep_expand(root_config, x)
    installed_cp_set = set()
    for atom in expanded_atoms:
        atom_cp = portage.dep_getkey(atom)
        if vardb.cp_list(atom_cp):
            installed_cp_set.add(atom_cp)
    if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
        # Disambiguate in favor of the category that is already
        # installed.
        installed_cp = iter(installed_cp_set).next()
        expanded_atoms = [atom for atom in expanded_atoms \
            if portage.dep_getkey(atom) == installed_cp]
    if len(expanded_atoms) > 1:
        ambiguous_package_name(x, expanded_atoms, root_config,
            self.spinner, self.myopts)
        return False, myfavorites
    atom = expanded_atoms[0]
    null_atom = insert_category_into_atom(x, "null")
    null_cp = portage.dep_getkey(null_atom)
    cat, atom_pn = portage.catsplit(null_cp)
    virts_p = root_config.settings.get_virts_p().get(atom_pn)
    # Allow the depgraph to choose which virtual.
    atom = insert_category_into_atom(x, "virtual")
    atom = insert_category_into_atom(x, "null")
    args.append(AtomArg(arg=x, atom=atom,
        root_config=root_config))
    # --- resolve the queued path arguments to owning packages ---
    search_for_multiple = False
    if len(lookup_owners) > 1:
        search_for_multiple = True
    for x in lookup_owners:
        if not search_for_multiple and os.path.isdir(x):
            search_for_multiple = True
        relative_paths.append(x[len(myroot):])
    for pkg, relative_path in \
        real_vardb._owners.iter_owners(relative_paths):
        owners.add(pkg.mycpv)
    if not search_for_multiple:
        portage.writemsg(("\n\n!!! '%s' is not claimed " + \
            "by any package.\n") % lookup_owners[0], noiselevel=-1)
    slot = vardb.aux_get(cpv, ["SLOT"])[0]
    # portage now masks packages with missing slot, but it's
    # possible that one was installed by an older version
    atom = portage.cpv_getkey(cpv)
    atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
    args.append(AtomArg(arg=atom, atom=atom,
        root_config=root_config))
    if "--update" in self.myopts:
        # Enable greedy SLOT atoms for atoms given as arguments.
        # This is currently disabled for sets since greedy SLOT
        # atoms could be a property of the set itself.
        # In addition to any installed slots, also try to pull
        # in the latest new slot that may be available.
        greedy_atoms.append(arg)
        if not isinstance(arg, (AtomArg, PackageArg)):
        atom_cp = portage.dep_getkey(arg.atom)
        for cpv in vardb.match(arg.atom):
            slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
        greedy_atoms.append(
            AtomArg(arg=arg.arg, atom="%s:%s" % (atom_cp, slot),
                root_config=root_config))
    # Create the "args" package set from atoms and
    # packages given as arguments.
    args_set = self._sets["args"]
    if not isinstance(arg, (AtomArg, PackageArg)):
    if myatom in args_set:
    args_set.add(myatom)
    myfavorites.append(myatom)
    # Flatten every configured set into the pool of argument atoms.
    self._set_atoms.update(chain(*self._sets.itervalues()))
    atom_arg_map = self._atom_arg_map
    for atom in arg.set:
        atom_key = (atom, myroot)
        refs = atom_arg_map.get(atom_key)
        atom_arg_map[atom_key] = refs
    pprovideddict = pkgsettings.pprovideddict
    portage.writemsg("\n", noiselevel=-1)
    # Order needs to be preserved since a feature of --nodeps
    # is to allow the user to force a specific merge order.
    for atom in arg.set:
        self.spinner.update()
        dep = Dependency(atom=atom, onlydeps=onlydeps,
            root=myroot, parent=arg)
        atom_cp = portage.dep_getkey(atom)
        pprovided = pprovideddict.get(portage.dep_getkey(atom))
        if pprovided and portage.match_from_list(atom, pprovided):
            # A provided package has been specified on the command line.
            self._pprovided_args.append((arg, atom))
        if isinstance(arg, PackageArg):
            if not self._add_pkg(arg.package, dep) or \
                not self._create_graph():
                sys.stderr.write(("\n\n!!! Problem resolving " + \
                    "dependencies for %s\n") % arg.arg)
                return 0, myfavorites
        portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
            (arg, atom), noiselevel=-1)
        pkg, existing_node = self._select_package(
            myroot, atom, onlydeps=onlydeps)
        if not (isinstance(arg, SetArg) and \
            arg.name in ("system", "world")):
            self._unsatisfied_deps_for_display.append(
                ((myroot, atom), {}))
            return 0, myfavorites
        self._missing_args.append((arg, atom))
        if atom_cp != pkg.cp:
            # For old-style virtuals, we need to repeat the
            # package.provided check against the selected package.
            expanded_atom = atom.replace(atom_cp, pkg.cp)
            pprovided = pprovideddict.get(pkg.cp)
            portage.match_from_list(expanded_atom, pprovided):
                # A provided package has been
                # specified on the command line.
                self._pprovided_args.append((arg, atom))
        if pkg.installed and "selective" not in self.myparams:
            self._unsatisfied_deps_for_display.append(
                ((myroot, atom), {}))
            # Previous behavior was to bail out in this case, but
            # since the dep is satisfied by the installed package,
            # it's more friendly to continue building the graph
            # and just show a warning message. Therefore, only bail
            # out here if the atom is not from either the system or
            if not (isinstance(arg, SetArg) and \
                arg.name in ("system", "world")):
                return 0, myfavorites
        # Add the selected package to the graph as soon as possible
        # so that later dep_check() calls can use it as feedback
        # for making more consistent atom selections.
        if not self._add_pkg(pkg, dep):
            if isinstance(arg, SetArg):
                sys.stderr.write(("\n\n!!! Problem resolving " + \
                    "dependencies for %s from %s\n") % \
                sys.stderr.write(("\n\n!!! Problem resolving " + \
                    "dependencies for %s\n") % atom)
            return 0, myfavorites
    except portage.exception.MissingSignature, e:
        portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
        portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
        portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
        portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
        portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
        return 0, myfavorites
    except portage.exception.InvalidSignature, e:
        portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
        portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
        portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
        portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
        portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
        return 0, myfavorites
    except SystemExit, e:
        raise # Needed else can't exit
    except Exception, e:
        print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
        print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
    # Now that the root packages have been added to the graph,
    # process the dependencies.
    if not self._create_graph():
        return 0, myfavorites
    # --usepkgonly: verify each merge node is backed by a binary pkg.
    if "--usepkgonly" in self.myopts:
        for xs in self.digraph.all_nodes():
            if not isinstance(xs, Package):
            if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
                print "Missing binary for:",xs[2]
    except self._unknown_internal_error:
        return False, myfavorites
    # We're true here unless we are missing binaries.
    return (not missing,myfavorites)
def _select_atoms_from_graph(self, *pargs, **kwargs):
    """
    Prefer atoms matching packages that have already been
    added to the graph or those that are installed and have
    not been scheduled for replacement.
    """
    # Same as _select_atoms_highest_available, but resolve against the
    # graph-backed trees instead of the filtered trees.
    kwargs["trees"] = self._graph_trees
    return self._select_atoms_highest_available(*pargs, **kwargs)
def _select_atoms_highest_available(self, root, depstring,
    myuse=None, parent=None, strict=True, trees=None):
    """This will raise InvalidDependString if necessary. If trees is
    None then self._filtered_trees is used."""
    # NOTE(review): guard lines appear elided in this copy (e.g. the
    # "if trees is None:" test before the fallback assignment, the
    # "if not strict:" guard, the try/finally around dep_check, and
    # the failure check before the raise) -- confirm against the
    # full source before editing.
    pkgsettings = self.pkgsettings[root]
    trees = self._filtered_trees
    if parent is not None:
        # Expose the requesting package to dep_check so it can make
        # context-sensitive choices.
        trees[root]["parent"] = parent
    portage.dep._dep_check_strict = False
    mycheck = portage.dep_check(depstring, None,
        pkgsettings, myuse=myuse,
        myroot=root, trees=trees)
    if parent is not None:
        trees[root].pop("parent")
    # Restore the module-global strict flag after the dep_check call.
    portage.dep._dep_check_strict = True
    raise portage.exception.InvalidDependString(mycheck[1])
    # On success, mycheck[1] holds the chosen atoms.
    selected_atoms = mycheck[1]
    return selected_atoms
def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
    # Print a diagnostic explaining why no package satisfies "atom":
    # missing USE/IUSE flags, masked packages, or nothing available,
    # followed by the chain of parents that required the atom.
    # NOTE(review): this copy is missing lines (initialization of
    # missing_use/missing_iuse/changes/msg/node, several guards, and
    # some print statements) -- confirm against the full source.
    atom = portage.dep.Atom(atom)
    atom_set = InternalPackageSet(initial_atoms=(atom,))
    # Match candidates without USE deps so we can report packages
    # that exist but differ only in USE configuration.
    atom_without_use = atom
    atom_without_use = portage.dep.remove_slot(atom)
    atom_without_use += ":" + atom.slot
    atom_without_use = portage.dep.Atom(atom_without_use)
    xinfo = '"%s"' % atom
    # Discard null/ from failed cpv_expand category expansion.
    xinfo = xinfo.replace("null/", "")
    masked_packages = []
    missing_licenses = []
    have_eapi_mask = False
    pkgsettings = self.pkgsettings[root]
    implicit_iuse = pkgsettings._get_implicit_iuse()
    root_config = self.roots[root]
    portdb = self.roots[root].trees["porttree"].dbapi
    dbs = self._filtered_trees[root]["dbs"]
    for db, pkg_type, built, installed, db_keys in dbs:
        # xmatch("match-all", ...) also returns masked packages.
        if hasattr(db, "xmatch"):
            cpv_list = db.xmatch("match-all", atom_without_use)
        cpv_list = db.match(atom_without_use)
        for cpv in cpv_list:
            metadata, mreasons = get_mask_info(root_config, cpv,
                pkgsettings, db, pkg_type, built, installed, db_keys)
            if metadata is not None:
                pkg = Package(built=built, cpv=cpv,
                    installed=installed, metadata=metadata,
                    root_config=root_config)
                if pkg.cp != atom.cp:
                    # A cpv can be returned from dbapi.match() as an
                    # old-style virtual match even in cases when the
                    # package does not actually PROVIDE the virtual.
                    # Filter out any such false matches here.
                    if not atom_set.findAtomForPackage(pkg):
                if atom.use and not mreasons:
                    # Visible but USE-incompatible.
                    missing_use.append(pkg)
                masked_packages.append(
                    (root_config, pkgsettings, cpv, metadata, mreasons))
    missing_use_reasons = []
    missing_iuse_reasons = []
    for pkg in missing_use:
        use = pkg.use.enabled
        iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
        iuse_re = re.compile("^(%s)$" % "|".join(iuse))
        for x in atom.use.required:
            # Flags the atom requires but the ebuild doesn't declare.
            if iuse_re.match(x) is None:
                missing_iuse.append(x)
        mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
        missing_iuse_reasons.append((pkg, mreasons))
        # Flags that would have to be toggled to satisfy the atom.
        need_enable = sorted(atom.use.enabled.difference(use))
        need_disable = sorted(atom.use.disabled.intersection(use))
        if need_enable or need_disable:
            changes.extend(colorize("red", "+" + x) \
                for x in need_enable)
            changes.extend(colorize("blue", "-" + x) \
                for x in need_disable)
            mreasons.append("Change USE: %s" % " ".join(changes))
            missing_use_reasons.append((pkg, mreasons))
    if missing_iuse_reasons and not missing_use_reasons:
        missing_use_reasons = missing_iuse_reasons
    elif missing_use_reasons:
        # Only show the latest version.
        del missing_use_reasons[1:]
    if missing_use_reasons:
        print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
        print "!!! One of the following packages is required to complete your request:"
        for pkg, mreasons in missing_use_reasons:
            print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
    elif masked_packages:
        colorize("BAD", "All ebuilds that could satisfy ") + \
        colorize("INFORM", xinfo) + \
        colorize("BAD", " have been masked.")
        print "!!! One of the following masked packages is required to complete your request:"
        have_eapi_mask = show_masked_packages(masked_packages)
        msg = ("The current version of portage supports " + \
            "EAPI '%s'. You must upgrade to a newer version" + \
            " of portage before EAPI masked packages can" + \
            " be installed.") % portage.const.EAPI
        from textwrap import wrap
        for line in wrap(msg, 75):
    print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
    # Show parent nodes and the argument that pulled them in.
    traversed_nodes = set()
    while node is not None:
        traversed_nodes.add(node)
        msg.append('(dependency required by "%s" [%s])' % \
            (colorize('INFORM', str(node.cpv)), node.type_name))
        # When traversing to parents, prefer arguments over packages
        # since arguments are root nodes. Never traverse the same
        # package twice, in order to prevent an infinite loop.
        selected_parent = None
        for parent in self.digraph.parent_nodes(node):
            if isinstance(parent, DependencyArg):
                msg.append('(dependency required by "%s" [argument])' % \
                    (colorize('INFORM', str(parent))))
                selected_parent = None
            if parent not in traversed_nodes:
                selected_parent = parent
        node = selected_parent
def _select_pkg_highest_available(self, root, atom, onlydeps=False):
    # Memoizing wrapper around _select_pkg_highest_available_imp():
    # results are cached in self._highest_pkg_cache keyed by
    # (root, atom, onlydeps).
    # NOTE(review): lines appear elided in this copy (the cache-hit
    # guard that unpacks ret into (pkg, existing), the rebuilt ret
    # value, and the final return) -- confirm against the full source.
    cache_key = (root, atom, onlydeps)
    ret = self._highest_pkg_cache.get(cache_key)
    if pkg and not existing:
        existing = self._slot_pkg_map[root].get(pkg.slot_atom)
        if existing and existing == pkg:
            # Update the cache to reflect that the
            # package has been added to the graph.
            self._highest_pkg_cache[cache_key] = ret
    # Cache miss: compute and store.
    ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
    self._highest_pkg_cache[cache_key] = ret
5596 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
5597 root_config = self.roots[root]
5598 pkgsettings = self.pkgsettings[root]
5599 dbs = self._filtered_trees[root]["dbs"]
5600 vardb = self.roots[root].trees["vartree"].dbapi
5601 portdb = self.roots[root].trees["porttree"].dbapi
5602 # List of acceptable packages, ordered by type preference.
5603 matched_packages = []
5604 highest_version = None
5605 if not isinstance(atom, portage.dep.Atom):
5606 atom = portage.dep.Atom(atom)
5608 atom_set = InternalPackageSet(initial_atoms=(atom,))
5609 existing_node = None
5611 usepkgonly = "--usepkgonly" in self.myopts
5612 empty = "empty" in self.myparams
5613 selective = "selective" in self.myparams
5615 noreplace = "--noreplace" in self.myopts
5616 # Behavior of the "selective" parameter depends on
5617 # whether or not a package matches an argument atom.
5618 # If an installed package provides an old-style
5619 # virtual that is no longer provided by an available
5620 # package, the installed package may match an argument
5621 # atom even though none of the available packages do.
5622 # Therefore, "selective" logic does not consider
5623 # whether or not an installed package matches an
5624 # argument atom. It only considers whether or not
5625 # available packages match argument atoms, which is
5626 # represented by the found_available_arg flag.
5627 found_available_arg = False
5628 for find_existing_node in True, False:
5631 for db, pkg_type, built, installed, db_keys in dbs:
5634 if installed and not find_existing_node:
5635 want_reinstall = reinstall or empty or \
5636 (found_available_arg and not selective)
5637 if want_reinstall and matched_packages:
5639 if hasattr(db, "xmatch"):
5640 cpv_list = db.xmatch("match-all", atom)
5642 cpv_list = db.match(atom)
5644 # USE=multislot can make an installed package appear as if
5645 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
5646 # won't do any good as long as USE=multislot is enabled since
5647 # the newly built package still won't have the expected slot.
5648 # Therefore, assume that such SLOT dependencies are already
5649 # satisfied rather than forcing a rebuild.
5650 if installed and not cpv_list and atom.slot:
5651 for cpv in db.match(atom.cp):
5652 slot_available = False
5653 for other_db, other_type, other_built, \
5654 other_installed, other_keys in dbs:
5657 other_db.aux_get(cpv, ["SLOT"])[0]:
5658 slot_available = True
5662 if not slot_available:
5664 inst_pkg = self._pkg(cpv, "installed",
5665 root_config, installed=installed)
5666 # Remove the slot from the atom and verify that
5667 # the package matches the resulting atom.
5668 atom_without_slot = portage.dep.remove_slot(atom)
5670 atom_without_slot += str(atom.use)
5671 atom_without_slot = portage.dep.Atom(atom_without_slot)
5672 if portage.match_from_list(
5673 atom_without_slot, [inst_pkg]):
5674 cpv_list = [inst_pkg.cpv]
5679 pkg_status = "merge"
5680 if installed or onlydeps:
5681 pkg_status = "nomerge"
5684 for cpv in cpv_list:
5685 # Make --noreplace take precedence over --newuse.
5686 if not installed and noreplace and \
5687 cpv in vardb.match(atom):
5688 # If the installed version is masked, it may
5689 # be necessary to look at lower versions,
5690 # in case there is a visible downgrade.
5692 reinstall_for_flags = None
5693 cache_key = (pkg_type, root, cpv, pkg_status)
5694 calculated_use = True
5695 pkg = self._pkg_cache.get(cache_key)
5697 calculated_use = False
5699 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
5702 pkg = Package(built=built, cpv=cpv,
5703 installed=installed, metadata=metadata,
5704 onlydeps=onlydeps, root_config=root_config,
5706 metadata = pkg.metadata
5707 if not built and ("?" in metadata["LICENSE"] or \
5708 "?" in metadata["PROVIDE"]):
5709 # This is avoided whenever possible because
5710 # it's expensive. It only needs to be done here
5711 # if it has an effect on visibility.
5712 pkgsettings.setcpv(pkg)
5713 metadata["USE"] = pkgsettings["PORTAGE_USE"]
5714 calculated_use = True
5715 self._pkg_cache[pkg] = pkg
5717 if not installed or (installed and matched_packages):
5718 # Only enforce visibility on installed packages
5719 # if there is at least one other visible package
5720 # available. By filtering installed masked packages
5721 # here, packages that have been masked since they
5722 # were installed can be automatically downgraded
5723 # to an unmasked version.
5725 if not visible(pkgsettings, pkg):
5727 except portage.exception.InvalidDependString:
5731 # Enable upgrade or downgrade to a version
5732 # with visible KEYWORDS when the installed
5733 # version is masked by KEYWORDS, but never
5734 # reinstall the same exact version only due
5735 # to a KEYWORDS mask.
5736 if installed and matched_packages and \
5737 pkgsettings._getMissingKeywords(
5738 pkg.cpv, pkg.metadata):
5739 different_version = None
5740 for avail_pkg in matched_packages:
5741 if not portage.dep.cpvequal(
5742 pkg.cpv, avail_pkg.cpv):
5743 different_version = avail_pkg
5745 if different_version is not None:
5746 # Only reinstall for KEYWORDS if
5747 # it's not the same version.
5750 if not pkg.built and not calculated_use:
5751 # This is avoided whenever possible because
5753 pkgsettings.setcpv(pkg)
5754 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5756 if pkg.cp != atom.cp:
5757 # A cpv can be returned from dbapi.match() as an
5758 # old-style virtual match even in cases when the
5759 # package does not actually PROVIDE the virtual.
5760 # Filter out any such false matches here.
5761 if not atom_set.findAtomForPackage(pkg):
5765 if root == self.target_root:
5767 # Ebuild USE must have been calculated prior
5768 # to this point, in case atoms have USE deps.
5769 myarg = self._iter_atoms_for_pkg(pkg).next()
5770 except StopIteration:
5772 except portage.exception.InvalidDependString:
5774 # masked by corruption
5776 if not installed and myarg:
5777 found_available_arg = True
5779 if atom.use and not pkg.built:
5780 use = pkg.use.enabled
5781 if atom.use.enabled.difference(use):
5783 if atom.use.disabled.intersection(use):
5785 if pkg.cp == atom_cp:
5786 if highest_version is None:
5787 highest_version = pkg
5788 elif pkg > highest_version:
5789 highest_version = pkg
5790 # At this point, we've found the highest visible
5791 # match from the current repo. Any lower versions
5792 # from this repo are ignored, so this so the loop
5793 # will always end with a break statement below
5795 if find_existing_node:
5796 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
5799 if portage.dep.match_from_list(atom, [e_pkg]):
5800 if highest_version and \
5801 e_pkg.cp == atom_cp and \
5802 e_pkg < highest_version and \
5803 e_pkg.slot_atom != highest_version.slot_atom:
5804 # There is a higher version available in a
5805 # different slot, so this existing node is
5809 matched_packages.append(e_pkg)
5810 existing_node = e_pkg
5812 # Compare built package to current config and
5813 # reject the built package if necessary.
5814 if built and not installed and \
5815 ("--newuse" in self.myopts or \
5816 "--reinstall" in self.myopts):
5817 iuses = pkg.iuse.all
5818 old_use = pkg.use.enabled
5820 pkgsettings.setcpv(myeb)
5822 pkgsettings.setcpv(pkg)
5823 now_use = pkgsettings["PORTAGE_USE"].split()
5824 forced_flags = set()
5825 forced_flags.update(pkgsettings.useforce)
5826 forced_flags.update(pkgsettings.usemask)
5828 if myeb and not usepkgonly:
5829 cur_iuse = myeb.iuse.all
5830 if self._reinstall_for_flags(forced_flags,
5834 # Compare current config to installed package
5835 # and do not reinstall if possible.
5836 if not installed and \
5837 ("--newuse" in self.myopts or \
5838 "--reinstall" in self.myopts) and \
5839 cpv in vardb.match(atom):
5840 pkgsettings.setcpv(pkg)
5841 forced_flags = set()
5842 forced_flags.update(pkgsettings.useforce)
5843 forced_flags.update(pkgsettings.usemask)
5844 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
5845 old_iuse = set(filter_iuse_defaults(
5846 vardb.aux_get(cpv, ["IUSE"])[0].split()))
5847 cur_use = pkgsettings["PORTAGE_USE"].split()
5848 cur_iuse = pkg.iuse.all
5849 reinstall_for_flags = \
5850 self._reinstall_for_flags(
5851 forced_flags, old_use, old_iuse,
5853 if reinstall_for_flags:
5857 matched_packages.append(pkg)
5858 if reinstall_for_flags:
5859 self._reinstall_nodes[pkg] = \
5863 if not matched_packages:
5866 if "--debug" in self.myopts:
5867 for pkg in matched_packages:
5868 portage.writemsg("%s %s\n" % \
5869 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
5871 # Filter out any old-style virtual matches if they are
5872 # mixed with new-style virtual matches.
5873 cp = portage.dep_getkey(atom)
5874 if len(matched_packages) > 1 and \
5875 "virtual" == portage.catsplit(cp)[0]:
5876 for pkg in matched_packages:
5879 # Got a new-style virtual, so filter
5880 # out any old-style virtuals.
5881 matched_packages = [pkg for pkg in matched_packages \
5885 # If the installed version is in a different slot and it is higher than
5886 # the highest available visible package, _iter_atoms_for_pkg() may fail
5887 # to properly match the available package with a corresponding argument
5888 # atom. Detect this case and correct it here.
5889 if not selective and len(matched_packages) > 1 and \
5890 matched_packages[-1].installed and \
5891 matched_packages[-1].slot_atom != \
5892 matched_packages[-2].slot_atom and \
5893 matched_packages[-1] > matched_packages[-2]:
5894 pkg = matched_packages[-2]
5895 if pkg.root == self.target_root and \
5896 self._set_atoms.findAtomForPackage(pkg):
5897 # Select the available package instead
5898 # of the installed package.
5899 matched_packages.pop()
5901 if len(matched_packages) > 1:
5902 bestmatch = portage.best(
5903 [pkg.cpv for pkg in matched_packages])
5904 matched_packages = [pkg for pkg in matched_packages \
5905 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
5907 # ordered by type preference ("ebuild" type is the last resort)
5908 return matched_packages[-1], existing_node
5910 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
# NOTE(review): the embedded numbering jumps (5910->5912, 5913->5916,
# 5917->5920, 5923->5926), so the docstring delimiters, the empty-match
# early return, and the e_pkg check appear to be missing from this dump.
5912 Select packages that have already been added to the graph or
5913 those that are installed and have not been scheduled for
5916 graph_db = self._graph_trees[root]["porttree"].dbapi
5917 matches = graph_db.match(atom)
# dbapi.match() returns matches sorted ascending, so [-1] is highest.
5920 cpv = matches[-1] # highest match
# Build a "cat/pkg:SLOT" key to look up an existing graph node in the
# per-root slot map.
5921 slot_atom = "%s:%s" % (portage.cpv_getkey(cpv),
5922 graph_db.aux_get(cpv, ["SLOT"])[0])
5923 e_pkg = self._slot_pkg_map[root].get(slot_atom)
5926 # Since this cpv exists in the graph_db,
5927 # we must have a cached Package instance.
5928 cache_key = ("installed", root, cpv, "nomerge")
# Returns (Package, existing_node); None means no existing graph node.
5929 return (self._pkg_cache[cache_key], None)
5931 def _complete_graph(self):
# NOTE(review): embedded numbering is non-contiguous throughout this
# method (5931->5933, 5944->5947, 5948->5951, 5961->5963, 5968->5970,
# 5975->5977, 5989->5991, 5997->5999, 6005->6007, 6014->6016, ...), so
# docstring delimiters, early returns, continues, and the success-path
# return appear to be missing from this dump.
5933 Add any deep dependencies of required sets (args, system, world) that
5934 have not been pulled into the graph yet. This ensures that the graph
5935 is consistent such that initially satisfied deep dependencies are not
5936 broken in the new graph. Initially unsatisfied dependencies are
5937 irrelevant since we only want to avoid breaking dependencies that are
5940 Since this method can consume enough time to disturb users, it is
5941 currently only enabled by the --complete-graph option.
# Graph completion is pointless when nothing will be merged or when
# dependency recursion is disabled.
5943 if "--buildpkgonly" in self.myopts or \
5944 "recurse" not in self.myparams:
5947 if "complete" not in self.myparams:
5948 # Skip this to avoid consuming enough time to disturb users.
5951 # Put the depgraph into a mode that causes it to only
5952 # select packages that have already been added to the
5953 # graph or those that are installed and have not been
5954 # scheduled for replacement. Also, toggle the "deep"
5955 # parameter so that all dependencies are traversed and
# Swap in the graph-restricted selection strategies defined elsewhere
# in this class.
5957 self._select_atoms = self._select_atoms_from_graph
5958 self._select_package = self._select_pkg_from_graph
5959 already_deep = "deep" in self.myparams
5960 if not already_deep:
5961 self.myparams.add("deep")
5963 for root in self.roots:
5964 required_set_names = self._required_set_names.copy()
5965 if root == self.target_root and \
5966 (already_deep or "empty" in self.myparams):
5967 required_set_names.difference_update(self._sets)
5968 if not required_set_names and not self._ignored_deps:
5970 root_config = self.roots[root]
5971 setconfig = root_config.setconfig
5973 # Reuse existing SetArg instances when available.
5974 for arg in self.digraph.root_nodes():
5975 if not isinstance(arg, SetArg):
5977 if arg.root_config != root_config:
5979 if arg.name in required_set_names:
5981 required_set_names.remove(arg.name)
5982 # Create new SetArg instances only when necessary.
5983 for s in required_set_names:
5984 expanded_set = InternalPackageSet(
5985 initial_atoms=setconfig.getSetAtoms(s))
5986 atom = SETPREFIX + s
5987 args.append(SetArg(arg=atom, set=expanded_set,
5988 root_config=root_config))
5989 vardb = root_config.trees["vartree"].dbapi
# Push every atom of each required set onto the dep stack so that
# _create_graph() traverses it.
5991 for atom in arg.set:
5992 self._dep_stack.append(
5993 Dependency(atom=atom, root=root, parent=arg))
5994 if self._ignored_deps:
5995 self._dep_stack.extend(self._ignored_deps)
5996 self._ignored_deps = []
5997 if not self._create_graph(allow_unsatisfied=True):
5999 # Check the unsatisfied deps to see if any initially satisfied deps
6000 # will become unsatisfied due to an upgrade. Initially unsatisfied
6001 # deps are irrelevant since we only want to avoid breaking deps
6002 # that are initially satisfied.
6003 while self._unsatisfied_deps:
6004 dep = self._unsatisfied_deps.pop()
6005 matches = vardb.match_pkgs(dep.atom)
6007 self._initially_unsatisfied_deps.append(dep)
6009 # A scheduled installation broke a deep dependency.
6010 # Add the installed package to the graph so that it
6011 # will be appropriately reported as a slot collision
6012 # (possibly solvable via backtracking).
6013 pkg = matches[-1] # highest match
6014 if not self._add_pkg(pkg, dep):
6016 if not self._create_graph(allow_unsatisfied=True):
6020 def _pkg(self, cpv, type_name, root_config, installed=False):
# NOTE(review): numbering jumps (6020->6022, 6024->6029, 6031->6033,
# 6044->end) indicate the docstring delimiters, the cache-miss guard
# ("if pkg is None:"-style) and the final "return pkg" are missing
# from this dump.
6022 Get a package instance from the cache, or create a new
6023 one if necessary. Raises KeyError from aux_get if it
6024 fails for some reason (package does not exist or is
# Only "nomerge" instances are served by this accessor; merge-operation
# instances are created on the selection paths elsewhere in this class.
6029 operation = "nomerge"
# Cache key mirrors the (type_name, root, cpv, operation) tuples used
# by the selection code above.
6030 pkg = self._pkg_cache.get(
6031 (type_name, root_config.root, cpv, operation))
6033 tree_type = self.pkg_tree_map[type_name]
6034 db = root_config.trees[tree_type].dbapi
# Use the original (non-fake) tree's aux-cache key set so metadata is
# fetched once with every key the Package needs.
6035 db_keys = list(self._trees_orig[root_config.root][
6036 tree_type].dbapi._aux_cache_keys)
6037 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6038 pkg = Package(cpv=cpv, metadata=metadata,
6039 root_config=root_config, installed=installed)
6040 if type_name == "ebuild":
# Ebuild USE is config-dependent, so compute it from current settings.
6041 settings = self.pkgsettings[root_config.root]
6042 settings.setcpv(pkg)
6043 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6044 self._pkg_cache[pkg] = pkg
6047 def validate_blockers(self):
6048 """Remove any blockers from the digraph that do not match any of the
6049 packages within the graph. If necessary, create hard deps to ensure
6050 correct merge order such that mutually blocking packages are never
6051 installed simultaneously."""
# NOTE(review): the embedded numbering is non-contiguous throughout this
# method (e.g. 6086->6092, 6109->6112, 6118->6122, 6146->6147->6150,
# 6155->6159, 6226->6230, 6293->6296, 6311->6312), so loop headers,
# try/continue/return lines and several assignments are missing from
# this dump. The commentary below describes only what is visible.
# Blockers are irrelevant when no packages will actually be installed.
6053 if "--buildpkgonly" in self.myopts or \
6054 "--nodeps" in self.myopts:
6057 #if "deep" in self.myparams:
6059 # Pull in blockers from all installed packages that haven't already
6060 # been pulled into the depgraph. This is not enabled by default
6061 # due to the performance penalty that is incurred by all the
6062 # additional dep_check calls that are required.
6064 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
# Phase 1 (per root): collect blocker atoms from installed packages,
# using and refreshing the on-disk BlockerCache.
6065 for myroot in self.trees:
6066 vardb = self.trees[myroot]["vartree"].dbapi
6067 portdb = self.trees[myroot]["porttree"].dbapi
6068 pkgsettings = self.pkgsettings[myroot]
6069 final_db = self.mydbapi[myroot]
6071 blocker_cache = BlockerCache(myroot, vardb)
# Entries still present in stale_cache after the scan are for packages
# that no longer exist and are purged below.
6072 stale_cache = set(blocker_cache)
6075 stale_cache.discard(cpv)
6076 pkg_in_graph = self.digraph.contains(pkg)
6078 # Check for masked installed packages. Only warn about
6079 # packages that are in the graph in order to avoid warning
6080 # about those that will be automatically uninstalled during
6081 # the merge process or by --depclean.
6083 if pkg_in_graph and not visible(pkgsettings, pkg):
6084 self._masked_installed.add(pkg)
6086 blocker_atoms = None
6092 self._blocker_parents.child_nodes(pkg))
6097 self._irrelevant_blockers.child_nodes(pkg))
6100 if blockers is not None:
# Normalize to a set of atom strings for comparison with cached data.
6101 blockers = set(str(blocker.atom) \
6102 for blocker in blockers)
6104 # If this node has any blockers, create a "nomerge"
6105 # node for it so that they can be enforced.
6106 self.spinner.update()
6107 blocker_data = blocker_cache.get(cpv)
# A COUNTER mismatch means the installed instance changed since the
# cache entry was written, so the cached atoms are untrustworthy.
6108 if blocker_data is not None and \
6109 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6112 # If blocker data from the graph is available, use
6113 # it to validate the cache and update the cache if
6115 if blocker_data is not None and \
6116 blockers is not None:
6117 if not blockers.symmetric_difference(
6118 blocker_data.atoms):
6122 if blocker_data is None and \
6123 blockers is not None:
6124 # Re-use the blockers from the graph.
6125 blocker_atoms = sorted(blockers)
6126 counter = long(pkg.metadata["COUNTER"])
6128 blocker_cache.BlockerData(counter, blocker_atoms)
6129 blocker_cache[pkg.cpv] = blocker_data
6133 blocker_atoms = blocker_data.atoms
6135 # Use aux_get() to trigger FakeVartree global
6136 # updates on *DEPEND when appropriate.
6137 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6138 # It is crucial to pass in final_db here in order to
6139 # optimize dep_check calls by eliminating atoms via
6140 # dep_wordreduce and dep_eval calls.
# Strict checking is disabled around dep_check and re-enabled below;
# installed packages may carry dep strings that no longer parse
# strictly.
6142 portage.dep._dep_check_strict = False
6144 success, atoms = portage.dep_check(depstr,
6145 final_db, pkgsettings, myuse=pkg.use.enabled,
6146 trees=self._graph_trees, myroot=myroot)
6147 except Exception, e:
# Never swallow SystemExit.
6148 if isinstance(e, SystemExit):
6150 # This is helpful, for example, if a ValueError
6151 # is thrown from cpv_expand due to multiple
6152 # matches (this can happen if an atom lacks a
6154 show_invalid_depstring_notice(
6155 pkg, depstr, str(e))
6159 portage.dep._dep_check_strict = True
# If the installed package with the bad deps is already slated for
# replacement, the invalid depstring is tolerated silently.
6161 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6162 if replacement_pkg and \
6163 replacement_pkg[0].operation == "merge":
6164 # This package is being replaced anyway, so
6165 # ignore invalid dependencies so as not to
6166 # annoy the user too much (otherwise they'd be
6167 # forced to manually unmerge it first).
6169 show_invalid_depstring_notice(pkg, depstr, atoms)
# Blocker atoms are the "!..." entries left after dep_check reduction.
6171 blocker_atoms = [myatom for myatom in atoms \
6172 if myatom.startswith("!")]
6173 blocker_atoms.sort()
6174 counter = long(pkg.metadata["COUNTER"])
6175 blocker_cache[cpv] = \
6176 blocker_cache.BlockerData(counter, blocker_atoms)
6179 for atom in blocker_atoms:
6180 blocker = Blocker(atom=portage.dep.Atom(atom),
6181 eapi=pkg.metadata["EAPI"], root=myroot)
6182 self._blocker_parents.add(blocker, pkg)
6183 except portage.exception.InvalidAtom, e:
6184 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6185 show_invalid_depstring_notice(
6186 pkg, depstr, "Invalid Atom: %s" % (e,))
# Drop cache entries for packages that no longer exist, then persist.
6188 for cpv in stale_cache:
6189 del blocker_cache[cpv]
6190 blocker_cache.flush()
6193 # Discard any "uninstall" tasks scheduled by previous calls
6194 # to this method, since those tasks may not make sense given
6195 # the current graph state.
6196 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6197 if previous_uninstall_tasks:
6198 self._blocker_uninstalls = digraph()
6199 self.digraph.difference_update(previous_uninstall_tasks)
# Phase 2: resolve each collected blocker against the initial
# (installed) and final (post-merge) package databases.
6201 for blocker in self._blocker_parents.leaf_nodes():
6202 self.spinner.update()
6203 root_config = self.roots[blocker.root]
6204 virtuals = root_config.settings.getvirtuals()
6205 myroot = blocker.root
6206 initial_db = self.trees[myroot]["vartree"].dbapi
6207 final_db = self.mydbapi[myroot]
6209 provider_virtual = False
# Old-style virtuals must be expanded into their provider atoms.
6210 if blocker.cp in virtuals and \
6211 not self._have_new_virt(blocker.root, blocker.cp):
6212 provider_virtual = True
6214 if provider_virtual:
6216 for provider_entry in virtuals[blocker.cp]:
6218 portage.dep_getkey(provider_entry)
6219 atoms.append(blocker.atom.replace(
6220 blocker.cp, provider_cp))
6222 atoms = [blocker.atom]
6224 blocked_initial = []
6226 blocked_initial.extend(initial_db.match_pkgs(atom))
6230 blocked_final.extend(final_db.match_pkgs(atom))
# A blocker that matches nothing, before or after the merge, is
# irrelevant and is pruned from the graph.
6232 if not blocked_initial and not blocked_final:
6233 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6234 self._blocker_parents.remove(blocker)
6235 # Discard any parents that don't have any more blockers.
6236 for pkg in parent_pkgs:
6237 self._irrelevant_blockers.add(blocker, pkg)
6238 if not self._blocker_parents.child_nodes(pkg):
6239 self._blocker_parents.remove(pkg)
6241 for parent in self._blocker_parents.parent_nodes(blocker):
6242 unresolved_blocks = False
6243 depends_on_order = set()
6244 for pkg in blocked_initial:
# Same-slot matches are the parent replacing itself; not a block.
6245 if pkg.slot_atom == parent.slot_atom:
6246 # TODO: Support blocks within slots in cases where it
6247 # might make sense. For example, a new version might
6248 # require that the old version be uninstalled at build
6251 if parent.installed:
6252 # Two currently installed packages conflict with
6253 # eachother. Ignore this case since the damage
6254 # is already done and this would be likely to
6255 # confuse users if displayed like a normal blocker.
6257 if parent.operation == "merge":
6258 # Maybe the blocked package can be replaced or simply
6259 # unmerged to resolve this block.
6260 depends_on_order.add((pkg, parent))
6262 # None of the above blocker resolutions techniques apply,
6263 # so apparently this one is unresolvable.
6264 unresolved_blocks = True
6265 for pkg in blocked_final:
6266 if pkg.slot_atom == parent.slot_atom:
6267 # TODO: Support blocks within slots.
6269 if parent.operation == "nomerge" and \
6270 pkg.operation == "nomerge":
6271 # This blocker will be handled the next time that a
6272 # merge of either package is triggered.
6275 # Maybe the blocking package can be
6276 # unmerged to resolve this block.
6277 if parent.operation == "merge" and pkg.installed:
6278 depends_on_order.add((pkg, parent))
6280 elif parent.operation == "nomerge":
6281 depends_on_order.add((parent, pkg))
6283 # None of the above blocker resolutions techniques apply,
6284 # so apparently this one is unresolvable.
6285 unresolved_blocks = True
6287 # Make sure we don't unmerge any package that have been pulled
6289 if not unresolved_blocks and depends_on_order:
6290 for inst_pkg, inst_task in depends_on_order:
6291 if self.digraph.contains(inst_pkg) and \
6292 self.digraph.parent_nodes(inst_pkg):
6293 unresolved_blocks = True
# Schedule uninstall tasks for resolvable blocks, with hard ordering
# edges so the blocked package is removed before/with the merge.
6296 if not unresolved_blocks and depends_on_order:
6297 for inst_pkg, inst_task in depends_on_order:
6298 uninst_task = Package(built=inst_pkg.built,
6299 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6300 metadata=inst_pkg.metadata,
6301 operation="uninstall",
6302 root_config=inst_pkg.root_config,
6303 type_name=inst_pkg.type_name)
6304 self._pkg_cache[uninst_task] = uninst_task
6305 # Enforce correct merge order with a hard dep.
6306 self.digraph.addnode(uninst_task, inst_task,
6307 priority=BlockerDepPriority.instance)
6308 # Count references to this blocker so that it can be
6309 # invalidated after nodes referencing it have been
6311 self._blocker_uninstalls.addnode(uninst_task, blocker)
6312 if not unresolved_blocks and not depends_on_order:
6313 self._irrelevant_blockers.add(blocker, parent)
6314 self._blocker_parents.remove_edge(blocker, parent)
6315 if not self._blocker_parents.parent_nodes(blocker):
6316 self._blocker_parents.remove(blocker)
6317 if not self._blocker_parents.child_nodes(parent):
6318 self._blocker_parents.remove(parent)
6319 if unresolved_blocks:
6320 self._unsolvable_blockers.add(blocker, parent)
6324 def _accept_blocker_conflicts(self):
6326 for x in ("--buildpkgonly", "--fetchonly",
6327 "--fetch-all-uri", "--nodeps", "--pretend"):
6328 if x in self.myopts:
def _merge_order_bias(self, mygraph):
    """Sort mygraph.order in place, from highest to lowest overall
    reference (parent) count, for optimal leaf node selection.

    @param mygraph: a digraph whose ``order`` list is reordered
        in place; only ``order`` and ``parent_nodes()`` are used.
    """
    # The visible original used node_info without ever initializing
    # it (NameError); restore the initializer. Precomputing the
    # counts also keeps the comparator O(1) per comparison.
    node_info = {}
    for node in mygraph.order:
        node_info[node] = len(mygraph.parent_nodes(node))
    def cmp_merge_preference(node1, node2):
        # Descending order: more-referenced nodes sort first.
        # (Python 2 cmp-style comparator, as used by list.sort below.)
        return node_info[node2] - node_info[node1]
    mygraph.order.sort(cmp_merge_preference)
6343 def altlist(self, reversed=False):
# NOTE(review): numbering jumps (6343->6345, 6346->6348, 6350->6353,
# 6353->end) indicate the try line, the retry-loop continue, the
# reversed-handling and the final return are missing from this dump.
# NOTE(review): the "reversed" parameter shadows the builtin; it is
# part of the public signature, so it cannot be renamed here.
# Loop until _serialize_tasks() succeeds; _serialize_tasks_retry
# signals that conflict resolution invalidated the attempt.
6345 while self._serialized_tasks_cache is None:
6346 self._resolve_conflicts()
6348 self._serialized_tasks_cache, self._scheduler_graph = \
6349 self._serialize_tasks()
6350 except self._serialize_tasks_retry:
# Return a copy so callers cannot mutate the cached task list.
6353 retlist = self._serialized_tasks_cache[:]
6358 def schedulerGraph(self):
# NOTE(review): numbering jumps (6358->6360, 6366->6368, 6370->6372,
# 6372->6374) indicate the docstring delimiters and the line that
# populates self._scheduler_graph (presumably via self.altlist() --
# TODO confirm) are missing from this dump.
6360 The scheduler graph is identical to the normal one except that
6361 uninstall edges are reversed in specific cases that require
6362 conflicting packages to be temporarily installed simultaneously.
6363 This is intended for use by the Scheduler in its parallelization
6364 logic. It ensures that temporary simultaneous installation of
6365 conflicting packages is avoided when appropriate (especially for
6366 !!atom blockers), but allowed in specific cases that require it.
6368 Note that this method calls break_refs() which alters the state of
6369 internal Package instances such that this depgraph instance should
6370 not be used to perform any more calculations.
6372 if self._scheduler_graph is None:
# break_refs() detaches graph nodes from this depgraph (see below),
# so this depgraph must not be reused after this call.
6374 self.break_refs(self._scheduler_graph.order)
6375 return self._scheduler_graph
6377 def break_refs(self, nodes):
# NOTE(review): numbering jumps (6377->6379, 6382->6385, 6390->6392)
# indicate the docstring delimiters and the "for node in nodes:" loop
# header are missing from this dump.
6379 Take a mergelist like that returned from self.altlist() and
6380 break any references that lead back to the depgraph. This is
6381 useful if you want to hold references to packages without
6382 also holding the depgraph on the heap.
# Only Package-like nodes carry a root_config; Blockers and other
# node types are left untouched.
6385 if hasattr(node, "root_config"):
6386 # The FakeVartree references the _package_cache which
6387 # references the depgraph. So that Package instances don't
6388 # hold the depgraph and FakeVartree on the heap, replace
6389 # the RootConfig that references the FakeVartree with the
6390 # original RootConfig instance which references the actual
6392 node.root_config = \
6393 self._trees_orig[node.root_config.root]["root_config"]
def _resolve_conflicts(self):
    """Complete the dependency graph and validate its blockers,
    raising an internal error if either step fails, then process
    any slot conflicts that were detected."""
    # Both checks must pass, in this order; stop at the first failure.
    for check in (self._complete_graph, self.validate_blockers):
        if not check():
            raise self._unknown_internal_error()

    if self._slot_collision_info:
        self._process_slot_conflicts()
6405 def _serialize_tasks(self):
6406 scheduler_graph = self.digraph.copy()
6407 mygraph=self.digraph.copy()
6408 # Prune "nomerge" root nodes if nothing depends on them, since
6409 # otherwise they slow down merge order calculation. Don't remove
6410 # non-root nodes since they help optimize merge order in some cases
6411 # such as revdep-rebuild.
6412 removed_nodes = set()
6414 for node in mygraph.root_nodes():
6415 if not isinstance(node, Package) or \
6416 node.installed or node.onlydeps:
6417 removed_nodes.add(node)
6419 self.spinner.update()
6420 mygraph.difference_update(removed_nodes)
6421 if not removed_nodes:
6423 removed_nodes.clear()
6424 self._merge_order_bias(mygraph)
6425 def cmp_circular_bias(n1, n2):
6427 RDEPEND is stronger than PDEPEND and this function
6428 measures such a strength bias within a circular
6429 dependency relationship.
6431 n1_n2_medium = n2 in mygraph.child_nodes(n1,
6432 ignore_priority=DepPriority.MEDIUM_SOFT)
6433 n2_n1_medium = n1 in mygraph.child_nodes(n2,
6434 ignore_priority=DepPriority.MEDIUM_SOFT)
6435 if n1_n2_medium == n2_n1_medium:
6440 myblocker_uninstalls = self._blocker_uninstalls.copy()
6442 # Contains uninstall tasks that have been scheduled to
6443 # occur after overlapping blockers have been installed.
6444 scheduled_uninstalls = set()
6445 # Contains any Uninstall tasks that have been ignored
6446 # in order to avoid the circular deps code path. These
6447 # correspond to blocker conflicts that could not be
6449 ignored_uninstall_tasks = set()
6450 have_uninstall_task = False
6451 complete = "complete" in self.myparams
6452 myblocker_parents = self._blocker_parents.copy()
6455 def get_nodes(**kwargs):
6457 Returns leaf nodes excluding Uninstall instances
6458 since those should be executed as late as possible.
6460 return [node for node in mygraph.leaf_nodes(**kwargs) \
6461 if isinstance(node, Package) and \
6462 (node.operation != "uninstall" or \
6463 node in scheduled_uninstalls)]
6465 # sys-apps/portage needs special treatment if ROOT="/"
6466 running_root = self._running_root.root
6467 from portage.const import PORTAGE_PACKAGE_ATOM
6468 runtime_deps = InternalPackageSet(
6469 initial_atoms=[PORTAGE_PACKAGE_ATOM])
6470 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
6471 PORTAGE_PACKAGE_ATOM)
6472 replacement_portage = self.mydbapi[running_root].match_pkgs(
6473 PORTAGE_PACKAGE_ATOM)
6476 running_portage = running_portage[0]
6478 running_portage = None
6480 if replacement_portage:
6481 replacement_portage = replacement_portage[0]
6483 replacement_portage = None
6485 if replacement_portage == running_portage:
6486 replacement_portage = None
6488 if replacement_portage is not None:
6489 # update from running_portage to replacement_portage asap
6490 asap_nodes.append(replacement_portage)
6492 if running_portage is not None:
6494 portage_rdepend = self._select_atoms_highest_available(
6495 running_root, running_portage.metadata["RDEPEND"],
6496 myuse=running_portage.use.enabled,
6497 parent=running_portage, strict=False)
6498 except portage.exception.InvalidDependString, e:
6499 portage.writemsg("!!! Invalid RDEPEND in " + \
6500 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
6501 (running_root, running_portage.cpv, e), noiselevel=-1)
6503 portage_rdepend = []
6504 runtime_deps.update(atom for atom in portage_rdepend \
6505 if not atom.startswith("!"))
6507 ignore_priority_soft_range = [None]
6508 ignore_priority_soft_range.extend(
6509 xrange(DepPriority.MIN, DepPriority.MEDIUM_SOFT + 1))
6510 tree_mode = "--tree" in self.myopts
6511 # Tracks whether or not the current iteration should prefer asap_nodes
6512 # if available. This is set to False when the previous iteration
6513 # failed to select any nodes. It is reset whenever nodes are
6514 # successfully selected.
6517 # By default, try to avoid selecting root nodes whenever possible. This
6518 # helps ensure that the maximimum possible number of soft dependencies
6519 # have been removed from the graph before their parent nodes have
6520 # selected. This is especially important when those dependencies are
6521 # going to be rebuilt by revdep-rebuild or `emerge -e system` after the
6522 # CHOST has been changed (like when building a stage3 from a stage2).
6523 accept_root_node = False
6525 # State of prefer_asap and accept_root_node flags for successive
6526 # iterations that loosen the criteria for node selection.
6528 # iteration prefer_asap accept_root_node
6533 # If no nodes are selected on the 3rd iteration, it is due to
6534 # unresolved blockers or circular dependencies.
6536 while not mygraph.empty():
6537 self.spinner.update()
6538 selected_nodes = None
6539 ignore_priority = None
6540 if prefer_asap and asap_nodes:
6541 """ASAP nodes are merged before their soft deps."""
6542 asap_nodes = [node for node in asap_nodes \
6543 if mygraph.contains(node)]
6544 for node in asap_nodes:
6545 if not mygraph.child_nodes(node,
6546 ignore_priority=DepPriority.SOFT):
6547 selected_nodes = [node]
6548 asap_nodes.remove(node)
6550 if not selected_nodes and \
6551 not (prefer_asap and asap_nodes):
6552 for ignore_priority in ignore_priority_soft_range:
6553 nodes = get_nodes(ignore_priority=ignore_priority)
6557 if ignore_priority is None and not tree_mode:
6558 # Greedily pop all of these nodes since no relationship
6559 # has been ignored. This optimization destroys --tree
6560 # output, so it's disabled in reversed mode. If there
6561 # is a mix of merge and uninstall nodes, save the
6562 # uninstall nodes from later since sometimes a merge
6563 # node will render an install node unnecessary, and
6564 # we want to avoid doing a separate uninstall task in
6566 merge_nodes = [node for node in nodes \
6567 if node.operation == "merge"]
6569 selected_nodes = merge_nodes
6571 selected_nodes = nodes
6573 # For optimal merge order:
6574 # * Only pop one node.
6575 # * Removing a root node (node without a parent)
6576 # will not produce a leaf node, so avoid it.
6578 if mygraph.parent_nodes(node):
6579 # found a non-root node
6580 selected_nodes = [node]
6582 if not selected_nodes and \
6583 (accept_root_node or ignore_priority is None):
6584 # settle for a root node
6585 selected_nodes = [nodes[0]]
6587 if not selected_nodes:
6588 nodes = get_nodes(ignore_priority=DepPriority.MEDIUM)
6590 """Recursively gather a group of nodes that RDEPEND on
6591 eachother. This ensures that they are merged as a group
6592 and get their RDEPENDs satisfied as soon as possible."""
6593 def gather_deps(ignore_priority,
6594 mergeable_nodes, selected_nodes, node):
6595 if node in selected_nodes:
6597 if node not in mergeable_nodes:
6599 if node == replacement_portage and \
6600 mygraph.child_nodes(node,
6601 ignore_priority=DepPriority.MEDIUM_SOFT):
6602 # Make sure that portage always has all of it's
6603 # RDEPENDs installed first.
6605 selected_nodes.add(node)
6606 for child in mygraph.child_nodes(node,
6607 ignore_priority=ignore_priority):
6608 if not gather_deps(ignore_priority,
6609 mergeable_nodes, selected_nodes, child):
6612 mergeable_nodes = set(nodes)
6613 if prefer_asap and asap_nodes:
6615 for ignore_priority in xrange(DepPriority.SOFT,
6616 DepPriority.MEDIUM_SOFT + 1):
6618 if nodes is not asap_nodes and \
6619 not accept_root_node and \
6620 not mygraph.parent_nodes(node):
6622 selected_nodes = set()
6623 if gather_deps(ignore_priority,
6624 mergeable_nodes, selected_nodes, node):
6627 selected_nodes = None
6631 # If any nodes have been selected here, it's always
6632 # possible that anything up to a MEDIUM_SOFT priority
6633 # relationship has been ignored. This state is recorded
6634 # in ignore_priority so that relevant nodes will be
6635 # added to asap_nodes when appropriate.
6637 ignore_priority = DepPriority.MEDIUM_SOFT
6639 if prefer_asap and asap_nodes and not selected_nodes:
6640 # We failed to find any asap nodes to merge, so ignore
6641 # them for the next iteration.
6645 if not selected_nodes and not accept_root_node:
6646 # Maybe there are only root nodes left, so accept them
6647 # for the next iteration.
6648 accept_root_node = True
6651 if selected_nodes and ignore_priority > DepPriority.SOFT:
6652 # Try to merge ignored medium deps as soon as possible.
6653 for node in selected_nodes:
6654 children = set(mygraph.child_nodes(node))
6655 soft = children.difference(
6656 mygraph.child_nodes(node,
6657 ignore_priority=DepPriority.SOFT))
6658 medium_soft = children.difference(
6659 mygraph.child_nodes(node,
6660 ignore_priority=DepPriority.MEDIUM_SOFT))
6661 medium_soft.difference_update(soft)
6662 for child in medium_soft:
6663 if child in selected_nodes:
6665 if child in asap_nodes:
6667 asap_nodes.append(child)
6669 if selected_nodes and len(selected_nodes) > 1:
6670 if not isinstance(selected_nodes, list):
6671 selected_nodes = list(selected_nodes)
6672 selected_nodes.sort(cmp_circular_bias)
6674 if not selected_nodes and not myblocker_uninstalls.is_empty():
6675 # An Uninstall task needs to be executed in order to
6676 # avoid conflict if possible.
6677 min_parent_deps = None
6679 for task in myblocker_uninstalls.leaf_nodes():
6680 # Do some sanity checks so that system or world packages
6681 # don't get uninstalled inappropriately here (only really
6682 # necessary when --complete-graph has not been enabled).
6684 if task in ignored_uninstall_tasks:
6687 if task in scheduled_uninstalls:
6688 # It's been scheduled but it hasn't
6689 # been executed yet due to dependence
6690 # on installation of blocking packages.
6693 root_config = self.roots[task.root]
6694 inst_pkg = self._pkg_cache[
6695 ("installed", task.root, task.cpv, "nomerge")]
6697 if self.digraph.contains(inst_pkg):
6700 forbid_overlap = False
6701 heuristic_overlap = False
6702 for blocker in myblocker_uninstalls.parent_nodes(task):
6703 if blocker.eapi in ("0", "1"):
6704 heuristic_overlap = True
6705 elif blocker.atom.blocker.overlap.forbid:
6706 forbid_overlap = True
6708 if forbid_overlap and running_root == task.root:
6711 if heuristic_overlap and running_root == task.root:
6712 # Never uninstall sys-apps/portage or it's essential
6713 # dependencies, except through replacement.
6715 runtime_dep_atoms = \
6716 list(runtime_deps.iterAtomsForPackage(task))
6717 except portage.exception.InvalidDependString, e:
6718 portage.writemsg("!!! Invalid PROVIDE in " + \
6719 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6720 (task.root, task.cpv, e), noiselevel=-1)
6724 # Don't uninstall a runtime dep if it appears
6725 # to be the only suitable one installed.
6727 vardb = root_config.trees["vartree"].dbapi
6728 for atom in runtime_dep_atoms:
6729 other_version = None
6730 for pkg in vardb.match_pkgs(atom):
6731 if pkg.cpv == task.cpv and \
6732 pkg.metadata["COUNTER"] == \
6733 task.metadata["COUNTER"]:
6737 if other_version is None:
6743 # For packages in the system set, don't take
6744 # any chances. If the conflict can't be resolved
6745 # by a normal replacement operation then abort.
6748 for atom in root_config.sets[
6749 "system"].iterAtomsForPackage(task):
6752 except portage.exception.InvalidDependString, e:
6753 portage.writemsg("!!! Invalid PROVIDE in " + \
6754 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6755 (task.root, task.cpv, e), noiselevel=-1)
6761 # Note that the world check isn't always
6762 # necessary since self._complete_graph() will
6763 # add all packages from the system and world sets to the
6764 # graph. This just allows unresolved conflicts to be
6765 # detected as early as possible, which makes it possible
6766 # to avoid calling self._complete_graph() when it is
6767 # unnecessary due to blockers triggering an abortion.
6769 # For packages in the world set, go ahead an uninstall
6770 # when necessary, as long as the atom will be satisfied
6771 # in the final state.
6772 graph_db = self.mydbapi[task.root]
6775 for atom in root_config.sets[
6776 "world"].iterAtomsForPackage(task):
6778 for pkg in graph_db.match_pkgs(atom):
6786 except portage.exception.InvalidDependString, e:
6787 portage.writemsg("!!! Invalid PROVIDE in " + \
6788 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6789 (task.root, task.cpv, e), noiselevel=-1)
6795 # Check the deps of parent nodes to ensure that
6796 # the chosen task produces a leaf node. Maybe
6797 # this can be optimized some more to make the
6798 # best possible choice, but the current algorithm
6799 # is simple and should be near optimal for most
6802 for parent in mygraph.parent_nodes(task):
6803 parent_deps.update(mygraph.child_nodes(parent,
6804 ignore_priority=DepPriority.MEDIUM_SOFT))
6805 parent_deps.remove(task)
6806 if min_parent_deps is None or \
6807 len(parent_deps) < min_parent_deps:
6808 min_parent_deps = len(parent_deps)
6811 if uninst_task is not None:
6812 # The uninstall is performed only after blocking
6813 # packages have been merged on top of it. File
6814 # collisions between blocking packages are detected
6815 # and removed from the list of files to be uninstalled.
6816 scheduled_uninstalls.add(uninst_task)
6817 parent_nodes = mygraph.parent_nodes(uninst_task)
6819 # Reverse the parent -> uninstall edges since we want
6820 # to do the uninstall after blocking packages have
6821 # been merged on top of it.
6822 mygraph.remove(uninst_task)
6823 for blocked_pkg in parent_nodes:
6824 mygraph.add(blocked_pkg, uninst_task,
6825 priority=BlockerDepPriority.instance)
6826 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
6827 scheduler_graph.add(blocked_pkg, uninst_task,
6828 priority=BlockerDepPriority.instance)
6831 # None of the Uninstall tasks are acceptable, so
6832 # the corresponding blockers are unresolvable.
6833 # We need to drop an Uninstall task here in order
6834 # to avoid the circular deps code path, but the
6835 # blocker will still be counted as an unresolved
6837 for node in myblocker_uninstalls.leaf_nodes():
6839 mygraph.remove(node)
6843 ignored_uninstall_tasks.add(node)
6846 # After dropping an Uninstall task, reset
6847 # the state variables for leaf node selection and
6848 # continue trying to select leaf nodes.
6850 accept_root_node = False
6853 if not selected_nodes:
6854 self._circular_deps_for_display = mygraph
6855 raise self._unknown_internal_error()
6857 # At this point, we've succeeded in selecting one or more nodes, so
6858 # it's now safe to reset the prefer_asap and accept_root_node flags
6859 # to their default states.
6861 accept_root_node = False
6863 mygraph.difference_update(selected_nodes)
6865 for node in selected_nodes:
6866 if isinstance(node, Package) and \
6867 node.operation == "nomerge":
6870 # Handle interactions between blockers
6871 # and uninstallation tasks.
6872 solved_blockers = set()
6874 if isinstance(node, Package) and \
6875 "uninstall" == node.operation:
6876 have_uninstall_task = True
6879 vardb = self.trees[node.root]["vartree"].dbapi
6880 previous_cpv = vardb.match(node.slot_atom)
6882 # The package will be replaced by this one, so remove
6883 # the corresponding Uninstall task if necessary.
6884 previous_cpv = previous_cpv[0]
6886 ("installed", node.root, previous_cpv, "uninstall")
6888 mygraph.remove(uninst_task)
6892 if uninst_task is not None and \
6893 uninst_task not in ignored_uninstall_tasks and \
6894 myblocker_uninstalls.contains(uninst_task):
6895 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
6896 myblocker_uninstalls.remove(uninst_task)
6897 # Discard any blockers that this Uninstall solves.
6898 for blocker in blocker_nodes:
6899 if not myblocker_uninstalls.child_nodes(blocker):
6900 myblocker_uninstalls.remove(blocker)
6901 solved_blockers.add(blocker)
6903 retlist.append(node)
6905 if (isinstance(node, Package) and \
6906 "uninstall" == node.operation) or \
6907 (uninst_task is not None and \
6908 uninst_task in scheduled_uninstalls):
6909 # Include satisfied blockers in the merge list
6910 # since the user might be interested and also
6911 # it serves as an indicator that blocking packages
6912 # will be temporarily installed simultaneously.
6913 for blocker in solved_blockers:
6914 retlist.append(Blocker(atom=blocker.atom,
6915 root=blocker.root, eapi=blocker.eapi,
6918 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
6919 for node in myblocker_uninstalls.root_nodes():
6920 unsolvable_blockers.add(node)
6922 for blocker in unsolvable_blockers:
6923 retlist.append(blocker)
6925 # If any Uninstall tasks need to be executed in order
6926 # to avoid a conflict, complete the graph with any
6927 # dependencies that may have been initially
6928 # neglected (to ensure that unsafe Uninstall tasks
6929 # are properly identified and blocked from execution).
6930 if have_uninstall_task and \
6932 not unsolvable_blockers:
6933 self.myparams.add("complete")
6934 raise self._serialize_tasks_retry("")
6936 if unsolvable_blockers and \
6937 not self._accept_blocker_conflicts():
6938 self._unsatisfied_blockers_for_display = unsolvable_blockers
6939 self._serialized_tasks_cache = retlist[:]
6940 self._scheduler_graph = scheduler_graph
6941 raise self._unknown_internal_error()
6943 if self._slot_collision_info and \
6944 not self._accept_blocker_conflicts():
6945 self._serialized_tasks_cache = retlist[:]
6946 self._scheduler_graph = scheduler_graph
6947 raise self._unknown_internal_error()
6949 return retlist, scheduler_graph
6951 def _show_circular_deps(self, mygraph):
# Report a circular-dependency failure: shrink the graph down to the
# nodes that can actually participate in a cycle, display them with
# --tree formatting, then dump the raw graph and print advice about
# breaking cycles by disabling USE flags.
# NOTE(review): this listing is missing several original lines (the
# elimination loop around root_nodes, display_order initialization,
# parts of the writemsg argument lists), so the control flow below is
# visibly incomplete — confirm against the full source before editing.
6952 # No leaf nodes are available, so we have a circular
6953 # dependency panic situation. Reduce the noise level to a
6954 # minimum via repeated elimination of root nodes since they
6955 # have no parents and thus can not be part of a cycle.
6957 root_nodes = mygraph.root_nodes(
6958 ignore_priority=DepPriority.MEDIUM_SOFT)
6961 mygraph.difference_update(root_nodes)
6962 # Display the USE flags that are enabled on nodes that are part
6963 # of dependency cycles in case that helps the user decide to
6964 # disable some of them.
6966 tempgraph = mygraph.copy()
6967 while not tempgraph.empty():
6968 nodes = tempgraph.leaf_nodes()
# No leaf means every remaining node is in a cycle; fall back to the
# insertion-order first node so the drain loop still terminates.
6970 node = tempgraph.order[0]
6973 display_order.append(node)
6974 tempgraph.remove(node)
6975 display_order.reverse()
# Force a readable, tree-formatted display regardless of the user's
# --quiet/--verbose settings.
6976 self.myopts.pop("--quiet", None)
6977 self.myopts.pop("--verbose", None)
6978 self.myopts["--tree"] = True
6979 portage.writemsg("\n\n", noiselevel=-1)
6980 self.display(display_order)
6981 prefix = colorize("BAD", " * ")
6982 portage.writemsg("\n", noiselevel=-1)
6983 portage.writemsg(prefix + "Error: circular dependencies:\n",
6985 portage.writemsg("\n", noiselevel=-1)
6986 mygraph.debug_print()
6987 portage.writemsg("\n", noiselevel=-1)
6988 portage.writemsg(prefix + "Note that circular dependencies " + \
6989 "can often be avoided by temporarily\n", noiselevel=-1)
6990 portage.writemsg(prefix + "disabling USE flags that trigger " + \
6991 "optional dependencies.\n", noiselevel=-1)
6993 def _show_merge_list(self):
6994 if self._serialized_tasks_cache is not None and \
6995 not (self._displayed_list and \
6996 (self._displayed_list == self._serialized_tasks_cache or \
6997 self._displayed_list == \
6998 list(reversed(self._serialized_tasks_cache)))):
6999 display_list = self._serialized_tasks_cache[:]
7000 if "--tree" in self.myopts:
7001 display_list.reverse()
7002 self.display(display_list)
def _show_unsatisfied_blockers(self, blockers):
	"""Print an error explaining that the merge list shown by
	_show_merge_list() contains packages that block one another.

	The blockers argument is accepted for the caller's benefit; the
	individual blocker entries are not printed here (they appear in
	the merge list itself).
	"""
	from textwrap import wrap
	self._show_merge_list()
	msg = ("Error: The above package list contains "
		"packages which cannot be installed "
		"at the same time on the same system.")
	prefix = colorize("BAD", " * ")
	portage.writemsg("\n", noiselevel=-1)
	for wrapped_line in wrap(msg, 70):
		portage.writemsg("%s%s\n" % (prefix, wrapped_line), noiselevel=-1)
	if "--quiet" not in self.myopts:
		show_blocker_docs_link()
7017 def display(self, mylist, favorites=[], verbosity=None):
# Render the merge list for the user.  Builds per-package status rows
# (blocker/N/NS/U/D/R markers, colorized USE-flag changes, fetch
# sizes, repository annotations), optionally restructures the list as
# a tree (--tree), and writes the result to stdout.
# @param mylist: list of Package/Blocker nodes (replaced internally by
#	(node, depth, ordered) tuples once the tree pass has run)
# @param favorites: atoms requested by the user; consulted for
#	world-file candidacy (the [] default is never mutated here)
# @param verbosity: 1 (quiet), 2 (normal) or 3 (verbose); derived
#	from self.myopts when None
# NOTE(review): this listing is missing many original lines (else
# branches, try: headers, variable initializations) — several
# constructs below are visibly truncated; consult the full source.
7019 # This is used to prevent display_problems() from
7020 # redundantly displaying this exact same merge list
7021 # again via _show_merge_list().
7022 self._displayed_list = mylist
7024 if verbosity is None:
7025 verbosity = ("--quiet" in self.myopts and 1 or \
7026 "--verbose" in self.myopts and 3 or 2)
7027 favorites_set = InternalPackageSet(favorites)
7028 oneshot = "--oneshot" in self.myopts or \
7029 "--onlydeps" in self.myopts
7030 columns = "--columns" in self.myopts
7035 counters = PackageCounters()
# At minimum verbosity the USE string is suppressed entirely; the
# stub below shadows the full implementation that follows.
7037 if verbosity == 1 and "--verbose" not in self.myopts:
7038 def create_use_string(*args):
# Full implementation: build the colorized NAME="flag flag ..."
# string for one USE/USE_EXPAND group, marking new (%), changed (*)
# and forced (parenthesized) flags.
7041 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7043 is_new, reinst_flags,
7044 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7045 alphabetical=("--alphabetical" in self.myopts)):
7053 cur_iuse = set(cur_iuse)
7054 enabled_flags = cur_iuse.intersection(cur_use)
7055 removed_iuse = set(old_iuse).difference(cur_iuse)
7056 any_iuse = cur_iuse.union(old_iuse)
7057 any_iuse = list(any_iuse)
7059 for flag in any_iuse:
7062 reinst_flag = reinst_flags and flag in reinst_flags
7063 if flag in enabled_flags:
7065 if is_new or flag in old_use and \
7066 (all_flags or reinst_flag):
7067 flag_str = red(flag)
7068 elif flag not in old_iuse:
7069 flag_str = yellow(flag) + "%*"
7070 elif flag not in old_use:
7071 flag_str = green(flag) + "*"
7072 elif flag in removed_iuse:
7073 if all_flags or reinst_flag:
7074 flag_str = yellow("-" + flag) + "%"
7077 flag_str = "(" + flag_str + ")"
7078 removed.append(flag_str)
7081 if is_new or flag in old_iuse and \
7082 flag not in old_use and \
7083 (all_flags or reinst_flag):
7084 flag_str = blue("-" + flag)
7085 elif flag not in old_iuse:
7086 flag_str = yellow("-" + flag)
7087 if flag not in iuse_forced:
7089 elif flag in old_use:
7090 flag_str = green("-" + flag) + "*"
7092 if flag in iuse_forced:
7093 flag_str = "(" + flag_str + ")"
7095 enabled.append(flag_str)
7097 disabled.append(flag_str)
7100 ret = " ".join(enabled)
7102 ret = " ".join(enabled + disabled + removed)
7104 ret = '%s="%s" ' % (name, ret)
7107 repo_display = RepoDisplay(self.roots)
# Work on a copy of the dependency graph so the tree-layout edits
# below never disturb the real resolver graph.
7111 mygraph = self.digraph.copy()
7113 # If there are any Uninstall instances, add the corresponding
7114 # blockers to the digraph (useful for --tree display).
7116 executed_uninstalls = set(node for node in mylist \
7117 if isinstance(node, Package) and node.operation == "unmerge")
7119 for uninstall in self._blocker_uninstalls.leaf_nodes():
7120 uninstall_parents = \
7121 self._blocker_uninstalls.parent_nodes(uninstall)
7122 if not uninstall_parents:
7125 # Remove the corresponding "nomerge" node and substitute
7126 # the Uninstall node.
7127 inst_pkg = self._pkg_cache[
7128 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7130 mygraph.remove(inst_pkg)
7135 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7137 inst_pkg_blockers = []
7139 # Break the Package -> Uninstall edges.
7140 mygraph.remove(uninstall)
7142 # Resolution of a package's blockers
7143 # depend on it's own uninstallation.
7144 for blocker in inst_pkg_blockers:
7145 mygraph.add(uninstall, blocker)
7147 # Expand Package -> Uninstall edges into
7148 # Package -> Blocker -> Uninstall edges.
7149 for blocker in uninstall_parents:
7150 mygraph.add(uninstall, blocker)
7151 for parent in self._blocker_parents.parent_nodes(blocker):
7152 if parent != inst_pkg:
7153 mygraph.add(blocker, parent)
7155 # If the uninstall task did not need to be executed because
7156 # of an upgrade, display Blocker -> Upgrade edges since the
7157 # corresponding Blocker -> Uninstall edges will not be shown.
7159 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7160 if upgrade_node is not None and \
7161 uninstall not in executed_uninstalls:
7162 for blocker in uninstall_parents:
7163 mygraph.add(upgrade_node, blocker)
7165 unsatisfied_blockers = []
7170 if isinstance(x, Blocker) and not x.satisfied:
7171 unsatisfied_blockers.append(x)
# --tree: convert the flat list into (node, depth, ordered) rows by
# walking parent edges; shown_edges/traversed_nodes guard against
# re-printing edges and endless cycles.
7174 if "--tree" in self.myopts:
7175 depth = len(tree_nodes)
7176 while depth and graph_key not in \
7177 mygraph.child_nodes(tree_nodes[depth-1]):
7180 tree_nodes = tree_nodes[:depth]
7181 tree_nodes.append(graph_key)
7182 display_list.append((x, depth, True))
7183 shown_edges.add((graph_key, tree_nodes[depth-1]))
7185 traversed_nodes = set() # prevent endless circles
7186 traversed_nodes.add(graph_key)
# Recursively emit each node's (single chosen) parent chain before
# the node itself, preferring parents that do not close a cycle.
7187 def add_parents(current_node, ordered):
7189 # Do not traverse to parents if this node is an
7190 # an argument or a direct member of a set that has
7191 # been specified as an argument (system or world).
7192 if current_node not in self._set_nodes:
7193 parent_nodes = mygraph.parent_nodes(current_node)
7195 child_nodes = set(mygraph.child_nodes(current_node))
7196 selected_parent = None
7197 # First, try to avoid a direct cycle.
7198 for node in parent_nodes:
7199 if not isinstance(node, (Blocker, Package)):
7201 if node not in traversed_nodes and \
7202 node not in child_nodes:
7203 edge = (current_node, node)
7204 if edge in shown_edges:
7206 selected_parent = node
7208 if not selected_parent:
7209 # A direct cycle is unavoidable.
7210 for node in parent_nodes:
7211 if not isinstance(node, (Blocker, Package)):
7213 if node not in traversed_nodes:
7214 edge = (current_node, node)
7215 if edge in shown_edges:
7217 selected_parent = node
7220 shown_edges.add((current_node, selected_parent))
7221 traversed_nodes.add(selected_parent)
7222 add_parents(selected_parent, False)
7223 display_list.append((current_node,
7224 len(tree_nodes), ordered))
7225 tree_nodes.append(current_node)
7227 add_parents(graph_key, True)
7229 display_list.append((x, depth, True))
7230 mylist = display_list
7231 for x in unsatisfied_blockers:
7232 mylist.append((x, 0, True))
# Reverse pass: drop consecutive duplicate rows produced by the tree
# traversal and remember the depth of the last real merge.
7234 last_merge_depth = 0
7235 for i in xrange(len(mylist)-1,-1,-1):
7236 graph_key, depth, ordered = mylist[i]
7237 if not ordered and depth == 0 and i > 0 \
7238 and graph_key == mylist[i-1][0] and \
7239 mylist[i-1][1] == 0:
7240 # An ordered node got a consecutive duplicate when the tree was
7244 if ordered and graph_key[-1] != "nomerge":
7245 last_merge_depth = depth
7247 if depth >= last_merge_depth or \
7248 i < len(mylist) - 1 and \
7249 depth >= mylist[i+1][1]:
7252 from portage import flatten
7253 from portage.dep import use_reduce, paren_reduce
7254 # files to fetch list - avoids counting a same file twice
7255 # in size display (verbose mode)
7258 # Use this set to detect when all the "repoadd" strings are "[0]"
7259 # and disable the entire repo display in this case.
# Main rendering loop: build one output row per entry in mylist.
7262 for mylist_index in xrange(len(mylist)):
7263 x, depth, ordered = mylist[mylist_index]
7267 portdb = self.trees[myroot]["porttree"].dbapi
7268 bindb = self.trees[myroot]["bintree"].dbapi
7269 vardb = self.trees[myroot]["vartree"].dbapi
7270 vartree = self.trees[myroot]["vartree"]
7271 pkgsettings = self.pkgsettings[myroot]
7274 indent = " " * depth
# Blocker rows: "b" (satisfied) or "B" (unsatisfied) marker plus the
# resolved atom and the packages being blocked.
7276 if isinstance(x, Blocker):
7278 blocker_style = "PKG_BLOCKER_SATISFIED"
7279 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
7281 blocker_style = "PKG_BLOCKER"
7282 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
7284 counters.blocks += 1
7286 counters.blocks_satisfied += 1
7287 resolved = portage.key_expand(
7288 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7289 if "--columns" in self.myopts and "--quiet" in self.myopts:
7290 addl += " " + colorize(blocker_style, resolved)
7292 addl = "[%s %s] %s%s" % \
7293 (colorize(blocker_style, "blocks"),
7294 addl, indent, colorize(blocker_style, resolved))
7295 block_parents = self._blocker_parents.parent_nodes(x)
7296 block_parents = set([pnode[2] for pnode in block_parents])
7297 block_parents = ", ".join(block_parents)
7299 addl += colorize(blocker_style,
7300 " (\"%s\" is blocking %s)") % \
7301 (str(x.atom).lstrip("!"), block_parents)
7303 addl += colorize(blocker_style,
7304 " (is blocking %s)") % block_parents
7305 if isinstance(x, Blocker) and x.satisfied:
7310 blockers.append(addl)
# Package rows: derive the status letters (R/U/D/N/NS/uninstall).
7313 pkg_merge = ordered and pkg_status == "merge"
7314 if not pkg_merge and pkg_status == "merge":
7315 pkg_status = "nomerge"
7316 built = pkg_type != "ebuild"
7317 installed = pkg_type == "installed"
7319 metadata = pkg.metadata
7321 repo_name = metadata["repository"]
7322 if pkg_type == "ebuild":
7323 ebuild_path = portdb.findname(pkg_key)
7324 if not ebuild_path: # shouldn't happen
7325 raise portage.exception.PackageNotFound(pkg_key)
7326 repo_path_real = os.path.dirname(os.path.dirname(
7327 os.path.dirname(ebuild_path)))
7329 repo_path_real = portdb.getRepositoryPath(repo_name)
7330 pkg_use = list(pkg.use.enabled)
7332 restrict = flatten(use_reduce(paren_reduce(
7333 pkg.metadata["RESTRICT"]), uselist=pkg_use))
7334 except portage.exception.InvalidDependString, e:
7335 if not pkg.installed:
7336 show_invalid_depstring_notice(x,
7337 pkg.metadata["RESTRICT"], str(e))
7341 if "ebuild" == pkg_type and x[3] != "nomerge" and \
7342 "fetch" in restrict:
7345 counters.restrict_fetch += 1
7346 if portdb.fetch_check(pkg_key, pkg_use):
7349 counters.restrict_fetch_satisfied += 1
7351 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
7352 #param is used for -u, where you still *do* want to see when something is being upgraded.
7355 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
7356 if vardb.cpv_exists(pkg_key):
# Exact version already installed: mark as a reinstall ("R").
7357 addl=" "+yellow("R")+fetch+" "
7360 counters.reinst += 1
7361 elif pkg_status == "uninstall":
7362 counters.uninst += 1
7363 # filter out old-style virtual matches
7364 elif installed_versions and \
7365 portage.cpv_getkey(installed_versions[0]) == \
7366 portage.cpv_getkey(pkg_key):
7367 myinslotlist = vardb.match(pkg.slot_atom)
7368 # If this is the first install of a new-style virtual, we
7369 # need to filter out old-style virtual matches.
7370 if myinslotlist and \
7371 portage.cpv_getkey(myinslotlist[0]) != \
7372 portage.cpv_getkey(pkg_key):
7375 myoldbest = myinslotlist[:]
7377 if not portage.dep.cpvequal(pkg_key,
7378 portage.best([pkg_key] + myoldbest)):
# Downgrade: best version is the already-installed one ("UD").
7380 addl += turquoise("U")+blue("D")
7382 counters.downgrades += 1
7385 addl += turquoise("U") + " "
7387 counters.upgrades += 1
7389 # New slot, mark it new.
7390 addl = " " + green("NS") + fetch + " "
7391 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
7393 counters.newslot += 1
7395 if "--changelog" in self.myopts:
7396 inst_matches = vardb.match(pkg.slot_atom)
7398 changelogs.extend(self.calc_changelog(
7399 portdb.findname(pkg_key),
7400 inst_matches[0], pkg_key))
7402 addl = " " + green("N") + " " + fetch + " "
# USE-flag comparison: compute forced flags and the old/new flag sets
# so create_use_string() can colorize the differences.
7411 forced_flags = set()
7412 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
7413 forced_flags.update(pkgsettings.useforce)
7414 forced_flags.update(pkgsettings.usemask)
7416 cur_use = [flag for flag in pkg.use.enabled \
7417 if flag in pkg.iuse.all]
7418 cur_iuse = sorted(pkg.iuse.all)
7420 if myoldbest and myinslotlist:
7421 previous_cpv = myoldbest[0]
7423 previous_cpv = pkg.cpv
7424 if vardb.cpv_exists(previous_cpv):
7425 old_iuse, old_use = vardb.aux_get(
7426 previous_cpv, ["IUSE", "USE"])
7427 old_iuse = list(set(
7428 filter_iuse_defaults(old_iuse.split())))
7430 old_use = old_use.split()
7437 old_use = [flag for flag in old_use if flag in old_iuse]
7439 use_expand = pkgsettings["USE_EXPAND"].lower().split()
7441 use_expand.reverse()
7442 use_expand_hidden = \
7443 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
# Split a flat flag list into USE_EXPAND groups (e.g. VIDEO_CARDS),
# with the remainder going under the plain "USE" key.
7445 def map_to_use_expand(myvals, forcedFlags=False,
7449 for exp in use_expand:
7452 for val in myvals[:]:
7453 if val.startswith(exp.lower()+"_"):
7454 if val in forced_flags:
7455 forced[exp].add(val[len(exp)+1:])
7456 ret[exp].append(val[len(exp)+1:])
7459 forced["USE"] = [val for val in myvals \
7460 if val in forced_flags]
7462 for exp in use_expand_hidden:
7468 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
7469 # are the only thing that triggered reinstallation.
7470 reinst_flags_map = {}
7471 reinstall_for_flags = self._reinstall_nodes.get(pkg)
7472 reinst_expand_map = None
7473 if reinstall_for_flags:
7474 reinst_flags_map = map_to_use_expand(
7475 list(reinstall_for_flags), removeHidden=False)
7476 for k in list(reinst_flags_map):
7477 if not reinst_flags_map[k]:
7478 del reinst_flags_map[k]
7479 if not reinst_flags_map.get("USE"):
7480 reinst_expand_map = reinst_flags_map.copy()
7481 reinst_expand_map.pop("USE", None)
7482 if reinst_expand_map and \
7483 not set(reinst_expand_map).difference(
7485 use_expand_hidden = \
7486 set(use_expand_hidden).difference(
7489 cur_iuse_map, iuse_forced = \
7490 map_to_use_expand(cur_iuse, forcedFlags=True)
7491 cur_use_map = map_to_use_expand(cur_use)
7492 old_iuse_map = map_to_use_expand(old_iuse)
7493 old_use_map = map_to_use_expand(old_use)
7496 use_expand.insert(0, "USE")
7498 for key in use_expand:
7499 if key in use_expand_hidden:
7501 verboseadd += create_use_string(key.upper(),
7502 cur_iuse_map[key], iuse_forced[key],
7503 cur_use_map[key], old_iuse_map[key],
7504 old_use_map[key], is_new,
7505 reinst_flags_map.get(key))
# Verbose mode: total up distinct source files so each file's size is
# only counted once across the whole merge list.
7510 if pkg_type == "ebuild" and pkg_merge:
7512 myfilesdict = portdb.getfetchsizes(pkg_key,
7513 useflags=pkg_use, debug=self.edebug)
7514 except portage.exception.InvalidDependString, e:
7515 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
7516 show_invalid_depstring_notice(x, src_uri, str(e))
7519 if myfilesdict is None:
7520 myfilesdict="[empty/missing/bad digest]"
7522 for myfetchfile in myfilesdict:
7523 if myfetchfile not in myfetchlist:
7524 mysize+=myfilesdict[myfetchfile]
7525 myfetchlist.append(myfetchfile)
7527 counters.totalsize += mysize
7528 verboseadd += format_size(mysize)
7531 # assign index for a previous version in the same slot
7532 has_previous = False
7533 repo_name_prev = None
7534 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
7536 slot_matches = vardb.match(slot_atom)
7539 repo_name_prev = vardb.aux_get(slot_matches[0],
7542 # now use the data to generate output
7543 if pkg.installed or not has_previous:
7544 repoadd = repo_display.repoStr(repo_path_real)
7546 repo_path_prev = None
7548 repo_path_prev = portdb.getRepositoryPath(
7550 if repo_path_prev == repo_path_real:
7551 repoadd = repo_display.repoStr(repo_path_real)
7553 repoadd = "%s=>%s" % (
7554 repo_display.repoStr(repo_path_prev),
7555 repo_display.repoStr(repo_path_real))
7557 repoadd_set.add(repoadd)
7559 xs = [portage.cpv_getkey(pkg_key)] + \
7560 list(portage.catpkgsplit(pkg_key)[2:])
7567 if "COLUMNWIDTH" in self.settings:
7569 mywidth = int(self.settings["COLUMNWIDTH"])
7570 except ValueError, e:
7571 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
7573 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
7574 self.settings["COLUMNWIDTH"], noiselevel=-1)
7576 oldlp = mywidth - 30
7579 # Convert myoldbest from a list to a string.
7583 for pos, key in enumerate(myoldbest):
7584 key = portage.catpkgsplit(key)[2] + \
7585 "-" + portage.catpkgsplit(key)[3]
7586 if key[-3:] == "-r0":
7588 myoldbest[pos] = key
7589 myoldbest = blue("["+", ".join(myoldbest)+"]")
7592 root_config = self.roots[myroot]
7593 system_set = root_config.sets["system"]
7594 world_set = root_config.sets["world"]
7599 pkg_system = system_set.findAtomForPackage(pkg)
7600 pkg_world = world_set.findAtomForPackage(pkg)
7601 if not (oneshot or pkg_world) and \
7602 myroot == self.target_root and \
7603 favorites_set.findAtomForPackage(pkg):
7604 # Maybe it will be added to world now.
7605 if create_world_atom(pkg, favorites_set, root_config):
7607 except portage.exception.InvalidDependString:
7608 # This is reported elsewhere if relevant.
# Pick the color class for a package string based on merge status and
# system/world membership.
7611 def pkgprint(pkg_str):
7614 return colorize("PKG_MERGE_SYSTEM", pkg_str)
7616 return colorize("PKG_MERGE_WORLD", pkg_str)
7618 return colorize("PKG_MERGE", pkg_str)
7619 elif pkg_status == "uninstall":
7620 return colorize("PKG_UNINSTALL", pkg_str)
7623 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
7625 return colorize("PKG_NOMERGE_WORLD", pkg_str)
7627 return colorize("PKG_NOMERGE", pkg_str)
7630 properties = flatten(use_reduce(paren_reduce(
7631 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
7632 except portage.exception.InvalidDependString, e:
7633 if not pkg.installed:
7634 show_invalid_depstring_notice(pkg,
7635 pkg.metadata["PROPERTIES"], str(e))
7639 interactive = "interactive" in properties
7640 if interactive and pkg.operation == "merge":
7641 addl = colorize("WARN", "I") + addl[1:]
7643 counters.interactive += 1
# Assemble the final row, with separate layouts for --columns,
# --columns --quiet, and the plain formats.
7648 if "--columns" in self.myopts:
7649 if "--quiet" in self.myopts:
7650 myprint=addl+" "+indent+pkgprint(pkg_cp)
7651 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
7652 myprint=myprint+myoldbest
7653 myprint=myprint+darkgreen("to "+x[1])
7657 myprint = "[%s] %s%s" % \
7658 (pkgprint(pkg_status.ljust(13)),
7659 indent, pkgprint(pkg.cp))
7661 myprint = "[%s %s] %s%s" % \
7662 (pkgprint(pkg.type_name), addl,
7663 indent, pkgprint(pkg.cp))
7664 if (newlp-nc_len(myprint)) > 0:
7665 myprint=myprint+(" "*(newlp-nc_len(myprint)))
7666 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
7667 if (oldlp-nc_len(myprint)) > 0:
7668 myprint=myprint+" "*(oldlp-nc_len(myprint))
7669 myprint=myprint+myoldbest
7670 myprint += darkgreen("to " + pkg.root)
7673 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
7675 myprint = "[" + pkg_type + " " + addl + "] "
7676 myprint += indent + pkgprint(pkg_key) + " " + \
7677 myoldbest + darkgreen("to " + myroot)
7679 if "--columns" in self.myopts:
7680 if "--quiet" in self.myopts:
7681 myprint=addl+" "+indent+pkgprint(pkg_cp)
7682 myprint=myprint+" "+green(xs[1]+xs[2])+" "
7683 myprint=myprint+myoldbest
7687 myprint = "[%s] %s%s" % \
7688 (pkgprint(pkg_status.ljust(13)),
7689 indent, pkgprint(pkg.cp))
7691 myprint = "[%s %s] %s%s" % \
7692 (pkgprint(pkg.type_name), addl,
7693 indent, pkgprint(pkg.cp))
7694 if (newlp-nc_len(myprint)) > 0:
7695 myprint=myprint+(" "*(newlp-nc_len(myprint)))
7696 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
7697 if (oldlp-nc_len(myprint)) > 0:
7698 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
7699 myprint += myoldbest
7702 myprint = "[%s] %s%s %s" % \
7703 (pkgprint(pkg_status.ljust(13)),
7704 indent, pkgprint(pkg.cpv),
7707 myprint = "[%s %s] %s%s %s" % \
7708 (pkgprint(pkg_type), addl, indent,
7709 pkgprint(pkg.cpv), myoldbest)
7711 if columns and pkg.operation == "uninstall":
7713 p.append((myprint, verboseadd, repoadd))
# Warn when portage itself is about to be replaced mid-list: emerge
# will restart after merging itself.
7715 if "--tree" not in self.myopts and \
7716 "--quiet" not in self.myopts and \
7717 not self._opts_no_restart.intersection(self.myopts) and \
7718 pkg.root == self._running_root.root and \
7719 portage.match_from_list(
7720 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
7721 not vardb.cpv_exists(pkg.cpv) and \
7722 "--quiet" not in self.myopts:
7723 if mylist_index < len(mylist) - 1:
7724 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
7725 p.append(colorize("WARN", " then resume the merge."))
# Emit the accumulated rows; a row is either a plain string or a
# (myprint, verboseadd, repoadd) tuple.
7728 show_repos = repoadd_set and repoadd_set != set(["0"])
7731 if isinstance(x, basestring):
7732 out.write("%s\n" % (x,))
7735 myprint, verboseadd, repoadd = x
7738 myprint += " " + verboseadd
7740 if show_repos and repoadd:
7741 myprint += " " + teal("[%s]" % repoadd)
7743 out.write("%s\n" % (myprint,))
7752 sys.stdout.write(str(repo_display))
7754 if "--changelog" in self.myopts:
7756 for revision,text in changelogs:
7757 print bold('*'+revision)
7758 sys.stdout.write(text)
7763 def display_problems(self):
# Public entry point for reporting dependency-graph problems.  The
# body text below (lines 7765-7772) is the method's docstring; its
# triple-quote delimiters are missing from this listing.
7765 Display problems with the dependency graph such as slot collisions.
7766 This is called internally by display() to show the problems _after_
7767 the merge list where it is most likely to be seen, but if display()
7768 is not going to be called then this method should be called explicitly
7769 to ensure that the user is notified of problems with the graph.
7771 All output goes to stderr, except for unsatisfied dependencies which
7772 go to stdout for parsing by programs such as autounmask.
7775 # Note that show_masked_packages() sends it's output to
7776 # stdout, and some programs such as autounmask parse the
7777 # output in cases when emerge bails out. However, when
7778 # show_masked_packages() is called for installed packages
7779 # here, the message is a warning that is more appropriate
7780 # to send to stderr, so temporarily redirect stdout to
7781 # stderr. TODO: Fix output code so there's a cleaner way
7782 # to redirect everything to stderr.
# NOTE(review): the original saves sys.stdout before this assignment
# and restores it in a try/finally around _display_problems(); those
# lines (7783-7786 and 7789-7793) are missing from this listing.
7787 sys.stdout = sys.stderr
7788 self._display_problems()
7794 # This goes to stdout for parsing by programs like autounmask.
7795 for pargs, kwargs in self._unsatisfied_deps_for_display:
7796 self._show_unsatisfied_dep(*pargs, **kwargs)
7798 def _display_problems(self):
# Emit the individual problem reports to stderr: circular deps,
# unsatisfied blockers or slot collisions, world-file issues, missing
# ebuilds, package.provided conflicts, and masked installed packages.
# NOTE(review): this listing is missing several original lines (else
# branches, arg_refs/msg/parent initializations), so some constructs
# below are visibly truncated.
7799 if self._circular_deps_for_display is not None:
7800 self._show_circular_deps(
7801 self._circular_deps_for_display)
7803 # The user is only notified of a slot conflict if
7804 # there are no unresolvable blocker conflicts.
7805 if self._unsatisfied_blockers_for_display is not None:
7806 self._show_unsatisfied_blockers(
7807 self._unsatisfied_blockers_for_display)
7809 self._show_slot_collision_notice()
7811 # TODO: Add generic support for "set problem" handlers so that
7812 # the below warnings aren't special cases for world only.
7814 if self._missing_args:
7815 world_problems = False
7816 if "world" in self._sets:
7817 # Filter out indirect members of world (from nested sets)
7818 # since only direct members of world are desired here.
7819 world_set = self.roots[self.target_root].sets["world"]
7820 for arg, atom in self._missing_args:
7821 if arg.name == "world" and atom in world_set:
7822 world_problems = True
7826 sys.stderr.write("\n!!! Problems have been " + \
7827 "detected with your world file\n")
7828 sys.stderr.write("!!! Please run " + \
7829 green("emaint --check world")+"\n\n")
7831 if self._missing_args:
7832 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
7833 " Ebuilds for the following packages are either all\n")
7834 sys.stderr.write(colorize("BAD", "!!!") + \
7835 " masked or don't exist:\n")
7836 sys.stderr.write(" ".join(str(atom) for arg, atom in \
7837 self._missing_args) + "\n")
# package.provided conflicts: group the offending atoms by the
# argument that pulled each one in, then print a WARNING block.
7839 if self._pprovided_args:
7841 for arg, atom in self._pprovided_args:
7842 if isinstance(arg, SetArg):
7844 arg_atom = (atom, atom)
7847 arg_atom = (arg.arg, atom)
7848 refs = arg_refs.setdefault(arg_atom, [])
7849 if parent not in refs:
7852 msg.append(bad("\nWARNING: "))
7853 if len(self._pprovided_args) > 1:
7854 msg.append("Requested packages will not be " + \
7855 "merged because they are listed in\n")
7857 msg.append("A requested package will not be " + \
7858 "merged because it is listed in\n")
7859 msg.append("package.provided:\n\n")
7860 problems_sets = set()
7861 for (arg, atom), refs in arg_refs.iteritems():
7864 problems_sets.update(refs)
7866 ref_string = ", ".join(["'%s'" % name for name in refs])
7867 ref_string = " pulled in by " + ref_string
7868 msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
7870 if "world" in problems_sets:
7871 msg.append("This problem can be solved in one of the following ways:\n\n")
7872 msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
7873 msg.append("  B) Uninstall offending packages (cleans them from world).\n")
7874 msg.append("  C) Remove offending entries from package.provided.\n\n")
7875 msg.append("The best course of action depends on the reason that an offending\n")
7876 msg.append("package.provided entry exists.\n\n")
7877 sys.stderr.write("".join(msg))
# Warn about installed packages that are now masked.
7879 masked_packages = []
7880 for pkg in self._masked_installed:
7881 root_config = pkg.root_config
7882 pkgsettings = self.pkgsettings[pkg.root]
7883 mreasons = get_masking_status(pkg, pkgsettings, root_config)
7884 masked_packages.append((root_config, pkgsettings,
7885 pkg.cpv, pkg.metadata, mreasons))
7887 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
7888 " The following installed packages are masked:\n")
7889 show_masked_packages(masked_packages)
def calc_changelog(self,ebuildpath,current,next):
	"""Collect ChangeLog entries between two versions of a package.

	@param ebuildpath: path to the ebuild about to be merged (its
		directory is expected to contain a ChangeLog file)
	@param current: cpv of the currently installed version
	@param next: cpv of the version about to be merged
	@return: list of (release, text) tuples for releases newer than
		`current`, in the order they appear in the ChangeLog; [] when
		the ChangeLog is missing/unreadable or `current` is not found.
	"""
	if ebuildpath == None or not os.path.exists(ebuildpath):
		return []
	# Normalize both versions to bare "pkg-ver" form without any -r0.
	current = '-'.join(portage.catpkgsplit(current)[1:])
	if current.endswith('-r0'):
		current = current[:-3]
	next = '-'.join(portage.catpkgsplit(next)[1:])
	if next.endswith('-r0'):
		next = next[:-3]
	changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
	try:
		changelog = open(changelogpath).read()
	except SystemExit:
		raise # Needed else can't exit
	except Exception:
		# Best-effort: a missing or unreadable ChangeLog shows nothing.
		return []
	divisions = self.find_changelog_tags(changelog)
	# skip entries for all revisions above the one we are about to emerge
	for i in range(len(divisions)):
		if divisions[i][0]==next:
			divisions = divisions[i:]
			break
	# find out how many entries we are going to display
	for i in range(len(divisions)):
		if divisions[i][0]==current:
			divisions = divisions[:i]
			break
	else:
		# couldn't find the current revision in the list; display nothing
		return []
	return divisions
def find_changelog_tags(self,changelog):
	"""Split raw ChangeLog text into per-release sections.

	Scans for header lines of the form "*<release> (date)" and returns a
	list of (release, text) tuples, where text is everything between that
	header and the next one. A trailing ".ebuild" suffix and a "-r0"
	revision are stripped from release names.

	@param changelog: full ChangeLog file contents
	@return: list of (release, text) tuples in file order
	"""
	divs = []
	release = None
	while 1:
		match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
		if match is None:
			# No further headers: flush the final section and finish.
			if release is not None:
				divs.append((release,changelog))
			return divs
		if release is not None:
			divs.append((release,changelog[:match.start()]))
		changelog = changelog[match.end():]
		release = match.group(1)
		if release.endswith('.ebuild'):
			release = release[:-7]
		if release.endswith('-r0'):
			release = release[:-3]
def saveNomergeFavorites(self):
	"""Find atoms in favorites that are not in the mergelist and add them
	to the world file if necessary."""
	# NOTE(review): this copy of the source has lines elided (early
	# returns, world_set lock/unlock, guards); indentation below is
	# reconstructed -- verify against upstream before relying on it.
	# Never touch the world file for dry-run / partial-merge modes.
	for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
		"--oneshot", "--onlydeps", "--pretend"):
		if x in self.myopts:
			# (early return truncated in this copy)
	root_config = self.roots[self.target_root]
	world_set = root_config.sets["world"]

	world_locked = False
	if hasattr(world_set, "lock"):
		# (lock acquisition and world_locked update truncated)

	if hasattr(world_set, "load"):
		world_set.load() # maybe it's changed on disk

	args_set = self._sets["args"]
	portdb = self.trees[self.target_root]["porttree"].dbapi
	added_favorites = set()
	for x in self._set_nodes:
		pkg_type, root, pkg_key, pkg_status = x
		# Only packages that will NOT be merged are candidates here.
		if pkg_status != "nomerge":
			# (continue truncated)
		# ('try:' header truncated)
			myfavkey = create_world_atom(x, args_set, root_config)
			# (guard on myfavkey truncated)
			if myfavkey in added_favorites:
				# (continue truncated)
			added_favorites.add(myfavkey)
		except portage.exception.InvalidDependString, e:
			writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
				(pkg_key, str(e)), noiselevel=-1)
			writemsg("!!! see '%s'\n\n" % os.path.join(
				root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
	# (all_added list initialization truncated)
	for k in self._sets:
		# "args"/"world" and non-world-candidate sets are never recorded.
		if k in ("args", "world") or not root_config.sets[k].world_candidate:
			# (continue truncated)
		all_added.append(SETPREFIX + k)
	all_added.extend(added_favorites)
	# (sorting and the 'for a in all_added:' loop header truncated)
	print ">>> Recording %s in \"world\" favorites file..." % \
		colorize("INFORM", str(a))
	world_set.update(all_added)
	# (world_set unlock in a finally block truncated)
def loadResumeCommand(self, resume_data, skip_masked=False):
	"""
	Add a resume command to the graph and validate it in the process. This
	will raise a PackageNotFound exception if a package is not available.
	"""
	# NOTE(review): many lines are elided from this copy of the source
	# (guard returns, 'try:' headers, loop headers, else branches).
	# Indentation is reconstructed -- verify against upstream.
	if not isinstance(resume_data, dict):
		# (early return truncated)

	mergelist = resume_data.get("mergelist")
	if not isinstance(mergelist, list):
		# (fallback assignment truncated)

	fakedb = self.mydbapi
	# (trees binding and masked_tasks initialization truncated)
	serialized_tasks = []
	# (the 'for x in mergelist:' loop header is truncated)
		if not (isinstance(x, list) and len(x) == 4):
			# (continue truncated)
		pkg_type, myroot, pkg_key, action = x
		if pkg_type not in self.pkg_tree_map:
			# (continue truncated)
		if action != "merge":
			# (continue truncated)
		tree_type = self.pkg_tree_map[pkg_type]
		mydb = trees[myroot][tree_type].dbapi
		db_keys = list(self._trees_orig[myroot][
			tree_type].dbapi._aux_cache_keys)
		# ('try:' header truncated)
			metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
		# ('except KeyError:' handler header truncated)
			# It does not exist or it is corrupt.
			if action == "uninstall":
				# (continue truncated)
			raise portage.exception.PackageNotFound(pkg_key)
		installed = action == "uninstall"
		built = pkg_type != "ebuild"
		root_config = self.roots[myroot]
		pkg = Package(built=built, cpv=pkg_key,
			installed=installed, metadata=metadata,
			operation=action, root_config=root_config,
			# (remaining constructor arguments truncated)
		if pkg_type == "ebuild":
			pkgsettings = self.pkgsettings[myroot]
			pkgsettings.setcpv(pkg)
			pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
		self._pkg_cache[pkg] = pkg

		root_config = self.roots[pkg.root]
		if "merge" == pkg.operation and \
			not visible(root_config.settings, pkg):
			# (skip_masked branch partially truncated here)
			masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
			self._unsatisfied_deps_for_display.append(
				((pkg.root, "="+pkg.cpv), {"myparent":None}))

		fakedb[myroot].cpv_inject(pkg)
		serialized_tasks.append(pkg)
		self.spinner.update()

	if self._unsatisfied_deps_for_display:
		# (early return truncated)

	if not serialized_tasks or "--nodeps" in self.myopts:
		self._serialized_tasks_cache = serialized_tasks
		self._scheduler_graph = self.digraph
	# ('else:' branch header truncated; the following appears to run
	# only when a dependency graph must be rebuilt)
		self._select_package = self._select_pkg_from_graph
		self.myparams.add("selective")

		favorites = resume_data.get("favorites")
		args_set = self._sets["args"]
		if isinstance(favorites, list):
			args = self._load_favorites(favorites)
		# ('else:' fallback truncated)

		# ('try:' header truncated)
		for task in serialized_tasks:
			if isinstance(task, Package) and \
				task.operation == "merge":
				if not self._add_pkg(task, None):
					# (internal-error raise truncated)

		# Packages for argument atoms need to be explicitly
		# added via _add_pkg() so that they are included in the
		# digraph (needed at least for --tree display).
		# (the 'for arg in args:' loop header is truncated)
			for atom in arg.set:
				pkg, existing_node = self._select_package(
					arg.root_config.root, atom)
				if existing_node is None and \
					# (condition continuation truncated)
					if not self._add_pkg(pkg, Dependency(atom=atom,
						root=pkg.root, parent=arg)):
						# (internal-error raise truncated)

		# Allow unsatisfied deps here to avoid showing a masking
		# message for an unsatisfied dep that isn't necessarily
		# (comment continuation truncated)
		if not self._create_graph(allow_unsatisfied=True):
			# (internal-error raise truncated)

		if masked_tasks or self._unsatisfied_deps:
			# This probably means that a required package
			# was dropped via --skipfirst. It makes the
			# resume list invalid, so convert it to a
			# UnsatisfiedResumeDep exception.
			raise self.UnsatisfiedResumeDep(self,
				masked_tasks + self._unsatisfied_deps)
		self._serialized_tasks_cache = None
		# (retry/serialize logic truncated)
	except self._unknown_internal_error:
		# (handler body truncated; presumably returns False)
def _load_favorites(self, favorites):
	"""
	Use a list of favorites to resume state from a
	previous select_files() call. This creates similar
	DependencyArg instances to those that would have
	been created by the original select_files() call.
	This allows Package instances to be matched with
	DependencyArg instances during graph creation.
	"""
	# NOTE(review): loop headers, guards and the final return are elided
	# from this copy of the source; verify against upstream.
	root_config = self.roots[self.target_root]
	getSetAtoms = root_config.setconfig.getSetAtoms
	sets = root_config.sets
	# (args list initialization and 'for x in favorites:' header truncated)
		if not isinstance(x, basestring):
			# (continue truncated)
		if x in ("system", "world"):
			# (normalization to a SETPREFIX name truncated)
		if x.startswith(SETPREFIX):
			s = x[len(SETPREFIX):]
			# (membership check against `sets` truncated)
			# Recursively expand sets so that containment tests in
			# self._get_parent_sets() properly match atoms in nested
			# sets (like if world contains system).
			expanded_set = InternalPackageSet(
				initial_atoms=getSetAtoms(s))
			self._sets[s] = expanded_set
			args.append(SetArg(arg=x, set=expanded_set,
				root_config=root_config))
		# ('else:' branch header truncated)
			if not portage.isvalidatom(x):
				# (continue truncated)
			args.append(AtomArg(arg=x, atom=x,
				root_config=root_config))

	# Create the "args" package set from atoms and
	# packages given as arguments.
	args_set = self._sets["args"]
	# (the 'for arg in args:' loop header is truncated)
		if not isinstance(arg, (AtomArg, PackageArg)):
			# (continue truncated)
		# (myatom binding truncated)
		if myatom in args_set:
			# (continue truncated)
		args_set.add(myatom)
	self._set_atoms.update(chain(*self._sets.itervalues()))
	atom_arg_map = self._atom_arg_map
	# (the 'for arg in args:' loop header is truncated)
		for atom in arg.set:
			atom_key = (atom, arg.root_config.root)
			refs = atom_arg_map.get(atom_key)
			# ('if refs is None:' guard and refs initialization truncated)
				atom_arg_map[atom_key] = refs
			# (refs.append(arg) and the final 'return args' truncated)
class UnsatisfiedResumeDep(portage.exception.PortageException):
	"""
	A dependency of a resume list is not installed. This
	can occur when a required package is dropped from the
	merge list via --skipfirst.
	"""
	def __init__(self, depgraph, value):
		portage.exception.PortageException.__init__(self, value)
		# Keep a reference to the depgraph so callers can inspect the
		# unsatisfied dependencies that triggered this exception.
		self.depgraph = depgraph
class _internal_exception(portage.exception.PortageException):
	"""Base class for exceptions used internally by the depgraph."""
	def __init__(self, value=""):
		portage.exception.PortageException.__init__(self, value)
class _unknown_internal_error(_internal_exception):
	"""
	Used by the depgraph internally to terminate graph creation.
	The specific reason for the failure should have been dumped
	to stderr, unfortunately, the exact reason for the failure
	is unknown.
	"""
class _serialize_tasks_retry(_internal_exception):
	"""
	This is raised by the _serialize_tasks() method when it needs to
	be called again for some reason. The only case that it's currently
	used for is when neglected dependencies need to be added to the
	graph in order to avoid making a potentially unsafe decision.
	"""
class _dep_check_composite_db(portage.dbapi):
	"""
	A dbapi-like interface that is optimized for use in dep_check() calls.
	This is built on top of the existing depgraph package selection logic.
	Some packages that have been added to the graph may be masked from this
	view in order to influence the atom preference selection that occurs
	via dep_check().
	"""
	# NOTE(review): lines are elided throughout this copy of the class
	# (cache-hit returns, 'try:' headers, loop headers, guards);
	# indentation is reconstructed -- verify against upstream.

	def __init__(self, depgraph, root):
		portage.dbapi.__init__(self)
		self._depgraph = depgraph
		# (self._root assignment truncated in this copy)
		self._match_cache = {}
		self._cpv_pkg_map = {}

	def match(self, atom):
		# Results are memoized per atom in self._match_cache.
		ret = self._match_cache.get(atom)
		# (cache-hit return, orig_atom binding and ret init truncated)
		atom = self._dep_expand(atom)
		pkg, existing = self._depgraph._select_package(self._root, atom)
		# (no-match early path truncated)
		# Return the highest available from select_package() as well as
		# any matching slots in the graph db.
		# (slots set initialization truncated)
		slots.add(pkg.metadata["SLOT"])
		atom_cp = portage.dep_getkey(atom)
		if pkg.cp.startswith("virtual/"):
			# For new-style virtual lookahead that occurs inside
			# dep_check(), examine all slots. This is needed
			# so that newer slots will not unnecessarily be pulled in
			# when a satisfying lower slot is already installed. For
			# example, if virtual/jdk-1.4 is satisfied via kaffe then
			# there's no need to pull in a newer slot to satisfy a
			# virtual/jdk dependency.
			for db, pkg_type, built, installed, db_keys in \
				self._depgraph._filtered_trees[self._root]["dbs"]:
				for cpv in db.match(atom):
					if portage.cpv_getkey(cpv) != pkg.cp:
						# (continue truncated)
					slots.add(db.aux_get(cpv, ["SLOT"])[0])
		# (single-slot fast path truncated)
		if self._visible(pkg):
			self._cpv_pkg_map[pkg.cpv] = pkg
			# (ret.append(pkg.cpv) truncated)
		slots.remove(pkg.metadata["SLOT"])
		# (the 'while slots:' loop header is truncated)
			slot_atom = "%s:%s" % (atom_cp, slots.pop())
			pkg, existing = self._depgraph._select_package(
				self._root, slot_atom)
			# (no-package guard truncated)
			if not self._visible(pkg):
				# (continue truncated)
			self._cpv_pkg_map[pkg.cpv] = pkg
			# (ret.append(pkg.cpv) truncated)
		# (non-empty guard truncated)
		self._cpv_sort_ascending(ret)
		self._match_cache[orig_atom] = ret
		# (return of a copy of ret truncated)

	def _visible(self, pkg):
		# Installed packages are hidden unless running in selective mode.
		if pkg.installed and "selective" not in self._depgraph.myparams:
			# ('try:' header truncated)
			arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
			except (StopIteration, portage.exception.InvalidDependString):
				# (handler body truncated)
			# (further guards truncated; the following appears to be the
			# tail of an 'if not visible(' call)
			self._depgraph.pkgsettings[pkg.root], pkg):
			# (return False truncated)
		except portage.exception.InvalidDependString:
			# (handler body truncated)
		# (final 'return True' truncated)

	def _dep_expand(self, atom):
		"""
		This is only needed for old installed packages that may
		contain atoms that are not fully qualified with a specific
		category. Emulate the cpv_expand() function that's used by
		dbapi.match() in cases like this. If there are multiple
		matches, it's often due to a new-style virtual that has
		been added, so try to filter those out to avoid raising
		an AmbiguousPackageName exception.
		"""
		root_config = self._depgraph.roots[self._root]
		# (orig_atom binding truncated)
		expanded_atoms = self._depgraph._dep_expand(root_config, atom)
		if len(expanded_atoms) > 1:
			non_virtual_atoms = []
			for x in expanded_atoms:
				if not portage.dep_getkey(x).startswith("virtual/"):
					non_virtual_atoms.append(x)
			if len(non_virtual_atoms) == 1:
				expanded_atoms = non_virtual_atoms
		if len(expanded_atoms) > 1:
			# compatible with portage.cpv_expand()
			raise portage.exception.AmbiguousPackageName(
				[portage.dep_getkey(x) for x in expanded_atoms])
		# ('if expanded_atoms:' guard truncated)
			atom = expanded_atoms[0]
		# ('else:' branch header truncated)
			null_atom = insert_category_into_atom(atom, "null")
			null_cp = portage.dep_getkey(null_atom)
			cat, atom_pn = portage.catsplit(null_cp)
			virts_p = root_config.settings.get_virts_p().get(atom_pn)
			# ('if virts_p:' guard truncated)
				# Allow the resolver to choose which virtual.
				atom = insert_category_into_atom(atom, "virtual")
			# ('else:' branch header truncated)
				atom = insert_category_into_atom(atom, "null")
		# (final 'return atom' truncated)

	def aux_get(self, cpv, wants):
		# Missing metadata keys are reported as empty strings, matching
		# dbapi.aux_get() conventions.
		metadata = self._cpv_pkg_map[cpv].metadata
		return [metadata.get(x, "") for x in wants]
class _package_cache(dict):
	# Maps Package instances to themselves; inserting a package also
	# injects it into root_config.visible_pkgs when it is visible.
	def __init__(self, depgraph):
		# (a line is missing here in this copy, presumably
		# dict.__init__(self))
		self._depgraph = depgraph

	def __setitem__(self, k, v):
		dict.__setitem__(self, k, v)
		root_config = self._depgraph.roots[v.root]
		# NOTE(review): a 'try:' header is missing here in this copy --
		# the 'except' below has no visible matching try.
		if visible(root_config.settings, v) and \
			not (v.installed and \
			v.root_config.settings._getMissingKeywords(v.cpv, v.metadata)):
			root_config.visible_pkgs.cpv_inject(v)
		except portage.exception.InvalidDependString:
			# (handler body truncated; presumably 'pass' -- a package
			# with broken metadata is simply not marked visible)
class RepoDisplay(object):
	"""Maps repository paths to the small [N] indices shown in merge
	lists, and renders the "Portage tree and overlays" legend."""

	# NOTE(review): several lines are elided from this copy of the class
	# (set initialization, guards, the __str__ def line); indentation is
	# reconstructed -- verify against upstream.

	def __init__(self, roots):
		self._shown_repos = {}
		self._unknown_repo = False
		# (repo_paths set initialization truncated)
		for root_config in roots.itervalues():
			portdir = root_config.settings.get("PORTDIR")
			# (guard for empty PORTDIR truncated)
			repo_paths.add(portdir)
			overlays = root_config.settings.get("PORTDIR_OVERLAY")
			# (guard for empty PORTDIR_OVERLAY truncated)
			repo_paths.update(overlays.split())
		repo_paths = list(repo_paths)
		self._repo_paths = repo_paths
		# Resolve symlinks once so physical paths can be matched later.
		self._repo_paths_real = [ os.path.realpath(repo_path) \
			for repo_path in repo_paths ]

		# pre-allocate index for PORTDIR so that it always has index 0.
		for root_config in roots.itervalues():
			portdb = root_config.trees["porttree"].dbapi
			portdir = portdb.porttree_root
			self.repoStr(portdir)

	def repoStr(self, repo_path_real):
		# (membership guard around index() appears truncated here)
		real_index = self._repo_paths_real.index(repo_path_real)
		if real_index == -1:
			# (the "[?]" unknown-repo path is partially truncated)
			self._unknown_repo = True
		shown_repos = self._shown_repos
		repo_paths = self._repo_paths
		repo_path = repo_paths[real_index]
		index = shown_repos.get(repo_path)
		# ('if index is None:' guard truncated)
			index = len(shown_repos)
			shown_repos[repo_path] = index
		# (return of the index string truncated)

	# (the 'def __str__(self):' line and the output-list initialization
	# are missing from this copy)
		shown_repos = self._shown_repos
		unknown_repo = self._unknown_repo
		if shown_repos or self._unknown_repo:
			output.append("Portage tree and overlays:\n")
		show_repo_paths = list(shown_repos)
		for repo_path, repo_index in shown_repos.iteritems():
			show_repo_paths[repo_index] = repo_path
		# (guard around the enumerate loop truncated)
		for index, repo_path in enumerate(show_repo_paths):
			output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
		# ('if unknown_repo:' guard truncated)
		output.append(" "+teal("[?]") + \
			" indicates that the source repository could not be determined\n")
		return "".join(output)
class PackageCounters(object):
	"""Tallies of merge-list categories (upgrades, downgrades, new,
	blocks, ...); str() renders the "Total: N packages (...)" summary."""

	# NOTE(review): the __init__ def line and most counter
	# initializations (upgrades, downgrades, new, newslot, reinst,
	# uninst, blocks, totalsize, ...) are missing from this copy.
		self.blocks_satisfied = 0
		self.restrict_fetch = 0
		self.restrict_fetch_satisfied = 0
		self.interactive = 0

	# NOTE(review): the __str__ def line and the myoutput/details list
	# initializations are missing from this copy.
		total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
		myoutput.append("Total: %s package" % total_installs)
		if total_installs != 1:
			myoutput.append("s")
		if total_installs != 0:
			myoutput.append(" (")
		if self.upgrades > 0:
			details.append("%s upgrade" % self.upgrades)
			if self.upgrades > 1:
				# (pluralization append truncated)
		if self.downgrades > 0:
			details.append("%s downgrade" % self.downgrades)
			if self.downgrades > 1:
				# (pluralization append truncated)
		# (guard 'if self.new > 0:' truncated)
			details.append("%s new" % self.new)
		if self.newslot > 0:
			details.append("%s in new slot" % self.newslot)
			if self.newslot > 1:
				# (pluralization append truncated)
		# (guard 'if self.reinst > 0:' truncated)
			details.append("%s reinstall" % self.reinst)
		# (guard 'if self.uninst > 0:' and pluralization truncated)
			details.append("%s uninstall" % self.uninst)
		if self.interactive > 0:
			details.append("%s %s" % (self.interactive,
				colorize("WARN", "interactive")))
		myoutput.append(", ".join(details))
		if total_installs != 0:
			myoutput.append(")")
		myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
		if self.restrict_fetch:
			myoutput.append("\nFetch Restriction: %s package" % \
				self.restrict_fetch)
			if self.restrict_fetch > 1:
				myoutput.append("s")
		if self.restrict_fetch_satisfied < self.restrict_fetch:
			myoutput.append(bad(" (%s unsatisfied)") % \
				(self.restrict_fetch - self.restrict_fetch_satisfied))
		# (guard 'if self.blocks > 0:' truncated)
			myoutput.append("\nConflict: %s block" % \
				# (blocks count argument line truncated)
			myoutput.append("s")
			if self.blocks_satisfied < self.blocks:
				myoutput.append(bad(" (%s unsatisfied)") % \
					(self.blocks - self.blocks_satisfied))
		return "".join(myoutput)
class PollConstants(object):
	"""
	Provides POLL* constants that are equivalent to those from the
	select module, for use by PollSelectAdapter.
	"""
	names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
	# NOTE(review): the loop header that iterates over `names` (binding
	# k and a fallback value v) is missing from this copy of the source.
	# Each constant falls back to v when the select module lacks it.
	locals()[k] = getattr(select, k, v)
	# (cleanup of the class-body loop variables truncated)
class PollSelectAdapter(PollConstants):
	"""
	Use select to emulate a poll object, for
	systems that don't support poll().
	"""

	# NOTE(review): several lines are elided from this copy of the class
	# (the __init__ def line, argument guards, returns); indentation is
	# reconstructed -- verify against upstream.

	# (the 'def __init__(self):' line is missing here)
		self._registered = {}
		# Cached [rlist, wlist, xlist] arguments for select();
		# invalidated (set to None) whenever registrations change.
		self._select_args = [[], [], []]

	def register(self, fd, *args):
		"""
		Only POLLIN is currently supported!
		"""
		# (argument-count guard 'if len(args) > 1:' and the raise
		# TypeError( opener are truncated; visible continuation:)
			"register expected at most 2 arguments, got " + \
			repr(1 + len(args)))
		# Default eventmask when the caller supplies none:
		eventmask = PollConstants.POLLIN | \
			PollConstants.POLLPRI | PollConstants.POLLOUT
		# (use of an explicitly passed eventmask truncated)
		self._registered[fd] = eventmask
		self._select_args = None

	def unregister(self, fd):
		self._select_args = None
		del self._registered[fd]

	def poll(self, *args):
		# (argument-count guard and the raise TypeError( opener are
		# truncated; visible continuation:)
			"poll expected at most 2 arguments, got " + \
			repr(1 + len(args)))
		# (timeout extraction from args truncated)
		select_args = self._select_args
		if select_args is None:
			select_args = [self._registered.keys(), [], []]

		if timeout is not None:
			select_args = select_args[:]
			# Translate poll() timeout args to select() timeout args:
			#
			#          | units        | value(s) for indefinite block
			# ---------|--------------|------------------------------
			# poll     | milliseconds | omitted, negative, or None
			# ---------|--------------|------------------------------
			# select   | seconds      | omitted
			# ---------|--------------|------------------------------

			if timeout is not None and timeout < 0:
				# (timeout reset to None truncated)
			if timeout is not None:
				select_args.append(timeout / 1000)

		select_events = select.select(*select_args)
		# (poll_events list initialization truncated)
		for fd in select_events[0]:
			poll_events.append((fd, PollConstants.POLLIN))
		# (final 'return poll_events' truncated)
class SequentialTaskQueue(SlotObject):
	"""FIFO queue of tasks run with at most max_jobs concurrently;
	completed tasks are pruned automatically via exit listeners."""

	__slots__ = ("max_jobs", "running_tasks") + \
		("_dirty", "_scheduling", "_task_queue")

	# NOTE(review): lines are elided throughout this copy of the class
	# ('return' statements, some method def lines, task.start()/cancel()
	# calls); indentation reconstructed -- verify against upstream.

	def __init__(self, **kwargs):
		SlotObject.__init__(self, **kwargs)
		self._task_queue = deque()
		self.running_tasks = set()
		if self.max_jobs is None:
			# (default max_jobs assignment truncated)

	def add(self, task):
		self._task_queue.append(task)
		# (dirty-flag / schedule call truncated)

	def addFront(self, task):
		self._task_queue.appendleft(task)

	# (the 'def schedule(self):' line and its opening statements are
	# missing from this copy)
		if self._scheduling:
			# Ignore any recursive schedule() calls triggered via
			# self._task_exit().
			# (early return truncated)

		self._scheduling = True

		task_queue = self._task_queue
		running_tasks = self.running_tasks
		max_jobs = self.max_jobs
		state_changed = False

		# max_jobs is True means "unlimited".
		while task_queue and \
			(max_jobs is True or len(running_tasks) < max_jobs):
			task = task_queue.popleft()
			cancelled = getattr(task, "cancelled", None)
			# (cancelled-task guard truncated)
				running_tasks.add(task)
				task.addExitListener(self._task_exit)
				# (task.start() call truncated)
			state_changed = True

		self._scheduling = False

		return state_changed

	def _task_exit(self, task):
		"""
		Since we can always rely on exit listeners being called, the set of
		running tasks is always pruned automatically and there is never any need
		to actively prune it.
		"""
		self.running_tasks.remove(task)
		if self._task_queue:
			# (re-schedule call truncated)

	# (the 'def clear(self):' line is missing from this copy)
		self._task_queue.clear()
		running_tasks = self.running_tasks
		while running_tasks:
			task = running_tasks.pop()
			task.removeExitListener(self._task_exit)
			# (task cancellation call truncated)

	def __nonzero__(self):
		return bool(self._task_queue or self.running_tasks)

	# (the 'def __len__(self):' line is missing from this copy)
		return len(self._task_queue) + len(self.running_tasks)
# Module-level cache for can_poll_device(); None means "not probed yet".
_can_poll_device = None
def can_poll_device():
	"""
	Test if it's possible to use poll() on a device such as a pty. This
	is known to fail on Darwin.
	@rtype: bool
	@returns: True if poll() on a device succeeds, False otherwise.
	"""
	# NOTE(review): 'try:' headers and cleanup lines are elided from
	# this copy; indentation reconstructed -- verify against upstream.
	global _can_poll_device
	if _can_poll_device is not None:
		# Memoized: the probe only needs to run once per process.
		return _can_poll_device

	if not hasattr(select, "poll"):
		_can_poll_device = False
		return _can_poll_device

	# ('try:' header truncated)
	dev_null = open('/dev/null', 'rb')
	# ('except IOError:' handler header truncated)
		_can_poll_device = False
		return _can_poll_device

	# (creation of the poll object `p` truncated)
	p.register(dev_null.fileno(), PollConstants.POLLIN)

	invalid_request = False
	for f, event in p.poll():
		if event & PollConstants.POLLNVAL:
			invalid_request = True
			# (break and dev_null.close() truncated)

	_can_poll_device = not invalid_request
	return _can_poll_device
def create_poll_instance():
	"""
	Create an instance of select.poll, or an instance of
	PollSelectAdapter if there is no poll() implementation or
	it is broken somehow.
	"""
	if can_poll_device():
		return select.poll()
	return PollSelectAdapter()
class PollScheduler(object):
	# Base class providing a poll()-driven event loop: handlers are
	# registered per file descriptor and dispatched from a shared event
	# queue. Subclasses implement _schedule_tasks()/_running_job_count().

	# NOTE(review): lines are elided throughout this copy of the class
	# ('try:'/'finally:' headers, returns, the __init__ def line);
	# indentation reconstructed -- verify against upstream.

	class _sched_iface_class(SlotObject):
		# Lightweight bundle of callbacks handed to task objects.
		__slots__ = ("register", "schedule", "unregister")

	# (the 'def __init__(self):' line and the _max_jobs initialization
	# are missing from this copy)
		self._max_load = None
		self._poll_event_queue = []
		self._poll_event_handlers = {}
		self._poll_event_handler_ids = {}
		# Increment id for each new handler.
		self._event_handler_id = 0
		self._poll_obj = create_poll_instance()
		self._scheduling = False

	def _schedule(self):
		"""
		Calls _schedule_tasks() and automatically returns early from
		any recursive calls to this method that the _schedule_tasks()
		call might trigger. This makes _schedule() safe to call from
		inside exit listeners.
		"""
		if self._scheduling:
			# (early return truncated)
		self._scheduling = True
		# ('try:' header truncated)
			return self._schedule_tasks()
		# ('finally:' header truncated)
			self._scheduling = False

	def _running_job_count(self):
		# (body truncated in this copy; subclasses provide the count)

	def _can_add_job(self):
		max_jobs = self._max_jobs
		max_load = self._max_load

		# max_jobs is True means "unlimited".
		if self._max_jobs is not True and \
			self._running_job_count() >= self._max_jobs:
			# (return False truncated)

		if max_load is not None and \
			(max_jobs is True or max_jobs > 1) and \
			self._running_job_count() >= 1:
			# ('try:' header truncated)
			avg1, avg5, avg15 = os.getloadavg()
			except (AttributeError, OSError), e:
				writemsg("!!! getloadavg() failed: %s\n" % (e,),
					# (noiselevel argument and fallback return truncated)

			if avg1 >= max_load:
				# (return False truncated)

		# (final 'return True' truncated)

	def _poll(self, timeout=None):
		"""
		All poll() calls pass through here. The poll events
		are added directly to self._poll_event_queue.
		In order to avoid endless blocking, this raises
		StopIteration if timeout is None and there are
		no file descriptors to poll.
		"""
		if not self._poll_event_handlers:
			# (a _schedule() call appears truncated here)
			if timeout is None and \
				not self._poll_event_handlers:
				raise StopIteration(
					"timeout is None and there are no poll() event handlers")

		# The following error is known to occur with Linux kernel versions
		# (kernel version range truncated in this copy)
		#
		#   select.error: (4, 'Interrupted system call')
		#
		# This error has been observed after a SIGSTOP, followed by SIGCONT.
		# Treat it similar to EAGAIN if timeout is None, otherwise just return
		# without any events.
		# (retry loop and 'try:' headers truncated)
			self._poll_event_queue.extend(self._poll_obj.poll(timeout))
			# (break truncated)
		except select.error, e:
			writemsg_level("\n!!! select error: %s\n" % (e,),
				level=logging.ERROR, noiselevel=-1)
			# (del e truncated)
			if timeout is not None:
				# (break truncated)

	def _next_poll_event(self, timeout=None):
		"""
		Since the _schedule_wait() loop is called by event
		handlers from _poll_loop(), maintain a central event
		queue for both of them to share events from a single
		poll() call. In order to avoid endless blocking, this
		raises StopIteration if timeout is None and there are
		no file descriptors to poll.
		"""
		if not self._poll_event_queue:
			# (self._poll(timeout) call truncated)
		return self._poll_event_queue.pop()

	def _poll_loop(self):

		event_handlers = self._poll_event_handlers
		event_handled = False

		# ('try:' header truncated)
		while event_handlers:
			f, event = self._next_poll_event()
			handler, reg_id = event_handlers[f]
			# (handler(f, event) call truncated)
			event_handled = True
		except StopIteration:
			event_handled = True

		if not event_handled:
			raise AssertionError("tight loop")

	def _schedule_yield(self):
		"""
		Schedule for a short period of time chosen by the scheduler based
		on internal state. Synchronous tasks should call this periodically
		in order to allow the scheduler to service pending poll events. The
		scheduler will call poll() exactly once, without blocking, and any
		resulting poll events will be serviced.
		"""
		event_handlers = self._poll_event_handlers
		# (events_handled counter initialization truncated)

		if not event_handlers:
			return bool(events_handled)

		if not self._poll_event_queue:
			# (non-blocking self._poll(0) call truncated)

		# ('try:' header truncated)
		while event_handlers and self._poll_event_queue:
			f, event = self._next_poll_event()
			handler, reg_id = event_handlers[f]
			# (handler call and counter increment truncated)
		except StopIteration:
			# (counter increment truncated)

		return bool(events_handled)

	def _register(self, f, eventmask, handler):
		"""
		(docstring opening truncated in this copy)
		@return: A unique registration id, for use in schedule() or
			unregister() calls.
		"""
		if f in self._poll_event_handlers:
			raise AssertionError("fd %d is already registered" % f)
		self._event_handler_id += 1
		reg_id = self._event_handler_id
		self._poll_event_handler_ids[reg_id] = f
		self._poll_event_handlers[f] = (handler, reg_id)
		self._poll_obj.register(f, eventmask)
		# (return reg_id truncated)

	def _unregister(self, reg_id):
		f = self._poll_event_handler_ids[reg_id]
		self._poll_obj.unregister(f)
		del self._poll_event_handlers[f]
		del self._poll_event_handler_ids[reg_id]

	def _schedule_wait(self, wait_ids):
		"""
		Schedule until wait_id is not longer registered
		(docstring partially truncated in this copy)
		@param wait_id: a task id to wait for
		"""
		event_handlers = self._poll_event_handlers
		handler_ids = self._poll_event_handler_ids
		event_handled = False

		if isinstance(wait_ids, int):
			wait_ids = frozenset([wait_ids])

		# ('try:' header truncated)
		while wait_ids.intersection(handler_ids):
			f, event = self._next_poll_event()
			handler, reg_id = event_handlers[f]
			# (handler(f, event) call truncated)
			event_handled = True
		except StopIteration:
			event_handled = True

		return event_handled
class QueueScheduler(PollScheduler):

	"""
	Add instances of SequentialTaskQueue and then call run(). The
	run() method returns when no tasks remain.
	"""

	# NOTE(review): lines are elided from this copy (default values,
	# the add()/run() def lines, guards and returns); verify upstream.

	def __init__(self, max_jobs=None, max_load=None):
		PollScheduler.__init__(self)

		if max_jobs is None:
			# (default max_jobs assignment truncated)

		self._max_jobs = max_jobs
		self._max_load = max_load
		self.sched_iface = self._sched_iface_class(
			register=self._register,
			schedule=self._schedule_wait,
			unregister=self._unregister)

		# (self._queues list initialization appears truncated)
		self._schedule_listeners = []

	# (the 'def add(self, q):' line is missing from this copy)
		self._queues.append(q)

	def remove(self, q):
		self._queues.remove(q)

	# (the 'def run(self):' line is missing from this copy)
		while self._schedule():
			# (poll loop call truncated)

		while self._running_job_count():
			# (poll loop call truncated)

	def _schedule_tasks(self):
		"""
		@rtype: bool
		@returns: True if there may be remaining tasks to schedule,
			False otherwise. (docstring partially truncated)
		"""
		while self._can_add_job():
			n = self._max_jobs - self._running_job_count()
			# (guard for n < 1 truncated)
			if not self._start_next_job(n):
				# (break truncated)

		for q in self._queues:
			# (remaining-work check and returns truncated)

	def _running_job_count(self):
		# (job_count initialization truncated)
		for q in self._queues:
			job_count += len(q.running_tasks)
		self._jobs = job_count
		# (return job_count truncated)

	def _start_next_job(self, n=1):
		# (started_count initialization truncated)
		for q in self._queues:
			initial_job_count = len(q.running_tasks)
			# (q.schedule() call truncated)
			final_job_count = len(q.running_tasks)
			if final_job_count > initial_job_count:
				started_count += (final_job_count - initial_job_count)
			if started_count >= n:
				# (break truncated)
		return started_count
class TaskScheduler(object):

	"""
	A simple way to handle scheduling of AsynchrousTask instances. Simply
	add tasks and call run(). The run() method returns when no tasks remain.
	"""

	def __init__(self, max_jobs=None, max_load=None):
		# Compose a single queue with a QueueScheduler that drives it.
		self._queue = SequentialTaskQueue(max_jobs=max_jobs)
		self._scheduler = QueueScheduler(
			max_jobs=max_jobs, max_load=max_load)
		self.sched_iface = self._scheduler.sched_iface
		self.run = self._scheduler.run
		self._scheduler.add(self._queue)

	def add(self, task):
		self._queue.add(task)

	# NOTE(review): a method def line is missing above this statement in
	# this copy of the source (it delegates scheduling to the internal
	# QueueScheduler).
		self._scheduler.schedule()
class JobStatusDisplay(object):
	# Renders the one-line "Jobs: x of y complete ..." status display.
	# NOTE(review): this class continues beyond the visible portion of
	# the source, and several class-attribute lines (dict contents and
	# closing braces) are missing from this copy.

	# Attributes whose changes trigger a display refresh (see
	# __setattr__/_property_change below).
	_bound_properties = ("curval", "failed", "running")
	_jobs_column_width = 48

	# Don't update the display unless at least this much
	# time has passed, in units of seconds.
	_min_display_latency = 2

	_default_term_codes = {
	# (the contents and closing brace of this dict are missing from
	# this copy of the source)

	_termcap_name_map = {
		'carriage_return' : 'cr',
	# (remaining entries and the closing brace are missing from this
	# copy of the source)
	def __init__(self, out=sys.stdout, quiet=False):
		# object.__setattr__ is used directly because this class
		# overrides __setattr__ (see below) to track property changes.
		object.__setattr__(self, "out", out)
		object.__setattr__(self, "quiet", quiet)
		object.__setattr__(self, "maxval", 0)
		object.__setattr__(self, "merges", 0)
		object.__setattr__(self, "_changed", False)
		object.__setattr__(self, "_displayed", False)
		object.__setattr__(self, "_last_display_time", 0)
		object.__setattr__(self, "width", 80)
		# (initialization of the bound properties appears to be
		# truncated in this copy)

		isatty = hasattr(out, "isatty") and out.isatty()
		object.__setattr__(self, "_isatty", isatty)
		if not isatty or not self._init_term():
			# Fall back to the built-in default codes when there is no
			# terminal or termcap initialization failed.
			# (term_codes dict initialization truncated)
			for k, capname in self._termcap_name_map.iteritems():
				term_codes[k] = self._default_term_codes[capname]
			object.__setattr__(self, "_term_codes", term_codes)
	def _init_term(self):
		"""
		Initialize term control codes.
		@rtype: bool
		@returns: True if term codes were successfully initialized,
			False otherwise (docstring partially truncated in this copy).
		"""
		term_type = os.environ.get("TERM", "vt100")
		# ('tigetstr = None' and the try/import-curses headers are
		# truncated in this copy)
			curses.setupterm(term_type, self.out.fileno())
			tigetstr = curses.tigetstr
		except curses.error:
			# (handler body and outer except clauses truncated)

		if tigetstr is None:
			# (return False truncated)

		# (term_codes dict initialization truncated)
		for k, capname in self._termcap_name_map.iteritems():
			code = tigetstr(capname)
			# ('if code is None:' guard appears truncated here; the
			# next line is its fallback assignment)
			code = self._default_term_codes[capname]
			term_codes[k] = code
		object.__setattr__(self, "_term_codes", term_codes)
		# (final 'return True' truncated)
9058 def _format_msg(self, msg):
9059 return ">>> %s" % msg
	# NOTE(review): the enclosing method def line (it appears to erase
	# the current status line) and the opening self.out.write( call are
	# missing from this copy of the source.
			self._term_codes['carriage_return'] + \
			self._term_codes['clr_eol'])
		self._displayed = False
	def _display(self, line):
		# Write a status line and remember that something is displayed.
		self.out.write(line)
		# (an out.flush() call appears truncated here)
		self._displayed = True
	def _update(self, msg):
		# ('out = self.out' binding and quiet-mode guard truncated in
		# this copy)
		if not self._isatty:
			# Non-terminal output: just append a plain line.
			out.write(self._format_msg(msg) + self._term_codes['newline'])
			# (flush truncated)
			self._displayed = True
			# (early return truncated)

		# (erase of the previously displayed line truncated)
		self._display(self._format_msg(msg))
	def displayMessage(self, msg):
		# Write a one-off message line, clearing any status line first
		# so the message does not mix with it.
		was_displayed = self._displayed

		if self._isatty and self._displayed:
			# (erase of the status line truncated in this copy)

		self.out.write(self._format_msg(msg) + self._term_codes['newline'])
		self._displayed = False
		# (redisplay of the status line when was_displayed is true
		# appears truncated here)
	# NOTE(review): the method def lines for the fragments below are
	# missing from this copy of the source; they appear to belong to a
	# change-marking method and a reset method.
		self._changed = True
		# (intervening lines truncated)
		for name in self._bound_properties:
			object.__setattr__(self, name, 0)
		# (an 'if self._displayed:' guard appears truncated here)
		self.out.write(self._term_codes['newline'])
		# (a flush call appears truncated here)
		self._displayed = False
	def __setattr__(self, name, value):
		# Intercept attribute writes so that changes to the bound
		# properties (curval/failed/running) can refresh the display.
		old_value = getattr(self, name)
		if value == old_value:
			# (early return truncated in this copy)
		object.__setattr__(self, name, value)
		if name in self._bound_properties:
			self._property_change(name, old_value, value)
	def _property_change(self, name, old_value, new_value):
		# Mark the display dirty; a refresh call appears to be
		# truncated from this copy of the source.
		self._changed = True
	def _load_avg_str(self):
		# ('try:' header truncated in this copy)
		avg = os.getloadavg()
		except (AttributeError, OSError), e:
			# (fallback return for platforms without getloadavg()
			# truncated)
		# (computation of the `digits` precision truncated)
		return ", ".join(("%%.%df" % digits ) % x for x in avg)
9144 Display status on stdout, but only if something has
9145 changed since the last call.
9151 current_time = time.time()
9152 time_delta = current_time - self._last_display_time
9153 if self._displayed and \
9155 if not self._isatty:
9157 if time_delta < self._min_display_latency:
9160 self._last_display_time = current_time
9161 self._changed = False
9162 self._display_status()
def _display_status(self):
    # Compose the "Jobs: x of y complete ... Load avg: ..." line in both
    # color and plain renderings, then emit whichever fits the terminal.
    # Don't use len(self._completed_tasks) here since that also
    # can include uninstall tasks.
    curval_str = str(self.curval)
    maxval_str = str(self.maxval)
    running_str = str(self.running)
    failed_str = str(self.failed)
    load_avg_str = self._load_avg_str()
    # plain_output mirrors color_output minus the console codes.
    color_output = StringIO.StringIO()
    plain_output = StringIO.StringIO()
    style_file = portage.output.ConsoleStyleFile(color_output)
    style_file.write_listener = plain_output
    style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
    style_writer.style_listener = style_file.new_styles
    f = formatter.AbstractFormatter(style_writer)
    number_style = "INFORM"
    # NOTE(review): the pop_style() calls that balance each push_style()
    # are elided in this excerpt.
    f.add_literal_data("Jobs: ")
    f.push_style(number_style)
    f.add_literal_data(curval_str)
    f.add_literal_data(" of ")
    f.push_style(number_style)
    f.add_literal_data(maxval_str)
    f.add_literal_data(" complete")
    f.add_literal_data(", ")
    f.push_style(number_style)
    f.add_literal_data(running_str)
    f.add_literal_data(" running")
    f.add_literal_data(", ")
    f.push_style(number_style)
    f.add_literal_data(failed_str)
    f.add_literal_data(" failed")
    # Pad so the load average column lines up between refreshes.
    padding = self._jobs_column_width - len(plain_output.getvalue())
    f.add_literal_data(padding * " ")
    f.add_literal_data("Load avg: ")
    f.add_literal_data(load_avg_str)
    # Truncate to fit width, to avoid making the terminal scroll if the
    # line overflows (happens when the load average is large).
    plain_output = plain_output.getvalue()
    if self._isatty and len(plain_output) > self.width:
        # Use plain_output here since it's easier to truncate
        # properly than the color output which contains console
        # codes.
        self._update(plain_output[:self.width])
    # [else: elided in this excerpt]
    self._update(color_output.getvalue())
    # [elided -- presumably guarded by the xterm-titles setting]
    xtermTitle(" ".join(plain_output.split()))
9226 class Scheduler(PollScheduler):
# Class-level option sets that modulate scheduler behavior.

# With any of these options, installed-package blockers are ignored
# (see _find_blockers_with_lock).
_opts_ignore_blockers = \
    frozenset(["--buildpkgonly",
    "--fetchonly", "--fetch-all-uri",
    "--nodeps", "--pretend"])

# Options that force foreground (non-background) operation.
_opts_no_background = \
    frozenset(["--pretend",
    "--fetchonly", "--fetch-all-uri"])

# Under these options emerge never re-execs itself after a portage
# upgrade (see _restart_if_necessary).
_opts_no_restart = frozenset(["--buildpkgonly",
    "--fetchonly", "--fetch-all-uri", "--pretend"])

# Options that must not leak into an automatic --resume invocation.
_bad_resume_opts = set(["--ask", "--changelog",
    "--resume", "--skipfirst"])

# Shared log file for parallel-fetch output.
_fetch_log = "/var/log/emerge-fetch.log"
class _iface_class(SlotObject):
    # Scheduler-side interface handed to tasks (built as _sched_iface
    # in __init__).
    __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
        "dblinkElog", "fetch", "register", "schedule",
        "scheduleSetup", "scheduleUnpack", "scheduleYield",
    # [remainder of __slots__ elided in this excerpt]

class _fetch_iface_class(SlotObject):
    # Interface given to fetch tasks: shared log file + scheduler hook.
    __slots__ = ("log_file", "schedule")

# One SequentialTaskQueue per category; instantiated in __init__.
_task_queues_class = slot_dict_class(
    ("merge", "jobs", "fetch", "unpack"), prefix="")

class _build_opts_class(SlotObject):
    # Build-related flags, mapped from "--x-y" options in __init__.
    __slots__ = ("buildpkg", "buildpkgonly",
        "fetch_all_uri", "fetchonly", "pretend")

class _binpkg_opts_class(SlotObject):
    # Binary-package-related flags, mapped the same way.
    __slots__ = ("fetchonly", "getbinpkg", "pretend")

class _pkg_count_class(SlotObject):
    # Merge progress counter: curval done so far, maxval total merges.
    __slots__ = ("curval", "maxval")
class _emerge_log_class(SlotObject):
    """Thin wrapper around emergelog() that honors the xterm-titles
    setting: when titles are disabled, short_msg is dropped so the
    scheduler's status display is not disturbed."""
    __slots__ = ("xterm_titles",)

    def log(self, *pargs, **kwargs):
        if not self.xterm_titles:
            # Avoid interference with the scheduler's status display.
            if "short_msg" in kwargs:
                del kwargs["short_msg"]
        emergelog(self.xterm_titles, *pargs, **kwargs)
class _failed_pkg(SlotObject):
    # Record of a failed build/install: build directory, log path,
    # the Package instance, and the exit code.
    __slots__ = ("build_dir", "build_log", "pkg", "returncode")
class _ConfigPool(object):
    """Interface for a task to temporarily allocate a config
    instance from a pool. This allows a task to be constructed
    long before the config instance actually becomes needed, like
    when prefetchers are constructed for the whole merge list."""
    __slots__ = ("_root", "_allocate", "_deallocate")
    def __init__(self, root, allocate, deallocate):
        # [line elided -- presumably self._root = root; confirm]
        self._allocate = allocate
        self._deallocate = deallocate
    # [def allocate(self): header elided in this excerpt]
        return self._allocate(self._root)
    def deallocate(self, settings):
        self._deallocate(settings)
class _unknown_internal_error(portage.exception.PortageException):
    """
    Used internally to terminate scheduling. The specific reason for
    the failure should have been dumped to stderr.
    """
    def __init__(self, value=""):
        portage.exception.PortageException.__init__(self, value)
def __init__(self, settings, trees, mtimedb, myopts,
    spinner, mergelist, favorites, digraph):
    # Build all per-run scheduler state: option objects, task queues,
    # the status display, blocker dbs, the interfaces handed to tasks,
    # and the parallel-fetch configuration.
    PollScheduler.__init__(self)
    self.settings = settings
    self.target_root = settings["ROOT"]
    self.myopts = myopts
    self._spinner = spinner
    self._mtimedb = mtimedb
    self._mergelist = mergelist
    self._favorites = favorites
    self._args_set = InternalPackageSet(favorites)
    # Map each "--x-y" command line flag onto its x_y slot.
    self._build_opts = self._build_opts_class()
    for k in self._build_opts.__slots__:
        setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
    self._binpkg_opts = self._binpkg_opts_class()
    for k in self._binpkg_opts.__slots__:
        setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
    self._logger = self._emerge_log_class()
    self._task_queues = self._task_queues_class()
    for k in self._task_queues.allowed_keys:
        setattr(self._task_queues, k,
            SequentialTaskQueue())
    self._status_display = JobStatusDisplay()
    self._max_load = myopts.get("--load-average")
    max_jobs = myopts.get("--jobs")
    if max_jobs is None:
        # [elided -- presumably max_jobs = 1; confirm]
    self._set_max_jobs(max_jobs)
    # The root where the currently running
    # portage instance is installed.
    self._running_root = trees["/"]["root_config"]
    if settings.get("PORTAGE_DEBUG", "") == "1":
        # [elided in this excerpt]
    self.pkgsettings = {}
    self._config_pool = {}
    self._blocker_db = {}
    # [loop header elided -- presumably iterates the roots in trees]
    self._config_pool[root] = []
    self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
    fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
        schedule=self._schedule_fetch)
    self._sched_iface = self._iface_class(
        dblinkEbuildPhase=self._dblink_ebuild_phase,
        dblinkDisplayMerge=self._dblink_display_merge,
        dblinkElog=self._dblink_elog,
        fetch=fetch_iface, register=self._register,
        schedule=self._schedule_wait,
        scheduleSetup=self._schedule_setup,
        scheduleUnpack=self._schedule_unpack,
        scheduleYield=self._schedule_yield,
        unregister=self._unregister)
    # Weak values so finished prefetchers can be garbage collected.
    self._prefetchers = weakref.WeakValueDictionary()
    self._pkg_queue = []
    self._completed_tasks = set()
    self._failed_pkgs = []
    self._failed_pkgs_all = []
    self._failed_pkgs_die_msgs = []
    self._post_mod_echo_msgs = []
    self._parallel_fetch = False
    merge_count = len([x for x in mergelist \
        if isinstance(x, Package) and x.operation == "merge"])
    self._pkg_count = self._pkg_count_class(
        curval=0, maxval=merge_count)
    self._status_display.maxval = self._pkg_count.maxval
    # The load average takes some time to respond when new
    # jobs are added, so we need to limit the rate of adding
    # new jobs (see _job_delay).
    self._job_delay_max = 10
    self._job_delay_factor = 1.0
    self._job_delay_exp = 1.5
    self._previous_job_start_time = None
    self._set_digraph(digraph)
    # This is used to memoize the _choose_pkg() result when
    # no packages can be chosen until one of the existing
    # jobs completes.
    self._choose_pkg_return_early = False
    features = self.settings.features
    if "parallel-fetch" in features and \
        not ("--pretend" in self.myopts or \
        "--fetch-all-uri" in self.myopts or \
        "--fetchonly" in self.myopts):
        if "distlocks" not in features:
            # parallel-fetch needs distlocks for safe concurrent access.
            # NOTE(review): some continuation lines of these calls are
            # elided in this excerpt.
            portage.writemsg(red("!!!")+"\n", noiselevel=-1)
            portage.writemsg(red("!!!")+" parallel-fetching " + \
                "requires the distlocks feature enabled"+"\n",
            portage.writemsg(red("!!!")+" you have it disabled, " + \
                "thus parallel-fetching is being disabled"+"\n",
            portage.writemsg(red("!!!")+"\n", noiselevel=-1)
        elif len(mergelist) > 1:
            self._parallel_fetch = True
    if self._parallel_fetch:
        # clear out existing fetch log if it exists
        # [try: elided in this excerpt]
        open(self._fetch_log, 'w')
        except EnvironmentError:
    self._running_portage = None
    portage_match = self._running_root.trees["vartree"].dbapi.match(
        portage.const.PORTAGE_PACKAGE_ATOM)
    # [guard elided -- presumably only when portage_match is non-empty]
    cpv = portage_match.pop()
    self._running_portage = self._pkg(cpv, "installed",
        self._running_root, installed=True)
def _poll(self, timeout=None):
    # Delegate to the base class poll loop.
    # [one line elided in this excerpt]
    PollScheduler._poll(self, timeout=timeout)
def _set_max_jobs(self, max_jobs):
    """Record the job limit and propagate it to the build-job queue."""
    self._task_queues.jobs.max_jobs = max_jobs
    self._max_jobs = max_jobs
def _background_mode(self):
    """
    Check if background mode is enabled and adjust states as necessary.

    @rtype: bool
    @returns: True if background mode is enabled, False otherwise.
    """
    background = (self._max_jobs is True or \
        self._max_jobs > 1 or "--quiet" in self.myopts) and \
        not bool(self._opts_no_background.intersection(self.myopts))
    # [elided -- presumably guarded by "if background:"]
    interactive_tasks = self._get_interactive_tasks()
    if interactive_tasks:
        # Interactive packages need the terminal, so force foreground.
        # [elided in this excerpt]
        writemsg_level(">>> Sending package output to stdio due " + \
            "to interactive package(s):\n",
            level=logging.INFO, noiselevel=-1)
        # [elided -- presumably msg = []]
        for pkg in interactive_tasks:
            pkg_str = " " + colorize("INFORM", str(pkg.cpv))
            # [elided -- presumably appended only for non-"/" roots]
            pkg_str += " for " + pkg.root
            # [elided in this excerpt]
        writemsg_level("".join("%s\n" % (l,) for l in msg),
            level=logging.INFO, noiselevel=-1)
        if self._max_jobs is True or self._max_jobs > 1:
            self._set_max_jobs(1)
            writemsg_level(">>> Setting --jobs=1 due " + \
                "to the above interactive package(s)\n",
                level=logging.INFO, noiselevel=-1)
    self._status_display.quiet = \
        # [continuation line elided in this excerpt]
        ("--quiet" in self.myopts and \
        "--verbose" not in self.myopts)
    self._logger.xterm_titles = \
        "notitles" not in self.settings.features and \
        self._status_display.quiet
    # [elided -- presumably "return background"]
def _get_interactive_tasks(self):
    # Scan the merge list for ebuilds whose PROPERTIES include
    # "interactive" -- these require the terminal during merge.
    from portage import flatten
    from portage.dep import use_reduce, paren_reduce
    interactive_tasks = []
    for task in self._mergelist:
        if not (isinstance(task, Package) and \
            task.operation == "merge"):
            # [elided -- presumably "continue"]
        # [try: elided in this excerpt]
        properties = flatten(use_reduce(paren_reduce(
            task.metadata["PROPERTIES"]), uselist=task.use.enabled))
        except portage.exception.InvalidDependString, e:
            show_invalid_depstring_notice(task,
                task.metadata["PROPERTIES"], str(e))
            raise self._unknown_internal_error()
        if "interactive" in properties:
            interactive_tasks.append(task)
    return interactive_tasks
def _set_digraph(self, digraph):
    # With --nodeps or a single job the dependency graph is not needed
    # for scheduling decisions, so drop it.
    if "--nodeps" in self.myopts or \
        (self._max_jobs is not True and self._max_jobs < 2):
        # [elided in this excerpt]
        self._digraph = None
        # [elided -- presumably "return"]
    self._digraph = digraph
    self._prune_digraph()
def _prune_digraph(self):
    """
    Prune any root nodes that are irrelevant.
    """
    graph = self._digraph
    completed_tasks = self._completed_tasks
    removed_nodes = set()
    # [loop header elided -- pruning repeats until no root node is
    # removable, as the trailing break/clear pattern suggests]
    for node in graph.root_nodes():
        if not isinstance(node, Package) or \
            (node.installed and node.operation == "nomerge") or \
            node in completed_tasks:
            removed_nodes.add(node)
    graph.difference_update(removed_nodes)
    if not removed_nodes:
        # [elided -- presumably "break"]
    removed_nodes.clear()
class _pkg_failure(portage.exception.PortageException):
    """
    An instance of this class is raised by unmerge() when
    an uninstallation fails.
    """
    def __init__(self, *pargs):
        portage.exception.PortageException.__init__(self, pargs)
        # [guard elided -- status comes from the first positional arg]
        self.status = pargs[0]
def _schedule_fetch(self, fetcher):
    """
    Schedule a fetcher on the fetch queue, in order to
    serialize access to the fetch log.
    """
    self._task_queues.fetch.addFront(fetcher)
def _schedule_setup(self, setup_phase):
    """
    Schedule a setup phase on the merge queue, in order to
    serialize unsandboxed access to the live filesystem.
    """
    self._task_queues.merge.addFront(setup_phase)
def _schedule_unpack(self, unpack_phase):
    """
    Schedule an unpack phase on the unpack queue, in order
    to serialize $DISTDIR access for live ebuilds.
    """
    self._task_queues.unpack.add(unpack_phase)
def _find_blockers(self, new_pkg):
    """
    Returns a callable which should be called only when
    the vdb lock has been acquired.
    """
    # [wrapper definition lines elided in this excerpt]
    return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
    # [elided -- presumably returns the wrapper]
def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
    # Collect vartree dblink objects for installed packages that block
    # new_pkg; blockers are skipped entirely under certain options.
    if self._opts_ignore_blockers.intersection(self.myopts):
        # [elided -- early return]
    # Call gc.collect() here to avoid heap overflow that
    # triggers 'Cannot allocate memory' errors (reported
    # [comment continuation and gc.collect() call elided]
    blocker_db = self._blocker_db[new_pkg.root]
    blocker_dblinks = []
    for blocking_pkg in blocker_db.findInstalledBlockers(
        new_pkg, acquire_lock=acquire_lock):
        if new_pkg.slot_atom == blocking_pkg.slot_atom:
            # [elided -- presumably "continue"]
        if new_pkg.cpv == blocking_pkg.cpv:
            # [elided -- presumably "continue"]
        blocker_dblinks.append(portage.dblink(
            blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
            self.pkgsettings[blocking_pkg.root], treetype="vartree",
            vartree=self.trees[blocking_pkg.root]["vartree"]))
    return blocker_dblinks
def _dblink_pkg(self, pkg_dblink):
    """Map a dblink back onto the scheduler's Package representation."""
    pkg_type = RootConfig.tree_pkg_map[pkg_dblink.treetype]
    root_conf = self.trees[pkg_dblink.myroot]["root_config"]
    return self._pkg(pkg_dblink.mycpv, pkg_type, root_conf,
        installed=(pkg_type == "installed"))
def _append_to_log_path(self, log_path, msg):
    # Append msg to the given log file.
    f = open(log_path, 'a')
    # [elided -- presumably writes msg and closes f; confirm]
def _dblink_elog(self, pkg_dblink, phase, func, msgs):
    # Route elog messages from a dblink merge either into the build log
    # (background mode) or to the console.
    log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
    background = self._background
    # [out/log_file setup elided in this excerpt]
    if background and log_path is not None:
        log_file = open(log_path, 'a')
        # [elided in this excerpt]
    # [loop over msgs elided]
    func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
    # [elided in this excerpt]
    if log_file is not None:
        # [elided -- presumably log_file.close()]
def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
    # Display a merge-time message, appending it to the build log when
    # one is configured.
    log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
    background = self._background
    if log_path is None:
        if not (background and level < logging.WARN):
            portage.util.writemsg_level(msg,
                level=level, noiselevel=noiselevel)
    # [else branch header elided in this excerpt]
        portage.util.writemsg_level(msg,
            level=level, noiselevel=noiselevel)
    self._append_to_log_path(log_path, msg)
def _dblink_ebuild_phase(self,
    pkg_dblink, pkg_dbapi, ebuild_path, phase):
    """
    Using this callback for merge phases allows the scheduler
    to run while these phases execute asynchronously, and allows
    the scheduler control output handling.
    """
    scheduler = self._sched_iface
    settings = pkg_dblink.settings
    pkg = self._dblink_pkg(pkg_dblink)
    background = self._background
    log_path = settings.get("PORTAGE_LOG_FILE")
    ebuild_phase = EbuildPhase(background=background,
        pkg=pkg, phase=phase, scheduler=scheduler,
        settings=settings, tree=pkg_dblink.treetype)
    ebuild_phase.start()
    # [elided -- presumably waits for the phase to complete]
    return ebuild_phase.returncode
def _check_manifests(self):
    # Verify all the manifests now so that the user is notified of failure
    # as soon as possible.
    if "strict" not in self.settings.features or \
        "--fetchonly" in self.myopts or \
        "--fetch-all-uri" in self.myopts:
        # [elided -- early success return]
    shown_verifying_msg = False
    # [elided -- presumably quiet_settings = {}; confirm]
    for myroot, pkgsettings in self.pkgsettings.iteritems():
        # Clone a quiet config per root so digestcheck output is muted.
        quiet_config = portage.config(clone=pkgsettings)
        quiet_config["PORTAGE_QUIET"] = "1"
        quiet_config.backup_changes("PORTAGE_QUIET")
        quiet_settings[myroot] = quiet_config
    # [elided in this excerpt]
    for x in self._mergelist:
        if not isinstance(x, Package) or \
            x.type_name != "ebuild":
            # [elided -- presumably "continue"]
        if not shown_verifying_msg:
            shown_verifying_msg = True
            self._status_msg("Verifying ebuild manifests")
        root_config = x.root_config
        portdb = root_config.trees["porttree"].dbapi
        quiet_config = quiet_settings[root_config.root]
        quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
        if not portage.digestcheck([], quiet_config, strict=True):
            # [elided -- failure return]
    # [elided -- success return]
def _add_prefetchers(self):
    # Queue background fetchers for the merge list when parallel-fetch
    # is active.
    if not self._parallel_fetch:
        # [elided -- early return]
    if self._parallel_fetch:
        self._status_msg("Starting parallel fetch")
        prefetchers = self._prefetchers
        getbinpkg = "--getbinpkg" in self.myopts
        # In order to avoid "waiting for lock" messages
        # at the beginning, which annoy users, never
        # spawn a prefetcher for the first package.
        for pkg in self._mergelist[1:]:
            prefetcher = self._create_prefetcher(pkg)
            if prefetcher is not None:
                self._task_queues.fetch.add(prefetcher)
                prefetchers[pkg] = prefetcher
def _create_prefetcher(self, pkg):
    """
    @return: a prefetcher, or None if not applicable
    """
    # [elided -- presumably prefetcher = None]
    if not isinstance(pkg, Package):
        # [elided in this excerpt]
    elif pkg.type_name == "ebuild":
        prefetcher = EbuildFetcher(background=True,
            config_pool=self._ConfigPool(pkg.root,
            self._allocate_config, self._deallocate_config),
            fetchonly=1, logfile=self._fetch_log,
            pkg=pkg, prefetch=True, scheduler=self._sched_iface)
    elif pkg.type_name == "binary" and \
        "--getbinpkg" in self.myopts and \
        pkg.root_config.trees["bintree"].isremote(pkg.cpv):
        prefetcher = BinpkgFetcher(background=True,
            logfile=self._fetch_log, pkg=pkg,
            scheduler=self._sched_iface)
    # [elided -- presumably "return prefetcher"]
def _is_restart_scheduled(self):
    """
    Check if the merge list contains a replacement
    for the current running instance, that will result
    in restart after merge.

    @rtype: bool
    @returns: True if a restart is scheduled, False otherwise.
    """
    if self._opts_no_restart.intersection(self.myopts):
        # [elided -- return False]
    mergelist = self._mergelist
    for i, pkg in enumerate(mergelist):
        # A restart only happens when more packages follow the new
        # portage in the list.
        if self._is_restart_necessary(pkg) and \
            i != len(mergelist) - 1:
            # [elided -- return True]
    # [elided -- return False]
def _is_restart_necessary(self, pkg):
    """
    @return: True if merging the given package
        requires restart, False otherwise.
    """
    # Figure out if we need a restart.
    if pkg.root == self._running_root.root and \
        portage.match_from_list(
        portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
        if self._running_portage:
            # Restart only when the version actually differs.
            return cmp(pkg, self._running_portage) != 0
        # [elided in this excerpt]
    # [elided -- return False]
def _restart_if_necessary(self, pkg):
    """
    Use execv() to restart emerge. This happens
    if portage upgrades itself and there are
    remaining packages in the list.
    """
    if self._opts_no_restart.intersection(self.myopts):
        # [elided -- return]
    if not self._is_restart_necessary(pkg):
        # [elided -- return]
    if pkg == self._mergelist[-1]:
        # No packages remain after portage itself; no restart needed.
        # [elided -- return]
    self._main_loop_cleanup()
    logger = self._logger
    pkg_count = self._pkg_count
    mtimedb = self._mtimedb
    bad_resume_opts = self._bad_resume_opts
    logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
        (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
    logger.log(" *** RESTARTING " + \
        "emerge via exec() after change of " + \
    # [message continuation elided in this excerpt]
    mtimedb["resume"]["mergelist"].remove(list(pkg))
    # [elided in this excerpt]
    portage.run_exitfuncs()
    mynewargv = [sys.argv[0], "--resume"]
    resume_opts = self.myopts.copy()
    # For automatic resume, we need to prevent
    # any of bad_resume_opts from leaking in
    # via EMERGE_DEFAULT_OPTS.
    resume_opts["--ignore-default-opts"] = True
    for myopt, myarg in resume_opts.iteritems():
        if myopt not in bad_resume_opts:
            # [elided -- presumably distinguishes flags from valued opts]
            mynewargv.append(myopt)
            # [elided in this excerpt]
            mynewargv.append(myopt +"="+ str(myarg))
    # priority only needs to be adjusted on the first run
    os.environ["PORTAGE_NICENESS"] = "0"
    os.execv(mynewargv[0], mynewargv)
9830 if "--resume" in self.myopts:
9832 portage.writemsg_stdout(
9833 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
9834 self._logger.log(" *** Resuming merge...")
9836 self._save_resume_list()
9839 self._background = self._background_mode()
9840 except self._unknown_internal_error:
9843 for root in self.trees:
9844 root_config = self.trees[root]["root_config"]
9846 # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
9847 # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
9848 # for ensuring sane $PWD (bug #239560) and storing elog messages.
9849 tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
9850 if not tmpdir or not os.path.isdir(tmpdir):
9851 msg = "The directory specified in your " + \
9852 "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
9853 "does not exist. Please create this " + \
9854 "directory or correct your PORTAGE_TMPDIR setting."
9855 msg = textwrap.wrap(msg, 70)
9856 out = portage.output.EOutput()
9861 if self._background:
9862 root_config.settings.unlock()
9863 root_config.settings["PORTAGE_BACKGROUND"] = "1"
9864 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
9865 root_config.settings.lock()
9867 self.pkgsettings[root] = portage.config(
9868 clone=root_config.settings)
9870 rval = self._check_manifests()
9871 if rval != os.EX_OK:
9874 keep_going = "--keep-going" in self.myopts
9875 fetchonly = self._build_opts.fetchonly
9876 mtimedb = self._mtimedb
9877 failed_pkgs = self._failed_pkgs
9880 rval = self._merge()
9881 if rval == os.EX_OK or fetchonly or not keep_going:
9883 if "resume" not in mtimedb:
9885 mergelist = self._mtimedb["resume"].get("mergelist")
9892 for failed_pkg in failed_pkgs:
9893 mergelist.remove(list(failed_pkg.pkg))
9895 self._failed_pkgs_all.extend(failed_pkgs)
9901 if not self._calc_resume_list():
9904 clear_caches(self.trees)
9905 if not self._mergelist:
9908 self._save_resume_list()
9909 self._pkg_count.curval = 0
9910 self._pkg_count.maxval = len([x for x in self._mergelist \
9911 if isinstance(x, Package) and x.operation == "merge"])
9912 self._status_display.maxval = self._pkg_count.maxval
9914 self._logger.log(" *** Finished. Cleaning up...")
9917 self._failed_pkgs_all.extend(failed_pkgs)
9920 background = self._background
9921 failure_log_shown = False
9922 if background and len(self._failed_pkgs_all) == 1:
# If only one package failed then just show its
9924 # whole log for easy viewing.
9925 failed_pkg = self._failed_pkgs_all[-1]
9926 build_dir = failed_pkg.build_dir
9929 log_paths = [failed_pkg.build_log]
9931 log_path = self._locate_failure_log(failed_pkg)
9932 if log_path is not None:
9934 log_file = open(log_path, 'rb')
9938 if log_file is not None:
9940 for line in log_file:
9941 writemsg_level(line, noiselevel=-1)
9944 failure_log_shown = True
9946 # Dump mod_echo output now since it tends to flood the terminal.
9947 # This allows us to avoid having more important output, generated
9948 # later, from being swept away by the mod_echo output.
9949 mod_echo_output = _flush_elog_mod_echo()
9951 if background and not failure_log_shown and \
9952 self._failed_pkgs_all and \
9953 self._failed_pkgs_die_msgs and \
9954 not mod_echo_output:
9956 printer = portage.output.EOutput()
9957 for mysettings, key, logentries in self._failed_pkgs_die_msgs:
9959 if mysettings["ROOT"] != "/":
9960 root_msg = " merged to %s" % mysettings["ROOT"]
9962 printer.einfo("Error messages for package %s%s:" % \
9963 (colorize("INFORM", key), root_msg))
9965 for phase in portage.const.EBUILD_PHASES:
9966 if phase not in logentries:
9968 for msgtype, msgcontent in logentries[phase]:
9969 if isinstance(msgcontent, basestring):
9970 msgcontent = [msgcontent]
9971 for line in msgcontent:
9972 printer.eerror(line.strip("\n"))
9974 if self._post_mod_echo_msgs:
9975 for msg in self._post_mod_echo_msgs:
9978 if len(self._failed_pkgs_all) > 1:
9979 msg = "The following packages have " + \
9980 "failed to build or install:"
9982 writemsg(prefix + "\n", noiselevel=-1)
9983 from textwrap import wrap
9984 for line in wrap(msg, 72):
9985 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
9986 writemsg(prefix + "\n", noiselevel=-1)
9987 for failed_pkg in self._failed_pkgs_all:
9988 writemsg("%s\t%s\n" % (prefix,
9989 colorize("INFORM", str(failed_pkg.pkg))),
9991 writemsg(prefix + "\n", noiselevel=-1)
def _elog_listener(self, mysettings, key, logentries, fulltext):
    # Collect ERROR-level elog entries so they can be repeated when
    # the run finishes.
    errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
    # [guard elided in this excerpt]
    self._failed_pkgs_die_msgs.append(
        (mysettings, key, errors))
def _locate_failure_log(self, failed_pkg):
    # Find a usable log file for a failed package.
    build_dir = failed_pkg.build_dir
    log_paths = [failed_pkg.build_log]
    for log_path in log_paths:
        # [existence/validity checks elided in this excerpt]
        log_size = os.stat(log_path).st_size
        # [elided -- presumably skips empty logs and returns log_path]
    # [elided -- presumably returns None when nothing is found]
def _add_packages(self):
    # Seed the pending-package queue from the merge list.
    pkg_queue = self._pkg_queue
    for pkg in self._mergelist:
        if isinstance(pkg, Package):
            pkg_queue.append(pkg)
        elif isinstance(pkg, Blocker):
            # [elided in this excerpt]
def _merge_exit(self, merge):
    # Post-merge bookkeeping: recycle the config instance and update
    # the status display counters.
    self._do_merge_exit(merge)
    self._deallocate_config(merge.merge.settings)
    if merge.returncode == os.EX_OK and \
        not merge.merge.pkg.installed:
        self._status_display.curval += 1
    self._status_display.merges = len(self._task_queues.merge)
    # [elided in this excerpt]
def _do_merge_exit(self, merge):
    # Record failure or completion state for a finished merge task.
    pkg = merge.merge.pkg
    if merge.returncode != os.EX_OK:
        settings = merge.merge.settings
        build_dir = settings.get("PORTAGE_BUILDDIR")
        build_log = settings.get("PORTAGE_LOG_FILE")
        self._failed_pkgs.append(self._failed_pkg(
            build_dir=build_dir, build_log=build_log,
            # [line elided in this excerpt]
            returncode=merge.returncode))
        self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
        self._status_display.failed = len(self._failed_pkgs)
        # [elided -- presumably returns here on failure]
    self._task_complete(pkg)
    pkg_to_replace = merge.merge.pkg_to_replace
    if pkg_to_replace is not None:
        # When a package is replaced, mark its uninstall
        # task complete (if any).
        uninst_hash_key = \
            ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
        self._task_complete(uninst_hash_key)
    # [elided in this excerpt]
    self._restart_if_necessary(pkg)
    # Call mtimedb.commit() after each merge so that
    # --resume still works after being interrupted
    # by reboot, sigkill or similar.
    mtimedb = self._mtimedb
    mtimedb["resume"]["mergelist"].remove(list(pkg))
    if not mtimedb["resume"]["mergelist"]:
        del mtimedb["resume"]
    # [elided -- presumably mtimedb.commit(), per the comment above]
def _build_exit(self, build):
    # On build success, queue the merge step; on failure, record it.
    if build.returncode == os.EX_OK:
        merge = PackageMerge(merge=build)
        merge.addExitListener(self._merge_exit)
        self._task_queues.merge.add(merge)
        self._status_display.merges = len(self._task_queues.merge)
    # [else branch header elided in this excerpt]
        settings = build.settings
        build_dir = settings.get("PORTAGE_BUILDDIR")
        build_log = settings.get("PORTAGE_LOG_FILE")
        self._failed_pkgs.append(self._failed_pkg(
            build_dir=build_dir, build_log=build_log,
            # [line elided in this excerpt]
            returncode=build.returncode))
        self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
        self._status_display.failed = len(self._failed_pkgs)
        self._deallocate_config(build.settings)
    # [elided -- presumably decrements the running-job count]
    self._status_display.running = self._jobs
def _extract_exit(self, build):
    # Binary package extraction finishes the same way a build does.
    self._build_exit(build)
def _task_complete(self, pkg):
    """Record pkg as finished and drop the memoized _choose_pkg()
    early-return, since a completed task may unblock another package."""
    self._choose_pkg_return_early = False
    self._completed_tasks.add(pkg)
10113 self._add_prefetchers()
10114 self._add_packages()
10115 pkg_queue = self._pkg_queue
10116 failed_pkgs = self._failed_pkgs
10117 portage.locks._quiet = self._background
10118 portage.elog._emerge_elog_listener = self._elog_listener
10124 self._main_loop_cleanup()
10125 portage.locks._quiet = False
10126 portage.elog._emerge_elog_listener = None
10128 rval = failed_pkgs[-1].returncode
def _main_loop_cleanup(self):
    """Reset per-run scheduling state after the main loop exits."""
    self._task_queues.fetch.clear()
    self._digraph = None
    self._status_display.reset()
    self._choose_pkg_return_early = False
    self._completed_tasks.clear()
    del self._pkg_queue[:]
def _choose_pkg(self):
    """
    Choose a task that has all its dependencies satisfied.
    """
    if self._choose_pkg_return_early:
        # [elided -- presumably returns None]
    if self._digraph is None:
        if (self._jobs or self._task_queues.merge) and \
            not ("--nodeps" in self.myopts and \
            (self._max_jobs is True or self._max_jobs > 1)):
            self._choose_pkg_return_early = True
            # [elided -- presumably returns None]
        return self._pkg_queue.pop(0)
    if not (self._jobs or self._task_queues.merge):
        return self._pkg_queue.pop(0)
    self._prune_digraph()
    # [elided -- presumably chosen_pkg = None]
    later = set(self._pkg_queue)
    for pkg in self._pkg_queue:
        # [elided in this excerpt]
        if not self._dependent_on_scheduled_merges(pkg, later):
            # [elided -- presumably selects pkg and breaks]
    if chosen_pkg is not None:
        self._pkg_queue.remove(chosen_pkg)
    if chosen_pkg is None:
        # There's no point in searching for a package to
        # choose until at least one of the existing jobs
        # completes.
        self._choose_pkg_return_early = True
    # [elided -- presumably "return chosen_pkg"]
def _dependent_on_scheduled_merges(self, pkg, later):
    """
    Traverse the subgraph of the given packages deep dependencies
    to see if it contains any scheduled merges.

    @param pkg: a package to check dependencies for
    @param later: packages for which dependence should be ignored
        since they will be merged later than pkg anyway and therefore
        delaying the merge of pkg will not result in a more optimal
        merge order in any case
    @rtype: bool
    @returns: True if the package is dependent, False otherwise.
    """
    graph = self._digraph
    completed_tasks = self._completed_tasks
    # [elided -- presumably dependent = False]
    traversed_nodes = set([pkg])
    direct_deps = graph.child_nodes(pkg)
    node_stack = direct_deps
    direct_deps = frozenset(direct_deps)
    # [loop header elided -- iterates while node_stack is non-empty]
    node = node_stack.pop()
    if node in traversed_nodes:
        # [elided -- presumably "continue"]
    traversed_nodes.add(node)
    if not ((node.installed and node.operation == "nomerge") or \
        (node.operation == "uninstall" and \
        node not in direct_deps) or \
        node in completed_tasks or \
        # [condition tail and dependent-branch elided in this excerpt]
    node_stack.extend(graph.child_nodes(node))
    # [elided -- presumably "return dependent"]
def _allocate_config(self, root):
    """
    Allocate a unique config instance for a task in order
    to prevent interference between parallel tasks.
    """
    if self._config_pool[root]:
        temp_settings = self._config_pool[root].pop()
    # [else branch header elided in this excerpt]
        temp_settings = portage.config(clone=self.pkgsettings[root])
    # Since config.setcpv() isn't guaranteed to call config.reset() due to
    # performance reasons, call it here to make sure all settings from the
    # previous package get flushed out (such as PORTAGE_LOG_FILE).
    temp_settings.reload()
    temp_settings.reset()
    return temp_settings
def _deallocate_config(self, settings):
    """Return a config instance to the per-root pool so a later task
    can reuse it (see _allocate_config)."""
    pool = self._config_pool[settings["ROOT"]]
    pool.append(settings)
def _main_loop(self):
    # Drive scheduling and event polling until the queues drain.
    # Only allow 1 job max if a restart is scheduled
    # due to portage update.
    if self._is_restart_scheduled() or \
        self._opts_no_background.intersection(self.myopts):
        self._set_max_jobs(1)
    merge_queue = self._task_queues.merge
    while self._schedule():
        if self._poll_event_handlers:
            # [elided -- presumably runs the poll loop]
    # [drain-loop lines elided in this excerpt]
    if not (self._jobs or merge_queue):
        # [elided in this excerpt]
    if self._poll_event_handlers:
        # [elided in this excerpt]
def _keep_scheduling(self):
    """Return True while packages remain to schedule and no failure
    forces a stop (failures are tolerated in fetchonly mode)."""
    if not self._pkg_queue:
        return False
    if self._failed_pkgs and not self._build_opts.fetchonly:
        return False
    return True
def _schedule_tasks(self):
    # One scheduling pass: start new tasks, refresh the status display,
    # and report whether the main loop should keep going.
    self._schedule_tasks_imp()
    self._status_display.display()
    # [elided in this excerpt]
    for q in self._task_queues.values():
        # [per-queue scheduling elided in this excerpt]
    # Cancel prefetchers if they're the only reason
    # the main poll loop is still running.
    if self._failed_pkgs and not self._build_opts.fetchonly and \
        not (self._jobs or self._task_queues.merge) and \
        self._task_queues.fetch:
        self._task_queues.fetch.clear()
        # [elided in this excerpt]
        self._schedule_tasks_imp()
        self._status_display.display()
    return self._keep_scheduling()
def _job_delay(self):
    """
    @rtype: bool
    @returns: True if job scheduling should be delayed, False otherwise.
    """
    if self._jobs and self._max_load is not None:
        # [elided in this excerpt]
        current_time = time.time()
        # Back off proportionally to the number of running jobs, since
        # the load average responds slowly to newly added jobs.
        delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
        if delay > self._job_delay_max:
            delay = self._job_delay_max
        if (current_time - self._previous_job_start_time) < delay:
            # [elided -- presumably returns True]
    # [elided -- presumably returns False]
def _schedule_tasks_imp(self):
    """
    Start as many new jobs as the current limits allow.

    @rtype: bool
    @returns: True if state changed, False otherwise.

    NOTE(review): several lines of this method are elided in this view,
    including the enclosing scheduling loop, state_change updates, and
    the branch conditions separating installed / binary / ebuild
    packages.
    """
    if not self._keep_scheduling():
        return bool(state_change)

    if self._choose_pkg_return_early or \
        not self._can_add_job() or \
        return bool(state_change)

    pkg = self._choose_pkg()
    # (guard for pkg being None elided in this view)
    return bool(state_change)

    if not pkg.installed:
        self._pkg_count.curval += 1

    task = self._task(pkg)

    # NOTE(review): presumably the already-installed branch — merge only,
    # no build/extract job.  Confirm against the elided conditions.
    merge = PackageMerge(merge=task)
    merge.addExitListener(self._merge_exit)
    self._task_queues.merge.add(merge)

    # NOTE(review): presumably the binary-package branch (extract exit
    # listener) — confirm against the elided conditions.
    self._previous_job_start_time = time.time()
    self._status_display.running = self._jobs
    task.addExitListener(self._extract_exit)
    self._task_queues.jobs.add(task)

    # NOTE(review): presumably the ebuild branch (build exit listener).
    self._previous_job_start_time = time.time()
    self._status_display.running = self._jobs
    task.addExitListener(self._build_exit)
    self._task_queues.jobs.add(task)

    return bool(state_change)
def _task(self, pkg):
    """
    Build the MergeListItem task for a package, first looking up the
    installed package (if any) in the same slot that this merge will
    replace.
    """
    pkg_to_replace = None
    if pkg.operation != "uninstall":
        vardb = pkg.root_config.trees["vartree"].dbapi
        previous_cpv = vardb.match(pkg.slot_atom)
        # NOTE(review): the guard checking that a previous version
        # exists is elided in this view.
        previous_cpv = previous_cpv.pop()
        pkg_to_replace = self._pkg(previous_cpv,
            "installed", pkg.root_config, installed=True)

    task = MergeListItem(args_set=self._args_set,
        background=self._background, binpkg_opts=self._binpkg_opts,
        build_opts=self._build_opts,
        config_pool=self._ConfigPool(pkg.root,
            self._allocate_config, self._deallocate_config),
        emerge_opts=self.myopts,
        find_blockers=self._find_blockers(pkg), logger=self._logger,
        mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
        pkg_to_replace=pkg_to_replace,
        prefetcher=self._prefetchers.get(pkg),
        scheduler=self._sched_iface,
        settings=self._allocate_config(pkg.root),
        statusMessage=self._status_msg,
        world_atom=self._world_atom)
    # (return task elided in this view)
def _failed_pkg_msg(self, failed_pkg, action, preposition):
    # Report a failed merge/fetch in the status display, mentioning the
    # package's ROOT when it is not "/" and the build log path if one
    # was found.
    pkg = failed_pkg.pkg
    msg = "%s to %s %s" % \
        (bad("Failed"), action, colorize("INFORM", pkg.cpv))
    if pkg.root != "/":
        msg += " %s %s" % (preposition, pkg.root)

    log_path = self._locate_failure_log(failed_pkg)
    if log_path is not None:
        msg += ", Log file:"
    self._status_msg(msg)

    # The log path goes on its own status line.
    if log_path is not None:
        self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
def _status_msg(self, msg):
    """
    Display a brief status message (no newlines) in the status display.
    This is called by tasks to provide feedback to the user. This
    delegates the responsibility of generating \r and \n control characters,
    to guarantee that lines are created or erased when necessary and
    appropriately.

    @type msg: str
    @param msg: a brief status message (no newlines allowed)
    """
    # In the foreground case, emit a newline first so the message is not
    # appended to an in-progress output line.
    if not self._background:
        writemsg_level("\n")
    self._status_display.displayMessage(msg)
def _save_resume_list(self):
    """
    Do this before verifying the ebuild Manifests since it might
    be possible for the user to use --resume --skipfirst get past
    a non-essential package with a broken digest.
    """
    mtimedb = self._mtimedb
    # Only "merge" operations on Package instances belong in the resume
    # list; uninstalls and other entries are excluded.
    mtimedb["resume"]["mergelist"] = [list(x) \
        for x in self._mergelist \
        if isinstance(x, Package) and x.operation == "merge"]
    # (mtimedb commit elided in this view)
def _calc_resume_list(self):
    """
    Use the current resume list to calculate a new one,
    dropping any packages with unsatisfied deps.

    @rtype: bool
    @returns: True if successful, False otherwise.

    NOTE(review): several control-flow lines of this method (try/except
    headers, returns, guards) are elided in this view.
    """
    print colorize("GOOD", "*** Resuming merge...")

    if self._show_list():
        if "--tree" in self.myopts:
            portage.writemsg_stdout("\n" + \
                darkgreen("These are the packages that " + \
                "would be merged, in reverse order:\n\n"))
            # (else branch for the non---tree case; header elided)
            portage.writemsg_stdout("\n" + \
                darkgreen("These are the packages that " + \
                "would be merged, in order:\n\n"))

    show_spinner = "--quiet" not in self.myopts and \
        "--nodeps" not in self.myopts

    # (spinner guard elided in this view)
    print "Calculating dependencies ",

    myparams = create_depgraph_params(self.myopts, None)
    # (try header elided in this view)
    success, mydepgraph, dropped_tasks = resume_depgraph(
        self.settings, self.trees, self._mtimedb, self.myopts,
        myparams, self._spinner, skip_unsatisfied=True)
    except depgraph.UnsatisfiedResumeDep, e:
        mydepgraph = e.depgraph
        dropped_tasks = set()

    print "\b\b... done!"

    # Deferred error report, queued so it is shown after elog output.
    def unsatisfied_resume_dep_msg():
        mydepgraph.display_problems()
        out = portage.output.EOutput()
        out.eerror("One or more packages are either masked or " + \
            "have missing dependencies:")

        show_parents = set()
        for dep in e.value:
            if dep.parent in show_parents:
                # (continue elided in this view)
            show_parents.add(dep.parent)
            if dep.atom is None:
                out.eerror(indent + "Masked package:")
                out.eerror(2 * indent + str(dep.parent))
            # (else branch; header elided)
            out.eerror(indent + str(dep.atom) + " pulled in by:")
            out.eerror(2 * indent + str(dep.parent))

        msg = "The resume list contains packages " + \
            "that are either masked or have " + \
            "unsatisfied dependencies. " + \
            "Please restart/continue " + \
            "the operation manually, or use --skipfirst " + \
            "to skip the first package in the list and " + \
            "any other packages that may be " + \
            "masked or have missing dependencies."
        for line in textwrap.wrap(msg, 72):
            # (out.eerror(line) elided in this view)
    self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)

    if success and self._show_list():
        mylist = mydepgraph.altlist()
        if "--tree" in self.myopts:
            # (list reversal elided in this view)
            mydepgraph.display(mylist, favorites=self._favorites)

    # Failure path: queue the problem report and bail out.
    self._post_mod_echo_msgs.append(mydepgraph.display_problems)
    mydepgraph.display_problems()

    # Success path: adopt the recalculated merge list.
    mylist = mydepgraph.altlist()
    mydepgraph.break_refs(mylist)
    mydepgraph.break_refs(dropped_tasks)
    self._mergelist = mylist
    self._set_digraph(mydepgraph.schedulerGraph())

    for task in dropped_tasks:
        if not (isinstance(task, Package) and task.operation == "merge"):
            # (continue elided in this view)
        msg = "emerge --keep-going:" + \
        if pkg.root != "/":
            msg += " for %s" % (pkg.root,)
        msg += " dropped due to unsatisfied dependency."
        for line in textwrap.wrap(msg, msg_width):
            eerror(line, phase="other", key=pkg.cpv)
        settings = self.pkgsettings[pkg.root]
        # Ensure that log collection from $T is disabled inside
        # elog_process(), since any logs that might exist are
        # irrelevant here.
        settings.pop("T", None)
        portage.elog.elog_process(pkg.cpv, settings)
        self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
def _show_list(self):
    # True when a merge-list preview should be displayed: --ask, --tree
    # or --verbose was given without --quiet.
    # NOTE(review): the return statements are elided in this view.
    myopts = self.myopts
    if "--quiet" not in myopts and \
        ("--ask" in myopts or "--tree" in myopts or \
        "--verbose" in myopts):
def _world_atom(self, pkg):
    """
    Add the package to the world file, but only if
    it's supposed to be added. Otherwise, do nothing.

    NOTE(review): the early-return lines under the guards below are
    elided in this view.
    """
    # Options that imply nothing permanent should be recorded.
    if set(("--buildpkgonly", "--fetchonly",
        "--oneshot", "--onlydeps",
        "--pretend")).intersection(self.myopts):

    # Only record packages merged into the target root.
    if pkg.root != self.target_root:

    # Only record packages the user explicitly asked for.
    args_set = self._args_set
    if not args_set.findAtomForPackage(pkg):

    logger = self._logger
    pkg_count = self._pkg_count
    root_config = pkg.root_config
    world_set = root_config.sets["world"]
    world_locked = False
    if hasattr(world_set, "lock"):
        # (world_set.lock() call elided in this view)
        world_locked = True

    # (enclosing try block header elided in this view)
    if hasattr(world_set, "load"):
        world_set.load() # maybe it's changed on disk

    atom = create_world_atom(pkg, args_set, root_config)
    # (guard on atom elided in this view)
    if hasattr(world_set, "add"):
        self._status_msg(('Recording %s in "world" ' + \
            'favorites file...') % atom)
        logger.log(" === (%s of %s) Updating world file (%s)" % \
            (pkg_count.curval, pkg_count.maxval, pkg.cpv))
        world_set.add(atom)
    # NOTE(review): presumably the else branch for world sets without
    # an add() method — confirm against the elided header.
    writemsg_level('\n!!! Unable to record %s in "world"\n' % \
        (atom,), level=logging.WARN, noiselevel=-1)
    # (finally block releasing the world lock elided in this view)
def _pkg(self, cpv, type_name, root_config, installed=False):
    """
    Get a package instance from the cache, or create a new
    one if necessary. Raises KeyError from aux_get if it
    fails for some reason (package does not exist or is
    corrupt).

    NOTE(review): the operation-selection guard and the return
    statements are elided in this view.
    """
    operation = "merge"
    # (condition selecting "nomerge" elided in this view)
    operation = "nomerge"

    if self._digraph is not None:
        # Reuse existing instance when available.
        pkg = self._digraph.get(
            (type_name, root_config.root, cpv, operation))
        if pkg is not None:
            # (return of the cached instance elided in this view)

    tree_type = depgraph.pkg_tree_map[type_name]
    db = root_config.trees[tree_type].dbapi
    db_keys = list(self.trees[root_config.root][
        tree_type].dbapi._aux_cache_keys)
    metadata = izip(db_keys, db.aux_get(cpv, db_keys))
    pkg = Package(cpv=cpv, metadata=metadata,
        root_config=root_config, installed=installed)
    if type_name == "ebuild":
        # Compute the effective USE for this specific ebuild.
        settings = self.pkgsettings[root_config.root]
        settings.setcpv(pkg)
        pkg.metadata["USE"] = settings["PORTAGE_USE"]
class MetadataRegen(PollScheduler):
    """
    Poll-driven scheduler that regenerates the ebuild metadata cache by
    spawning one metadata process per ebuild (used by `emerge --regen`).

    NOTE(review): several lines of this class are elided in this view,
    including the "def run(self):" header for the body between
    _iter_metadata_processes() and _schedule_tasks().
    """

    def __init__(self, portdb, max_jobs=None, max_load=None):
        PollScheduler.__init__(self)
        self._portdb = portdb

        if max_jobs is None:
            # (default job count assignment elided in this view)

        self._max_jobs = max_jobs
        self._max_load = max_load
        self._sched_iface = self._sched_iface_class(
            register=self._register,
            schedule=self._schedule_wait,
            unregister=self._unregister)

        # cpvs whose metadata was (or is being) generated successfully.
        self._valid_pkgs = set()
        self._process_iter = self._iter_metadata_processes()

    def _iter_metadata_processes(self):
        # Lazily yield one metadata-regeneration process per ebuild.
        portdb = self._portdb
        valid_pkgs = self._valid_pkgs
        every_cp = portdb.cp_all()
        # Reverse sort so that pop() walks categories in ascending order.
        every_cp.sort(reverse=True)

        # (enclosing "while every_cp:" loop header elided in this view)
        cp = every_cp.pop()
        portage.writemsg_stdout("Processing %s\n" % cp)
        cpv_list = portdb.cp_list(cp)
        for cpv in cpv_list:
            valid_pkgs.add(cpv)
            ebuild_path, repo_path = portdb.findname2(cpv)
            metadata_process = portdb._metadata_process(
                cpv, ebuild_path, repo_path)
            if metadata_process is None:
                # (continue elided in this view)
            yield metadata_process

    # NOTE(review): the following lines belong to a method (its def
    # header, presumably "def run(self):", is elided in this view).
        portdb = self._portdb
        from portage.cache.cache_errors import CacheError

        # Snapshot existing cache keys so stale ones can be pruned later.
        for mytree in portdb.porttrees:
            # (try header elided in this view)
            dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
            except CacheError, e:
                portage.writemsg("Error listing cache entries for " + \
                    "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)

        while self._schedule():
            # (wait call elided in this view)

        # Any cpv still resolvable in a tree is not dead.
        for y in self._valid_pkgs:
            for mytree in portdb.porttrees:
                if portdb.findname2(y, mytree=mytree)[0]:
                    dead_nodes[mytree].discard(y)

        for mytree, nodes in dead_nodes.iteritems():
            auxdb = portdb.auxdb[mytree]
            # (per-node deletion loop and try header elided in this view)
            except (KeyError, CacheError):
                # (pass elided in this view)

    def _schedule_tasks(self):
        """
        @rtype: bool
        @returns: True if there may be remaining tasks to schedule,
            False otherwise.

        NOTE(review): the return statements and the try header around
        the iterator call are elided in this view.
        """
        while self._can_add_job():
            metadata_process = self._process_iter.next()
            except StopIteration:

            metadata_process.scheduler = self._sched_iface
            metadata_process.addExitListener(self._metadata_exit)
            metadata_process.start()

    def _metadata_exit(self, metadata_process):
        # (job bookkeeping elided in this view)
        if metadata_process.returncode != os.EX_OK:
            # Drop the cpv so it is not protected from cache pruning.
            self._valid_pkgs.discard(metadata_process.cpv)
            portage.writemsg("Error processing %s, continuing...\n" % \
                (metadata_process.cpv,))
class UninstallFailure(portage.exception.PortageException):
    """
    An instance of this class is raised by unmerge() when
    an uninstallation fails.
    """

    def __init__(self, *pargs):
        portage.exception.PortageException.__init__(self, pargs)
        # The first positional argument is the failing command's exit
        # status.  NOTE(review): the guard around this assignment (for
        # the no-argument case) is elided in this view.
        self.status = pargs[0]
def unmerge(root_config, myopts, unmerge_action,
    unmerge_files, ldpath_mtimes, autoclean=0,
    clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
    scheduler=None, writemsg_level=portage.util.writemsg_level):
    """
    Resolve command-line arguments to installed packages for the
    "unmerge"/"prune"/"clean" style actions, display what would be
    removed, and (unless pretending) call portage.unmerge() for each
    selected package.

    NOTE(review): many control-flow lines of this function (try/except
    headers, else branches, continue/return statements) are elided in
    this view; comments below mark the gaps.
    """
    quiet = "--quiet" in myopts
    settings = root_config.settings
    sets = root_config.sets
    vartree = root_config.trees["vartree"]
    candidate_catpkgs=[]

    xterm_titles = "notitles" not in settings.features
    out = portage.output.EOutput()

    db_keys = list(vartree.dbapi._aux_cache_keys)

    # (local "_pkg(cpv)" helper definition header elided in this view;
    # it caches Package instances for installed packages)
        pkg = pkg_cache.get(cpv)
        # ("if pkg is None:" guard elided in this view)
        pkg = Package(cpv=cpv, installed=True,
            metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
            root_config=root_config,
            type_name="installed")
        pkg_cache[cpv] = pkg

    vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
    # (try header elided in this view)
    # At least the parent needs to exist for the lock file.
    portage.util.ensure_dirs(vdb_path)
    except portage.exception.PortageException:
        # (best-effort: failure to create the directory is tolerated)

    if os.access(vdb_path, os.W_OK):
        vdb_lock = portage.locks.lockdir(vdb_path)
    realsyslist = sets["system"].getAtoms()

    # Expand virtuals in the system set into their single providers.
    for x in realsyslist:
        mycp = portage.dep_getkey(x)
        if mycp in settings.getvirtuals():
            for provider in settings.getvirtuals()[mycp]:
                if vartree.dbapi.match(provider):
                    providers.append(provider)
            if len(providers) == 1:
                syslist.extend(providers)
            # (else branch for the non-virtual case; header elided)
            syslist.append(mycp)

    mysettings = portage.config(clone=settings)

    if not unmerge_files:
        if unmerge_action == "unmerge":
            print bold("emerge unmerge") + " can only be used with specific package names"

    localtree = vartree
    # process all arguments and add all
    # valid db entries to candidate_catpkgs
    # (the "global unmerge" branch header is elided in this view)
    if not unmerge_files:
        candidate_catpkgs.extend(vartree.dbapi.cp_all())
    #we've got command-line arguments
    if not unmerge_files:
        print "\nNo packages to unmerge have been provided.\n"
    for x in unmerge_files:
        arg_parts = x.split('/')
        if x[0] not in [".","/"] and \
            arg_parts[-1][-7:] != ".ebuild":
            #possible cat/pkg or dep; treat as such
            candidate_catpkgs.append(x)
        elif unmerge_action in ["prune","clean"]:
            print "\n!!! Prune and clean do not accept individual" + \
                " ebuilds as arguments;\n skipping.\n"

        # it appears that the user is specifying an installed
        # ebuild and we're in "unmerge" mode, so it's ok.
        if not os.path.exists(x):
            print "\n!!! The path '"+x+"' doesn't exist.\n"

        absx = os.path.abspath(x)
        sp_absx = absx.split("/")
        if sp_absx[-1][-7:] == ".ebuild":
            absx = "/".join(sp_absx)

        sp_absx_len = len(sp_absx)

        vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
        vdb_len = len(vdb_path)

        sp_vdb = vdb_path.split("/")
        sp_vdb_len = len(sp_vdb)

        if not os.path.exists(absx+"/CONTENTS"):
            print "!!! Not a valid db dir: "+str(absx)

        if sp_absx_len <= sp_vdb_len:
            # The Path is shorter... so it can't be inside the vdb.
            print "\n!!!",x,"cannot be inside "+ \
                vdb_path+"; aborting.\n"

        for idx in range(0,sp_vdb_len):
            if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
                print "\n!!!", x, "is not inside "+\
                    vdb_path+"; aborting.\n"

        print "="+"/".join(sp_absx[sp_vdb_len:])
        candidate_catpkgs.append(
            "="+"/".join(sp_absx[sp_vdb_len:]))

    # (newline setup body elided in this view)
    if (not "--quiet" in myopts):
    if settings["ROOT"] != "/":
        writemsg_level(darkgreen(newline+ \
            ">>> Using system located in ROOT tree %s\n" % \

    if (("--pretend" in myopts) or ("--ask" in myopts)) and \
        not ("--quiet" in myopts):
        writemsg_level(darkgreen(newline+\
            ">>> These are the packages that would be unmerged:\n"))

    # Preservation of order is required for --depclean and --prune so
    # that dependencies are respected. Use all_selected to eliminate
    # duplicate packages since the same package may be selected by
    all_selected = set()
    for x in candidate_catpkgs:
        # cycle through all our candidate deps and determine
        # what will and will not get unmerged
        # (try header elided in this view)
        mymatch = vartree.dbapi.match(x)
        except portage.exception.AmbiguousPackageName, errpkgs:
            print "\n\n!!! The short ebuild name \"" + \
                x + "\" is ambiguous. Please specify"
            print "!!! one of the following fully-qualified " + \
                "ebuild names instead:\n"
            for i in errpkgs[0]:
                print " " + green(i)

        if not mymatch and x[0] not in "<>=~":
            mymatch = localtree.dep_match(x)
        # (guard for the no-match case elided in this view)
        portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
            (x, unmerge_action), noiselevel=-1)

        # (pkgmap.append call head elided in this view)
            {"protected": set(), "selected": set(), "omitted": set()})
        mykey = len(pkgmap) - 1
        if unmerge_action=="unmerge":
            # (enclosing "for y in mymatch:" loop header elided)
            if y not in all_selected:
                pkgmap[mykey]["selected"].add(y)
                all_selected.add(y)
        elif unmerge_action == "prune":
            if len(mymatch) == 1:
                # (continue elided in this view)
            best_version = mymatch[0]
            best_slot = vartree.getslot(best_version)
            best_counter = vartree.dbapi.cpv_counter(best_version)
            for mypkg in mymatch[1:]:
                myslot = vartree.getslot(mypkg)
                mycounter = vartree.dbapi.cpv_counter(mypkg)
                if (myslot == best_slot and mycounter > best_counter) or \
                    mypkg == portage.best([mypkg, best_version]):
                    if myslot == best_slot:
                        if mycounter < best_counter:
                            # On slot collision, keep the one with the
                            # highest counter since it is the most
                            # recently installed.
                    best_version = mypkg
                    best_counter = mycounter
            pkgmap[mykey]["protected"].add(best_version)
            pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
                if mypkg != best_version and mypkg not in all_selected)
            all_selected.update(pkgmap[mykey]["selected"])
        # (else branch header elided in this view)
            # unmerge_action == "clean"
            for mypkg in mymatch:
                if unmerge_action == "clean":
                    myslot = localtree.getslot(mypkg)
                # (else branch header elided)
                    # since we're pruning, we don't care about slots
                    # and put all the pkgs in together
                if myslot not in slotmap:
                    slotmap[myslot] = {}
                slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg

            for myslot in slotmap:
                counterkeys = slotmap[myslot].keys()
                if not counterkeys:
                    # (continue elided; counterkeys sort also elided)
                pkgmap[mykey]["protected"].add(
                    slotmap[myslot][counterkeys[-1]])
                del counterkeys[-1]
                #be pretty and get them in order of merge:
                for ckey in counterkeys:
                    mypkg = slotmap[myslot][ckey]
                    if mypkg not in all_selected:
                        pkgmap[mykey]["selected"].add(mypkg)
                        all_selected.add(mypkg)
                # ok, now the last-merged package
                # is protected, and the rest are selected
    numselected = len(all_selected)
    if global_unmerge and not numselected:
        portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")

    if not numselected:
        portage.writemsg_stdout(
            "\n>>> No packages selected for removal by " + \
            unmerge_action + "\n")

        # (finally block releasing the vdb lock; header elided)
        vartree.dbapi.flush_cache()
        portage.locks.unlockdir(vdb_lock)

    # Protect portage itself from being unmerged accidentally.
    for cp in xrange(len(pkgmap)):
        for cpv in pkgmap[cp]["selected"].copy():
            # (try/except around _pkg lookup elided in this view)
            # It could have been uninstalled
            # by a concurrent process.
            if unmerge_action != "clean" and \
                root_config.root == "/" and \
                portage.match_from_list(
                portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
                msg = ("Not unmerging package %s since there is no valid " + \
                    "reason for portage to unmerge itself.") % (pkg.cpv,)
                for line in textwrap.wrap(msg, 75):
                    # (output call elided in this view)
                # adjust pkgmap so the display output is correct
                pkgmap[cp]["selected"].remove(cpv)
                all_selected.remove(cpv)
                pkgmap[cp]["protected"].add(cpv)

    numselected = len(all_selected)
    if not numselected:
        # (writemsg_stdout call head elided in this view)
            "\n>>> No packages selected for removal by " + \
            unmerge_action + "\n")

    # Unmerge order only matters in some cases
    # (ordering branch and "for d in pkgmap:" loop header elided)
        selected = d["selected"]
        cp = portage.cpv_getkey(iter(selected).next())
        cp_dict = unordered.get(cp)
        if cp_dict is None:
            unordered[cp] = cp_dict
        for k, v in d.iteritems():
            cp_dict[k].update(v)
    pkgmap = [unordered[cp] for cp in sorted(unordered)]

    for x in xrange(len(pkgmap)):
        selected = pkgmap[x]["selected"]
        for mytype, mylist in pkgmap[x].iteritems():
            if mytype == "selected":
                # (continue elided in this view)
            mylist.difference_update(all_selected)
        cp = portage.cpv_getkey(iter(selected).next())
        for y in localtree.dep_match(cp):
            if y not in pkgmap[x]["omitted"] and \
                y not in pkgmap[x]["selected"] and \
                y not in pkgmap[x]["protected"] and \
                y not in all_selected:
                pkgmap[x]["omitted"].add(y)
        if global_unmerge and not pkgmap[x]["selected"]:
            #avoid cluttering the preview printout with stuff that isn't getting unmerged
        if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
            writemsg_level(colorize("BAD","\a\n\n!!! " + \
                "'%s' is part of your system profile.\n" % cp),
                level=logging.WARNING, noiselevel=-1)
            writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
                "be damaging to your system.\n\n"),
                level=logging.WARNING, noiselevel=-1)
            if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
                countdown(int(settings["EMERGE_WARNING_DELAY"]),
                    colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
        writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
        # (alternate compact-format branch header elided)
        writemsg_level(bold(cp) + ": ", noiselevel=-1)
        for mytype in ["selected","protected","omitted"]:
            writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
            if pkgmap[x][mytype]:
                sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
                sorted_pkgs.sort(portage.pkgcmp)
                for pn, ver, rev in sorted_pkgs:
                    # (the "-r0" special case branch is elided here)
                    myversion = ver + "-" + rev
                    if mytype == "selected":
                        # (writemsg_level call heads elided)
                        colorize("UNMERGE_WARN", myversion + " "),
                        colorize("GOOD", myversion + " "), noiselevel=-1)
            # (else branch header elided)
            writemsg_level("none ", noiselevel=-1)
            writemsg_level("\n", noiselevel=-1)
    writemsg_level("\n", noiselevel=-1)

    writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
        " packages are slated for removal.\n")
    writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
        " and " + colorize("GOOD", "'omitted'") + \
        " packages will not be removed.\n\n")

    if "--pretend" in myopts:
        #we're done... return
    if "--ask" in myopts:
        if userquery("Would you like to unmerge these packages?")=="No":
            # enter pretend mode for correct formatting of results
            myopts["--pretend"] = True

    #the real unmerging begins, after a short delay....
    if clean_delay and not autoclean:
        countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")

    for x in xrange(len(pkgmap)):
        for y in pkgmap[x]["selected"]:
            writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
            emergelog(xterm_titles, "=== Unmerging... ("+y+")")
            mysplit = y.split("/")

            retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
                mysettings, unmerge_action not in ["clean","prune"],
                vartree=vartree, ldpath_mtimes=ldpath_mtimes,
                scheduler=scheduler)

            if retval != os.EX_OK:
                emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
                # (only raised under raise_on_error; guard elided)
                raise UninstallFailure(retval)

            # (success/else branch header elided in this view)
            if clean_world and hasattr(sets["world"], "cleanPackage"):
                sets["world"].cleanPackage(vartree.dbapi, y)
            emergelog(xterm_titles, " >>> unmerge success: "+y)

    if clean_world and hasattr(sets["world"], "remove"):
        for s in root_config.setconfig.active:
            sets["world"].remove(SETPREFIX+s)
def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
    # Regenerate the GNU info directory index ("dir" files) for any info
    # directory whose mtime changed since the last emerge run.
    # NOTE(review): several control-flow lines (else branches, continue
    # statements, try headers, counters) are elided in this view.

    if os.path.exists("/usr/bin/install-info"):
        out = portage.output.EOutput()
        # (regen_infodirs init and "for z in infodirs:" loop header elided)
        inforoot=normpath(root+z)
        if os.path.isdir(inforoot):
            infomtime = long(os.stat(inforoot).st_mtime)
            if inforoot not in prev_mtimes or \
                prev_mtimes[inforoot] != infomtime:
                regen_infodirs.append(inforoot)

        if not regen_infodirs:
            portage.writemsg_stdout("\n")
            out.einfo("GNU info directory index is up-to-date.")
        # (else branch header elided in this view)
            portage.writemsg_stdout("\n")
            out.einfo("Regenerating GNU info directory index...")

            dir_extensions = ("", ".gz", ".bz2")
            for inforoot in regen_infodirs:
                # (skip of empty entries elided in this view)
                if not os.path.isdir(inforoot) or \
                    not os.access(inforoot, os.W_OK):
                    # (continue elided in this view)

                file_list = os.listdir(inforoot)
                dir_file = os.path.join(inforoot, "dir")
                moved_old_dir = False
                processed_count = 0
                for x in file_list:
                    if x.startswith(".") or \
                        os.path.isdir(os.path.join(inforoot, x)):
                        # (continue elided in this view)
                    if x.startswith("dir"):
                        # Skip existing index files themselves.
                        for ext in dir_extensions:
                            if x == "dir" + ext or \
                                x == "dir" + ext + ".old":

                    # Before the first real info file, move any stale
                    # index aside so install-info builds a fresh one.
                    if processed_count == 0:
                        for ext in dir_extensions:
                            # (try header elided in this view)
                            os.rename(dir_file + ext, dir_file + ext + ".old")
                            moved_old_dir = True
                            except EnvironmentError, e:
                                if e.errno != errno.ENOENT:
                                    # (raise elided in this view)

                    processed_count += 1

                    myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
                    existsstr="already exists, for file `"
                    # (guard on non-empty output elided in this view)
                    if re.search(existsstr,myso):
                        # Already exists... Don't increment the count for this.
                    elif myso[:44]=="install-info: warning: no info dir entry in ":
                        # This info file doesn't contain a DIR-header: install-info produces this
                        # (harmless) warning (the --quiet switch doesn't seem to work).
                        # Don't increment the count for this.
                    # (else branch header elided in this view)
                        badcount=badcount+1
                        errmsg += myso + "\n"

                if moved_old_dir and not os.path.exists(dir_file):
                    # We didn't generate a new dir file, so put the old file
                    # back where it was originally found.
                    for ext in dir_extensions:
                        # (try header elided in this view)
                        os.rename(dir_file + ext + ".old", dir_file + ext)
                        except EnvironmentError, e:
                            if e.errno != errno.ENOENT:
                                # (raise elided in this view)

                # Clean dir.old cruft so that they don't prevent
                # unmerge of otherwise empty directories.
                for ext in dir_extensions:
                    # (try header elided in this view)
                    os.unlink(dir_file + ext + ".old")
                    except EnvironmentError, e:
                        if e.errno != errno.ENOENT:
                            # (raise elided in this view)

                #update mtime so we can potentially avoid regenerating.
                prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)

            # (badcount branch header elided in this view)
            out.eerror("Processed %d info files; %d errors." % \
                (icount, badcount))
            writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
            # (else branch header elided in this view)
            out.einfo("Processed %d info files." % (icount,))
def display_news_notification(root_config, myopts):
    # Check each configured repository for unread news items and print a
    # reminder to read them with `eselect news`.
    target_root = root_config.root
    trees = root_config.trees
    settings = trees["vartree"].settings
    portdb = trees["porttree"].dbapi
    vardb = trees["vartree"].dbapi
    NEWS_PATH = os.path.join("metadata", "news")
    UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
    newsReaderDisplay = False
    # Don't update the unread-item state during --pretend runs.
    update = "--pretend" not in myopts

    for repo in portdb.getRepositories():
        unreadItems = checkUpdatedNewsItems(
            portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
        # (guard on unreadItems elided in this view)
        if not newsReaderDisplay:
            newsReaderDisplay = True
            # (blank separator print elided in this view)
        print colorize("WARN", " * IMPORTANT:"),
        print "%s news items need reading for repository '%s'." % (unreadItems, repo)

    if newsReaderDisplay:
        print colorize("WARN", " *"),
        print "Use " + colorize("GOOD", "eselect news") + " to read news items."
def _flush_elog_mod_echo():
    """
    Dump the mod_echo output now so that our other
    notifications are shown last.

    @rtype: bool
    @returns: True if messages were shown, False otherwise.
    """
    # NOTE: this view of the function was missing the try/else headers
    # around the import, leaving an orphaned "except ImportError:";
    # restore the complete control flow.
    messages_shown = False
    try:
        from portage.elog import mod_echo
    except ImportError:
        pass # happens during downgrade to a version without the module
    else:
        messages_shown = bool(mod_echo._items)
        mod_echo.finalize()
    return messages_shown
def post_emerge(root_config, myopts, mtimedb, retval):
    """
    Misc. things to run at the end of a merge session.

    Update Config Files
    Display preserved libs warnings

    @param root_config: The target-root configuration (its trees member
        is a dictionary mapping the ROOT to its package databases)
    @param myopts: emerge options
    @param mtimedb: The mtimeDB to store data needed across merge invocations
    @type mtimedb: MtimeDB class instance
    @param retval: Emerge's return value

    1. Calls sys.exit(retval)

    NOTE(review): several lines (returns, try/finally headers,
    sys.exit call) are elided in this view.
    """
    target_root = root_config.root
    trees = { target_root : root_config.trees }
    vardbapi = trees[target_root]["vartree"].dbapi
    settings = vardbapi.settings
    info_mtimes = mtimedb["info"]

    # Load the most current variables from ${ROOT}/etc/profile.env
    settings.regenerate()

    config_protect = settings.get("CONFIG_PROTECT","").split()
    infodirs = settings.get("INFOPATH","").split(":") + \
        settings.get("INFODIR","").split(":")

    if retval == os.EX_OK:
        exit_msg = " *** exiting successfully."
    # (else branch header elided in this view)
        exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
    emergelog("notitles" not in settings.features, exit_msg)

    _flush_elog_mod_echo()

    counter_hash = settings.get("PORTAGE_COUNTER_HASH")
    if counter_hash is not None and \
        counter_hash == vardbapi._counter_hash():
        # If vdb state has not changed then there's nothing else to do.
        # (early exit elided in this view)

    vdb_path = os.path.join(target_root, portage.VDB_PATH)
    portage.util.ensure_dirs(vdb_path)
    if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
        vdb_lock = portage.locks.lockdir(vdb_path)

    # (try block header elided in this view)
    if "noinfo" not in settings.features:
        chk_updated_info_files(target_root,
            infodirs, info_mtimes, retval)
    # (finally block releasing the lock; header elided)
    portage.locks.unlockdir(vdb_lock)

    chk_updated_cfg_files(target_root, config_protect)

    display_news_notification(root_config, myopts)
def chk_updated_cfg_files(target_root, config_protect):
    # Scan each CONFIG_PROTECT location for pending ._cfg????_* update
    # files and warn the user about them.
    # NOTE(review): several lines (counters, continue statements, try
    # headers, else branches) are elided in this view.

    #number of directories with some protect files in them
    for x in config_protect:
        x = os.path.join(target_root, x.lstrip(os.path.sep))
        if not os.access(x, os.W_OK):
            # Avoid Permission denied errors generated
            # (continue elided in this view)
        # (try header around lstat elided in this view)
        mymode = os.lstat(x).st_mode

        if stat.S_ISLNK(mymode):
            # We want to treat it like a directory if it
            # is a symlink to an existing directory.
            real_mode = os.stat(x).st_mode
            if stat.S_ISDIR(real_mode):
                # (mymode = real_mode elided in this view)

        if stat.S_ISDIR(mymode):
            mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
        # (else branch header elided in this view)
            mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
                os.path.split(x.rstrip(os.path.sep))
        mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
        a = commands.getstatusoutput(mycommand)
        # (non-zero status guard elided in this view)
        sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
        # Show the error message alone, sending stdout to /dev/null.
        os.system(mycommand + " 1>/dev/null")

        files = a[1].split('\0')
        # split always produces an empty string as the last element
        if files and not files[-1]:
            # (deletion of the trailing empty string elided in this view)

        print "\n"+colorize("WARN", " * IMPORTANT:"),
        if stat.S_ISDIR(mymode):
            print "%d config files in '%s' need updating." % \
        # (else branch header elided in this view)
            print "config file '%s' needs updating." % x

    print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
        " section of the " + bold("emerge")
    print " "+yellow("*")+" man page to learn how to update config files."
def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
	update=False):
    """
    Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
    Returns the number of unread (yet relevent) items.

    @param portdb: a portage tree database
    @type portdb: pordbapi
    @param vardb: an installed package database
    @type vardb: vardbapi
    @param NEWS_PATH: path, relative to the repository root, of the
        news directory
    @param UNREAD_PATH: directory holding the per-repository unread
        news state
    @param repo_id: the repository to check
    @param update: when True, refresh the unread state on disk

    @returns:
    1. The number of unread but relevant news items.
    """
    # NOTE: this view of the function was missing the signature's
    # continuation line ("update=False):"); restored above.
    from portage.news import NewsManager
    manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
    return manager.getUnreadItems( repo_id, update=update )
def insert_category_into_atom(atom, category):
    """
    Insert ``category`` in front of the package-name part of a
    category-less atom, preserving any leading operator characters
    (e.g. ">=foo-1.0" -> ">=cat/foo-1.0").

    @param atom: a package atom without a category part
    @param category: the category name to insert
    @returns: the atom with the category inserted, or None when the
        atom contains no word character to anchor the insertion on.
    """
    # NOTE: this view of the function was missing the "if alphanum:" /
    # "else:" / "return" lines; restored here.
    alphanum = re.search(r'\w', atom)
    if alphanum:
        ret = atom[:alphanum.start()] + "%s/" % category + \
            atom[alphanum.start():]
    else:
        ret = None
    return ret
def is_valid_package_atom(x):
    """
    Return True if x is a valid package atom.  When x lacks a category
    part, a dummy "cat/" prefix is temporarily inserted before the
    package name so that portage.isvalidatom() can evaluate it.
    """
    # NOTE: this view of the function was missing the guard lines
    # ('if "/" not in x:' and 'if alphanum:'); restored here.
    if "/" not in x:
        alphanum = re.search(r'\w', x)
        if alphanum:
            x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
    return portage.isvalidatom(x)
11460 def show_blocker_docs_link():
11462 print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
11463 print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
11465 print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
11468 def show_mask_docs():
11469 print "For more information, see the MASKED PACKAGES section in the emerge"
11470 print "man page or refer to the Gentoo Handbook."
11472 def action_sync(settings, trees, mtimedb, myopts, myaction):
11473 xterm_titles = "notitles" not in settings.features
11474 emergelog(xterm_titles, " === sync")
11475 myportdir = settings.get("PORTDIR", None)
11476 out = portage.output.EOutput()
11478 sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
11480 if myportdir[-1]=="/":
11481 myportdir=myportdir[:-1]
11482 if not os.path.exists(myportdir):
11483 print ">>>",myportdir,"not found, creating it."
11484 os.makedirs(myportdir,0755)
11485 syncuri = settings.get("SYNC", "").strip()
11487 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
11488 noiselevel=-1, level=logging.ERROR)
11492 updatecache_flg = False
11493 if myaction == "metadata":
11494 print "skipping sync"
11495 updatecache_flg = True
11496 elif syncuri[:8]=="rsync://":
11497 if not os.path.exists("/usr/bin/rsync"):
11498 print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
11499 print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
11504 import shlex, StringIO
11505 if settings["PORTAGE_RSYNC_OPTS"] == "":
11506 portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
11507 rsync_opts.extend([
11508 "--recursive", # Recurse directories
11509 "--links", # Consider symlinks
11510 "--safe-links", # Ignore links outside of tree
11511 "--perms", # Preserve permissions
11512 "--times", # Preserive mod times
11513 "--compress", # Compress the data transmitted
11514 "--force", # Force deletion on non-empty dirs
11515 "--whole-file", # Don't do block transfers, only entire files
11516 "--delete", # Delete files that aren't in the master tree
11517 "--stats", # Show final statistics about what was transfered
11518 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
11519 "--exclude=/distfiles", # Exclude distfiles from consideration
11520 "--exclude=/local", # Exclude local from consideration
11521 "--exclude=/packages", # Exclude packages from consideration
11525 # The below validation is not needed when using the above hardcoded
11528 portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
11529 lexer = shlex.shlex(StringIO.StringIO(
11530 settings.get("PORTAGE_RSYNC_OPTS","")), posix=True)
11531 lexer.whitespace_split = True
11532 rsync_opts.extend(lexer)
11535 for opt in ("--recursive", "--times"):
11536 if opt not in rsync_opts:
11537 portage.writemsg(yellow("WARNING:") + " adding required option " + \
11538 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
11539 rsync_opts.append(opt)
11541 for exclude in ("distfiles", "local", "packages"):
11542 opt = "--exclude=/%s" % exclude
11543 if opt not in rsync_opts:
11544 portage.writemsg(yellow("WARNING:") + \
11545 " adding required option %s not included in " % opt + \
11546 "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
11547 rsync_opts.append(opt)
11549 if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
11550 def rsync_opt_startswith(opt_prefix):
11551 for x in rsync_opts:
11552 if x.startswith(opt_prefix):
11556 if not rsync_opt_startswith("--timeout="):
11557 rsync_opts.append("--timeout=%d" % mytimeout)
11559 for opt in ("--compress", "--whole-file"):
11560 if opt not in rsync_opts:
11561 portage.writemsg(yellow("WARNING:") + " adding required option " + \
11562 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
11563 rsync_opts.append(opt)
11565 if "--quiet" in myopts:
11566 rsync_opts.append("--quiet") # Shut up a lot
11568 rsync_opts.append("--verbose") # Print filelist
11570 if "--verbose" in myopts:
11571 rsync_opts.append("--progress") # Progress meter for each file
11573 if "--debug" in myopts:
11574 rsync_opts.append("--checksum") # Force checksum on all files
11576 # Real local timestamp file.
11577 servertimestampfile = os.path.join(
11578 myportdir, "metadata", "timestamp.chk")
11580 content = portage.util.grabfile(servertimestampfile)
11584 mytimestamp = time.mktime(time.strptime(content[0],
11585 "%a, %d %b %Y %H:%M:%S +0000"))
11586 except (OverflowError, ValueError):
11591 rsync_initial_timeout = \
11592 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
11594 rsync_initial_timeout = 15
11597 maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
11598 except SystemExit, e:
11599 raise # Needed else can't exit
11601 maxretries=3 #default number of retries
11604 user_name, hostname, port = re.split(
11605 "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
11608 if user_name is None:
11610 updatecache_flg=True
11611 all_rsync_opts = set(rsync_opts)
11612 lexer = shlex.shlex(StringIO.StringIO(
11613 settings.get("PORTAGE_RSYNC_EXTRA_OPTS","")), posix=True)
11614 lexer.whitespace_split = True
11615 extra_rsync_opts = list(lexer)
11617 all_rsync_opts.update(extra_rsync_opts)
11618 family = socket.AF_INET
11619 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
11620 family = socket.AF_INET
11621 elif socket.has_ipv6 and \
11622 ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
11623 family = socket.AF_INET6
11625 SERVER_OUT_OF_DATE = -1
11626 EXCEEDED_MAX_RETRIES = -2
11632 for addrinfo in socket.getaddrinfo(
11633 hostname, None, family, socket.SOCK_STREAM):
11634 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
11635 # IPv6 addresses need to be enclosed in square brackets
11636 ips.append("[%s]" % addrinfo[4][0])
11638 ips.append(addrinfo[4][0])
11639 from random import shuffle
11641 except SystemExit, e:
11642 raise # Needed else can't exit
11643 except Exception, e:
11644 print "Notice:",str(e)
11649 dosyncuri = syncuri.replace(
11650 "//" + user_name + hostname + port + "/",
11651 "//" + user_name + ips[0] + port + "/", 1)
11652 except SystemExit, e:
11653 raise # Needed else can't exit
11654 except Exception, e:
11655 print "Notice:",str(e)
11659 if "--ask" in myopts:
11660 if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
11665 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
11666 if "--quiet" not in myopts:
11667 print ">>> Starting rsync with "+dosyncuri+"..."
11669 emergelog(xterm_titles,
11670 ">>> Starting retry %d of %d with %s" % \
11671 (retries,maxretries,dosyncuri))
11672 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
11674 if mytimestamp != 0 and "--quiet" not in myopts:
11675 print ">>> Checking server timestamp ..."
11677 rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
11679 if "--debug" in myopts:
11682 exitcode = os.EX_OK
11683 servertimestamp = 0
11684 # Even if there's no timestamp available locally, fetch the
11685 # timestamp anyway as an initial probe to verify that the server is
11686 # responsive. This protects us from hanging indefinitely on a
11687 # connection attempt to an unresponsive server which rsync's
11688 # --timeout option does not prevent.
11690 # Temporary file for remote server timestamp comparison.
11691 from tempfile import mkstemp
11692 fd, tmpservertimestampfile = mkstemp()
11694 mycommand = rsynccommand[:]
11695 mycommand.append(dosyncuri.rstrip("/") + \
11696 "/metadata/timestamp.chk")
11697 mycommand.append(tmpservertimestampfile)
11701 def timeout_handler(signum, frame):
11702 raise portage.exception.PortageException("timed out")
11703 signal.signal(signal.SIGALRM, timeout_handler)
11704 # Timeout here in case the server is unresponsive. The
11705 # --timeout rsync option doesn't apply to the initial
11706 # connection attempt.
11707 if rsync_initial_timeout:
11708 signal.alarm(rsync_initial_timeout)
11710 mypids.extend(portage.process.spawn(
11711 mycommand, env=settings.environ(), returnpid=True))
11712 exitcode = os.waitpid(mypids[0], 0)[1]
11713 content = portage.grabfile(tmpservertimestampfile)
11715 if rsync_initial_timeout:
11718 os.unlink(tmpservertimestampfile)
11721 except portage.exception.PortageException, e:
11725 if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
11726 os.kill(mypids[0], signal.SIGTERM)
11727 os.waitpid(mypids[0], 0)
11728 # This is the same code rsync uses for timeout.
11731 if exitcode != os.EX_OK:
11732 if exitcode & 0xff:
11733 exitcode = (exitcode & 0xff) << 8
11735 exitcode = exitcode >> 8
11737 portage.process.spawned_pids.remove(mypids[0])
11740 servertimestamp = time.mktime(time.strptime(
11741 content[0], "%a, %d %b %Y %H:%M:%S +0000"))
11742 except (OverflowError, ValueError):
11744 del mycommand, mypids, content
11745 if exitcode == os.EX_OK:
11746 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
11747 emergelog(xterm_titles,
11748 ">>> Cancelling sync -- Already current.")
11751 print ">>> Timestamps on the server and in the local repository are the same."
11752 print ">>> Cancelling all further sync action. You are already up to date."
11754 print ">>> In order to force sync, remove '%s'." % servertimestampfile
11758 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
11759 emergelog(xterm_titles,
11760 ">>> Server out of date: %s" % dosyncuri)
11763 print ">>> SERVER OUT OF DATE: %s" % dosyncuri
11765 print ">>> In order to force sync, remove '%s'." % servertimestampfile
11768 exitcode = SERVER_OUT_OF_DATE
11769 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
11771 mycommand = rsynccommand + [dosyncuri+"/", myportdir]
11772 exitcode = portage.process.spawn(mycommand,
11773 env=settings.environ())
11774 if exitcode in [0,1,3,4,11,14,20,21]:
11776 elif exitcode in [1,3,4,11,14,20,21]:
11779 # Code 2 indicates protocol incompatibility, which is expected
11780 # for servers with protocol < 29 that don't support
11781 # --prune-empty-directories. Retry for a server that supports
11782 # at least rsync protocol version 29 (>=rsync-2.6.4).
11787 if retries<=maxretries:
11788 print ">>> Retrying..."
11793 updatecache_flg=False
11794 exitcode = EXCEEDED_MAX_RETRIES
11798 emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
11799 elif exitcode == SERVER_OUT_OF_DATE:
11801 elif exitcode == EXCEEDED_MAX_RETRIES:
11803 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
11808 msg.append("Rsync has reported that there is a syntax error. Please ensure")
11809 msg.append("that your SYNC statement is proper.")
11810 msg.append("SYNC=" + settings["SYNC"])
11812 msg.append("Rsync has reported that there is a File IO error. Normally")
11813 msg.append("this means your disk is full, but can be caused by corruption")
11814 msg.append("on the filesystem that contains PORTDIR. Please investigate")
11815 msg.append("and try again after the problem has been fixed.")
11816 msg.append("PORTDIR=" + settings["PORTDIR"])
11818 msg.append("Rsync was killed before it finished.")
11820 msg.append("Rsync has not successfully finished. It is recommended that you keep")
11821 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
11822 msg.append("to use rsync due to firewall or other restrictions. This should be a")
11823 msg.append("temporary problem unless complications exist with your network")
11824 msg.append("(and possibly your system's filesystem) configuration.")
11828 elif syncuri[:6]=="cvs://":
11829 if not os.path.exists("/usr/bin/cvs"):
11830 print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
11831 print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
11833 cvsroot=syncuri[6:]
11834 cvsdir=os.path.dirname(myportdir)
11835 if not os.path.exists(myportdir+"/CVS"):
11837 print ">>> Starting initial cvs checkout with "+syncuri+"..."
11838 if os.path.exists(cvsdir+"/gentoo-x86"):
11839 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
11842 os.rmdir(myportdir)
11844 if e.errno != errno.ENOENT:
11846 "!!! existing '%s' directory; exiting.\n" % myportdir)
11849 if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
11850 print "!!! cvs checkout error; exiting."
11852 os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
11855 print ">>> Starting cvs update with "+syncuri+"..."
11856 retval = portage.spawn("cd '%s'; cvs -z0 -q update -dP" % \
11857 myportdir, settings, free=1)
11858 if retval != os.EX_OK:
11860 dosyncuri = syncuri
11862 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
11863 noiselevel=-1, level=logging.ERROR)
11866 if updatecache_flg and \
11867 myaction != "metadata" and \
11868 "metadata-transfer" not in settings.features:
11869 updatecache_flg = False
11871 # Reload the whole config from scratch.
11872 settings, trees, mtimedb = load_emerge_config(trees=trees)
11873 root_config = trees[settings["ROOT"]]["root_config"]
11874 portdb = trees[settings["ROOT"]]["porttree"].dbapi
11876 if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
11877 action_metadata(settings, portdb, myopts)
11879 if portage._global_updates(trees, mtimedb["updates"]):
11881 # Reload the whole config from scratch.
11882 settings, trees, mtimedb = load_emerge_config(trees=trees)
11883 portdb = trees[settings["ROOT"]]["porttree"].dbapi
11884 root_config = trees[settings["ROOT"]]["root_config"]
11886 mybestpv = portdb.xmatch("bestmatch-visible",
11887 portage.const.PORTAGE_PACKAGE_ATOM)
11888 mypvs = portage.best(
11889 trees[settings["ROOT"]]["vartree"].dbapi.match(
11890 portage.const.PORTAGE_PACKAGE_ATOM))
11892 chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
11894 if myaction != "metadata":
11895 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
11896 retval = portage.process.spawn(
11897 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
11898 dosyncuri], env=settings.environ())
11899 if retval != os.EX_OK:
11900 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
11902 if(mybestpv != mypvs) and not "--quiet" in myopts:
11904 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
11905 print red(" * ")+"that you update portage now, before any other packages are updated."
11907 print red(" * ")+"To update portage, run 'emerge portage' now."
11910 display_news_notification(root_config, myopts)
11913 def action_metadata(settings, portdb, myopts):
11914 portage.writemsg_stdout("\n>>> Updating Portage cache: ")
11915 old_umask = os.umask(0002)
11916 cachedir = os.path.normpath(settings.depcachedir)
11917 if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
11918 "/lib", "/opt", "/proc", "/root", "/sbin",
11919 "/sys", "/tmp", "/usr", "/var"]:
11920 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
11921 "ROOT DIRECTORY ON YOUR SYSTEM."
11922 print >> sys.stderr, \
11923 "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
11925 if not os.path.exists(cachedir):
11928 ec = portage.eclass_cache.cache(portdb.porttree_root)
11929 myportdir = os.path.realpath(settings["PORTDIR"])
11930 cm = settings.load_best_module("portdbapi.metadbmodule")(
11931 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
11933 from portage.cache import util
11935 class percentage_noise_maker(util.quiet_mirroring):
11936 def __init__(self, dbapi):
11938 self.cp_all = dbapi.cp_all()
11939 l = len(self.cp_all)
11940 self.call_update_min = 100000000
11941 self.min_cp_all = l/100.0
11945 def __iter__(self):
11946 for x in self.cp_all:
11948 if self.count > self.min_cp_all:
11949 self.call_update_min = 0
11951 for y in self.dbapi.cp_list(x):
11953 self.call_update_mine = 0
11955 def update(self, *arg):
11956 try: self.pstr = int(self.pstr) + 1
11957 except ValueError: self.pstr = 1
11958 sys.stdout.write("%s%i%%" % \
11959 ("\b" * (len(str(self.pstr))+1), self.pstr))
11961 self.call_update_min = 10000000
11963 def finish(self, *arg):
11964 sys.stdout.write("\b\b\b\b100%\n")
11967 if "--quiet" in myopts:
11968 def quicky_cpv_generator(cp_all_list):
11969 for x in cp_all_list:
11970 for y in portdb.cp_list(x):
11972 source = quicky_cpv_generator(portdb.cp_all())
11973 noise_maker = portage.cache.util.quiet_mirroring()
11975 noise_maker = source = percentage_noise_maker(portdb)
11976 portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
11977 eclass_cache=ec, verbose_instance=noise_maker)
11980 os.umask(old_umask)
11982 def action_regen(settings, portdb, max_jobs, max_load):
11983 xterm_titles = "notitles" not in settings.features
11984 emergelog(xterm_titles, " === regen")
11985 #regenerate cache entries
11986 portage.writemsg_stdout("Regenerating cache entries...\n")
11988 os.close(sys.stdin.fileno())
11989 except SystemExit, e:
11990 raise # Needed else can't exit
11995 regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
11998 portage.writemsg_stdout("done!\n")
12000 def action_config(settings, trees, myopts, myfiles):
12001 if len(myfiles) != 1:
12002 print red("!!! config can only take a single package atom at this time\n")
12004 if not is_valid_package_atom(myfiles[0]):
12005 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
12007 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
12008 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
12012 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
12013 except portage.exception.AmbiguousPackageName, e:
12014 # Multiple matches thrown from cpv_expand
12017 print "No packages found.\n"
12019 elif len(pkgs) > 1:
12020 if "--ask" in myopts:
12022 print "Please select a package to configure:"
12026 options.append(str(idx))
12027 print options[-1]+") "+pkg
12029 options.append("X")
12030 idx = userquery("Selection?", options)
12033 pkg = pkgs[int(idx)-1]
12035 print "The following packages available:"
12038 print "\nPlease use a specific atom or the --ask option."
12044 if "--ask" in myopts:
12045 if userquery("Ready to configure "+pkg+"?") == "No":
12048 print "Configuring pkg..."
12050 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
12051 mysettings = portage.config(clone=settings)
12052 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
12053 debug = mysettings.get("PORTAGE_DEBUG") == "1"
12054 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
12056 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
12057 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
12058 if retval == os.EX_OK:
12059 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
12060 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
12063 def action_info(settings, trees, myopts, myfiles):
12064 print getportageversion(settings["PORTDIR"], settings["ROOT"],
12065 settings.profile_path, settings["CHOST"],
12066 trees[settings["ROOT"]]["vartree"].dbapi)
12068 header_title = "System Settings"
12070 print header_width * "="
12071 print header_title.rjust(int(header_width/2 + len(header_title)/2))
12072 print header_width * "="
12073 print "System uname: "+platform.platform(aliased=1)
12075 lastSync = portage.grabfile(os.path.join(
12076 settings["PORTDIR"], "metadata", "timestamp.chk"))
12077 print "Timestamp of tree:",
12083 output=commands.getstatusoutput("distcc --version")
12085 print str(output[1].split("\n",1)[0]),
12086 if "distcc" in settings.features:
12091 output=commands.getstatusoutput("ccache -V")
12093 print str(output[1].split("\n",1)[0]),
12094 if "ccache" in settings.features:
12099 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
12100 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
12101 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
12102 myvars = portage.util.unique_array(myvars)
12106 if portage.isvalidatom(x):
12107 pkg_matches = trees["/"]["vartree"].dbapi.match(x)
12108 pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
12109 pkg_matches.sort(portage.pkgcmp)
12111 for pn, ver, rev in pkg_matches:
12113 pkgs.append(ver + "-" + rev)
12117 pkgs = ", ".join(pkgs)
12118 print "%-20s %s" % (x+":", pkgs)
12120 print "%-20s %s" % (x+":", "[NOT VALID]")
12122 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
12124 if "--verbose" in myopts:
12125 myvars=settings.keys()
12127 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
12128 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
12129 'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
12130 'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
12132 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
12134 myvars = portage.util.unique_array(myvars)
12140 print '%s="%s"' % (x, settings[x])
12142 use = set(settings["USE"].split())
12143 use_expand = settings["USE_EXPAND"].split()
12145 for varname in use_expand:
12146 flag_prefix = varname.lower() + "_"
12147 for f in list(use):
12148 if f.startswith(flag_prefix):
12152 print 'USE="%s"' % " ".join(use),
12153 for varname in use_expand:
12154 myval = settings.get(varname)
12156 print '%s="%s"' % (varname, myval),
12159 unset_vars.append(x)
12161 print "Unset: "+", ".join(unset_vars)
12164 if "--debug" in myopts:
12165 for x in dir(portage):
12166 module = getattr(portage, x)
12167 if "cvs_id_string" in dir(module):
12168 print "%s: %s" % (str(x), str(module.cvs_id_string))
12170 # See if we can find any packages installed matching the strings
12171 # passed on the command line
12173 vardb = trees[settings["ROOT"]]["vartree"].dbapi
12174 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12176 mypkgs.extend(vardb.match(x))
12178 # If some packages were found...
12180 # Get our global settings (we only print stuff if it varies from
12181 # the current config)
12182 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
12183 auxkeys = mydesiredvars + [ "USE", "IUSE"]
12185 pkgsettings = portage.config(clone=settings)
12187 for myvar in mydesiredvars:
12188 global_vals[myvar] = set(settings.get(myvar, "").split())
12190 # Loop through each package
12191 # Only print settings if they differ from global settings
12192 header_title = "Package Settings"
12193 print header_width * "="
12194 print header_title.rjust(int(header_width/2 + len(header_title)/2))
12195 print header_width * "="
12196 from portage.output import EOutput
12199 # Get all package specific variables
12200 auxvalues = vardb.aux_get(pkg, auxkeys)
12202 for i in xrange(len(auxkeys)):
12203 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
12205 for myvar in mydesiredvars:
12206 # If the package variable doesn't match the
12207 # current global variable, something has changed
12208 # so set diff_found so we know to print
12209 if valuesmap[myvar] != global_vals[myvar]:
12210 diff_values[myvar] = valuesmap[myvar]
12211 valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
12212 valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
12213 pkgsettings.reset()
12214 # If a matching ebuild is no longer available in the tree, maybe it
12215 # would make sense to compare against the flags for the best
12216 # available version with the same slot?
12218 if portdb.cpv_exists(pkg):
12220 pkgsettings.setcpv(pkg, mydb=mydb)
12221 if valuesmap["IUSE"].intersection(
12222 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
12223 diff_values["USE"] = valuesmap["USE"]
12224 # If a difference was found, print the info for
12227 # Print package info
12228 print "%s was built with the following:" % pkg
12229 for myvar in mydesiredvars + ["USE"]:
12230 if myvar in diff_values:
12231 mylist = list(diff_values[myvar])
12233 print "%s=\"%s\"" % (myvar, " ".join(mylist))
12235 print ">>> Attempting to run pkg_info() for '%s'" % pkg
12236 ebuildpath = vardb.findname(pkg)
12237 if not ebuildpath or not os.path.exists(ebuildpath):
12238 out.ewarn("No ebuild found for '%s'" % pkg)
12240 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
12241 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
12242 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
12245 def action_search(root_config, myopts, myfiles, spinner):
12247 print "emerge: no search terms provided."
12249 searchinstance = search(root_config,
12250 spinner, "--searchdesc" in myopts,
12251 "--quiet" not in myopts, "--usepkg" in myopts,
12252 "--usepkgonly" in myopts)
12253 for mysearch in myfiles:
12255 searchinstance.execute(mysearch)
12256 except re.error, comment:
12257 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
12259 searchinstance.output()
12261 def action_depclean(settings, trees, ldpath_mtimes,
12262 myopts, action, myfiles, spinner):
12263 # Kill packages that aren't explicitly merged or are required as a
12264 # dependency of another package. World file is explicit.
12266 # Global depclean or prune operations are not very safe when there are
12267 # missing dependencies since it's unknown how badly incomplete
12268 # the dependency graph is, and we might accidentally remove packages
12269 # that should have been pulled into the graph. On the other hand, it's
12270 # relatively safe to ignore missing deps when only asked to remove
12271 # specific packages.
12272 allow_missing_deps = len(myfiles) > 0
12275 msg.append("Depclean may break link level dependencies. Thus, it is\n")
12276 msg.append("recommended to use a tool such as " + good("`revdep-rebuild`") + " (from\n")
12277 msg.append("app-portage/gentoolkit) in order to detect such breakage.\n")
12279 msg.append("Always study the list of packages to be cleaned for any obvious\n")
12280 msg.append("mistakes. Packages that are part of the world set will always\n")
12281 msg.append("be kept. They can be manually added to this set with\n")
12282 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
12283 msg.append("package.provided (see portage(5)) will be removed by\n")
12284 msg.append("depclean, even if they are part of the world set.\n")
12286 msg.append("As a safety measure, depclean will not remove any packages\n")
12287 msg.append("unless *all* required dependencies have been resolved. As a\n")
12288 msg.append("consequence, it is often necessary to run %s\n" % \
12289 good("`emerge --update"))
12290 msg.append(good("--newuse --deep world`") + \
12291 " prior to depclean.\n")
12293 if action == "depclean" and "--quiet" not in myopts and not myfiles:
12294 portage.writemsg_stdout("\n")
12296 portage.writemsg_stdout(colorize("WARN", " * ") + x)
12298 xterm_titles = "notitles" not in settings.features
12299 myroot = settings["ROOT"]
12300 root_config = trees[myroot]["root_config"]
12301 getSetAtoms = root_config.setconfig.getSetAtoms
12302 vardb = trees[myroot]["vartree"].dbapi
12304 required_set_names = ("system", "world")
12308 for s in required_set_names:
12309 required_sets[s] = InternalPackageSet(
12310 initial_atoms=getSetAtoms(s))
12313 # When removing packages, use a temporary version of world
12314 # which excludes packages that are intended to be eligible for
12316 world_temp_set = required_sets["world"]
12317 system_set = required_sets["system"]
12319 if not system_set or not world_temp_set:
12322 writemsg_level("!!! You have no system list.\n",
12323 level=logging.ERROR, noiselevel=-1)
12325 if not world_temp_set:
12326 writemsg_level("!!! You have no world file.\n",
12327 level=logging.WARNING, noiselevel=-1)
12329 writemsg_level("!!! Proceeding is likely to " + \
12330 "break your installation.\n",
12331 level=logging.WARNING, noiselevel=-1)
12332 if "--pretend" not in myopts:
12333 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
12335 if action == "depclean":
12336 emergelog(xterm_titles, " >>> depclean")
12339 args_set = InternalPackageSet()
12342 if not is_valid_package_atom(x):
12343 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
12344 level=logging.ERROR, noiselevel=-1)
12345 writemsg_level("!!! Please check ebuild(5) for full details.\n")
12348 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
12349 except portage.exception.AmbiguousPackageName, e:
12350 msg = "The short ebuild name \"" + x + \
12351 "\" is ambiguous. Please specify " + \
12352 "one of the following " + \
12353 "fully-qualified ebuild names instead:"
12354 for line in textwrap.wrap(msg, 70):
12355 writemsg_level("!!! %s\n" % (line,),
12356 level=logging.ERROR, noiselevel=-1)
12358 writemsg_level(" %s\n" % colorize("INFORM", i),
12359 level=logging.ERROR, noiselevel=-1)
12360 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
12363 matched_packages = False
12366 matched_packages = True
12368 if not matched_packages:
12369 writemsg_level(">>> No packages selected for removal by %s\n" % \
12373 writemsg_level("\nCalculating dependencies ")
12374 resolver_params = create_depgraph_params(myopts, "remove")
12375 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
12376 vardb = resolver.trees[myroot]["vartree"].dbapi
12378 if action == "depclean":
12381 # Pull in everything that's installed but not matched
12382 # by an argument atom since we don't want to clean any
12383 # package if something depends on it.
12385 world_temp_set.clear()
12390 if args_set.findAtomForPackage(pkg) is None:
12391 world_temp_set.add("=" + pkg.cpv)
12393 except portage.exception.InvalidDependString, e:
12394 show_invalid_depstring_notice(pkg,
12395 pkg.metadata["PROVIDE"], str(e))
12397 world_temp_set.add("=" + pkg.cpv)
12400 elif action == "prune":
12402 # Pull in everything that's installed since we don't
12403 # to prune a package if something depends on it.
12404 world_temp_set.clear()
12405 world_temp_set.update(vardb.cp_all())
12409 # Try to prune everything that's slotted.
12410 for cp in vardb.cp_all():
12411 if len(vardb.cp_list(cp)) > 1:
12414 # Remove atoms from world that match installed packages
12415 # that are also matched by argument atoms, but do not remove
12416 # them if they match the highest installed version.
12419 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
12420 if not pkgs_for_cp or pkg not in pkgs_for_cp:
12421 raise AssertionError("package expected in matches: " + \
12422 "cp = %s, cpv = %s matches = %s" % \
12423 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
12425 highest_version = pkgs_for_cp[-1]
12426 if pkg == highest_version:
12427 # pkg is the highest version
12428 world_temp_set.add("=" + pkg.cpv)
12431 if len(pkgs_for_cp) <= 1:
12432 raise AssertionError("more packages expected: " + \
12433 "cp = %s, cpv = %s matches = %s" % \
12434 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
12437 if args_set.findAtomForPackage(pkg) is None:
12438 world_temp_set.add("=" + pkg.cpv)
12440 except portage.exception.InvalidDependString, e:
12441 show_invalid_depstring_notice(pkg,
12442 pkg.metadata["PROVIDE"], str(e))
12444 world_temp_set.add("=" + pkg.cpv)
12448 for s, package_set in required_sets.iteritems():
12449 set_atom = SETPREFIX + s
12450 set_arg = SetArg(arg=set_atom, set=package_set,
12451 root_config=resolver.roots[myroot])
12452 set_args[s] = set_arg
12453 for atom in set_arg.set:
12454 resolver._dep_stack.append(
12455 Dependency(atom=atom, root=myroot, parent=set_arg))
12456 resolver.digraph.add(set_arg, None)
12458 success = resolver._complete_graph()
12459 writemsg_level("\b\b... done!\n")
12461 resolver.display_problems()
# Nested helper of the enclosing depclean routine (its `def` is above this
# chunk). Collects deps from resolver._initially_unsatisfied_deps whose
# parent is a Package and whose priority exceeds UnmergeDepPriority.SOFT,
# and prints an error block unless allow_missing_deps is set.
# NOTE(review): decimated listing — upstream line numbers are embedded as
# prefixes, indentation is stripped, and lines 12467, 12473, 12475-12476,
# 12479, 12482, 12486, 12497 (and the return lines) are missing. Restore
# from upstream portage before executing.
12466 def unresolved_deps():
12468 unresolvable = set()
12469 for dep in resolver._initially_unsatisfied_deps:
12470 if isinstance(dep.parent, Package) and \
12471 (dep.priority > UnmergeDepPriority.SOFT):
12472 unresolvable.add((dep.atom, dep.parent.cpv))
12474 if not unresolvable:
# Only report when missing deps are not explicitly allowed by the caller.
12477 if unresolvable and not allow_missing_deps:
12478 prefix = bad(" * ")
12480 msg.append("Dependencies could not be completely resolved due to")
12481 msg.append("the following required packages not being installed:")
12483 for atom, parent in unresolvable:
12484 msg.append(" %s pulled in by:" % (atom,))
12485 msg.append(" %s" % (parent,))
12487 msg.append("Have you forgotten to run " + \
12488 good("`emerge --update --newuse --deep world`") + " prior to")
12489 msg.append(("%s? It may be necessary to manually " + \
12490 "uninstall packages that no longer") % action)
12491 msg.append("exist in the portage tree since " + \
12492 "it may not be possible to satisfy their")
12493 msg.append("dependencies. Also, be aware of " + \
12494 "the --with-bdeps option that is documented")
12495 msg.append("in " + good("`man emerge`") + ".")
12496 if action == "prune":
12498 msg.append("If you would like to ignore " + \
12499 "dependencies then use %s." % good("--nodeps"))
# Emit the whole message at ERROR level, each line prefixed with " * ".
12500 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
12501 level=logging.ERROR, noiselevel=-1)
12505 if unresolved_deps():
12508 graph = resolver.digraph.copy()
12509 required_pkgs_total = 0
12511 if isinstance(node, Package):
12512 required_pkgs_total += 1
# Nested helper: prints which graph nodes pull in child_node (used with
# --verbose to explain why a package is NOT removed).
# NOTE(review): decimated listing — lines 12520-12521 (presumably `return`
# and a blank/msg setup) and 12524-12525 are missing; `graph` and `msg`
# come from the enclosing scope or elided lines. Restore from upstream.
12514 def show_parents(child_node):
12515 parent_nodes = graph.parent_nodes(child_node)
12516 if not parent_nodes:
12517 # With --prune, the highest version can be pulled in without any
12518 # real parent since all installed packages are pulled in. In that
12519 # case there's nothing to show here.
12522 for node in parent_nodes:
# Fall back to the node itself when it has no cpv attribute (e.g. set args).
12523 parent_strs.append(str(getattr(node, "cpv", node)))
12526 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
12527 for parent_str in parent_strs:
12528 msg.append(" %s\n" % (parent_str,))
12530 portage.writemsg_stdout("".join(msg), noiselevel=-1)
# Nested helper: builds the list of installed packages to unmerge. For
# "depclean" it selects packages not reachable in the dependency graph;
# for "prune" it additionally drops the implicit world set edge and only
# considers packages matched by the argument atoms.
# NOTE(review): decimated listing — many interior lines (12534, 12536-12540,
# 12544-12546, 12550-12553, 12557-12558, 12563, 12569-12570, 12572,
# 12575-12582) are missing, including the loop headers that the visible
# `if pkg not in graph:` branches belong to. Restore from upstream.
12532 def create_cleanlist():
12533 pkgs_to_remove = []
12535 if action == "depclean":
12541 arg_atom = args_set.findAtomForPackage(pkg)
12542 except portage.exception.InvalidDependString:
12543 # this error has already been displayed by now
12547 if pkg not in graph:
12548 pkgs_to_remove.append(pkg)
12549 elif "--verbose" in myopts:
12554 if pkg not in graph:
12555 pkgs_to_remove.append(pkg)
12556 elif "--verbose" in myopts:
12559 elif action == "prune":
12560 # Prune really uses all installed instead of world. It's not
12561 # a real reverse dependency so don't display it as such.
12562 graph.remove(set_args["world"])
12564 for atom in args_set:
12565 for pkg in vardb.match_pkgs(atom):
12566 if pkg not in graph:
12567 pkgs_to_remove.append(pkg)
12568 elif "--verbose" in myopts:
12571 if not pkgs_to_remove:
12573 ">>> No packages selected for removal by %s\n" % action)
12574 if "--verbose" not in myopts:
12576 ">>> To see reverse dependencies, use %s\n" % \
12578 if action == "prune":
12580 ">>> To ignore dependencies, use %s\n" % \
12583 return pkgs_to_remove
12585 cleanlist = create_cleanlist()
12588 clean_set = set(cleanlist)
12590 # Use a topological sort to create an unmerge order such that
12591 # each package is unmerged before it's dependencies. This is
12592 # necessary to avoid breaking things that may need to run
12593 # during pkg_prerm or pkg_postrm phases.
12595 # Create a new graph to account for dependencies between the
12596 # packages being unmerged.
12600 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
12601 runtime = UnmergeDepPriority(runtime=True)
12602 runtime_post = UnmergeDepPriority(runtime_post=True)
12603 buildtime = UnmergeDepPriority(buildtime=True)
12605 "RDEPEND": runtime,
12606 "PDEPEND": runtime_post,
12607 "DEPEND": buildtime,
12610 for node in clean_set:
12611 graph.add(node, None)
12613 node_use = node.metadata["USE"].split()
12614 for dep_type in dep_keys:
12615 depstr = node.metadata[dep_type]
12619 portage.dep._dep_check_strict = False
12620 success, atoms = portage.dep_check(depstr, None, settings,
12621 myuse=node_use, trees=resolver._graph_trees,
12624 portage.dep._dep_check_strict = True
12626 # Ignore invalid deps of packages that will
12627 # be uninstalled anyway.
12630 priority = priority_map[dep_type]
12632 if not isinstance(atom, portage.dep.Atom):
12633 # Ignore invalid atoms returned from dep_check().
12637 matches = vardb.match_pkgs(atom)
12640 for child_node in matches:
12641 if child_node in clean_set:
12642 graph.add(child_node, node, priority=priority)
12645 if len(graph.order) == len(graph.root_nodes()):
12646 # If there are no dependencies between packages
12647 # let unmerge() group them by cat/pn.
12649 cleanlist = [pkg.cpv for pkg in graph.order]
12651 # Order nodes from lowest to highest overall reference count for
12652 # optimal root node selection.
12653 node_refcounts = {}
12654 for node in graph.order:
12655 node_refcounts[node] = len(graph.parent_nodes(node))
12656 def cmp_reference_count(node1, node2):
12657 return node_refcounts[node1] - node_refcounts[node2]
12658 graph.order.sort(cmp_reference_count)
12660 ignore_priority_range = [None]
12661 ignore_priority_range.extend(
12662 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
12663 while not graph.empty():
12664 for ignore_priority in ignore_priority_range:
12665 nodes = graph.root_nodes(ignore_priority=ignore_priority)
12669 raise AssertionError("no root nodes")
12670 if ignore_priority is not None:
12671 # Some deps have been dropped due to circular dependencies,
12672 # so only pop one node in order do minimize the number that
12677 cleanlist.append(node.cpv)
12679 unmerge(root_config, myopts, "unmerge", cleanlist,
12680 ldpath_mtimes, ordered=ordered)
12682 if action == "prune":
12685 if not cleanlist and "--quiet" in myopts:
12688 print "Packages installed: "+str(len(vardb.cpv_all()))
12689 print "Packages in world: " + \
12690 str(len(root_config.sets["world"].getAtoms()))
12691 print "Packages in system: " + \
12692 str(len(root_config.sets["system"].getAtoms()))
12693 print "Required packages: "+str(required_pkgs_total)
12694 if "--pretend" in myopts:
12695 print "Number to remove: "+str(len(cleanlist))
12697 print "Number removed: "+str(len(cleanlist))
# Rebuilds a dependency graph from mtimedb["resume"]["mergelist"].
# When skip_unsatisfied is set, packages with unsatisfiable deps (and
# their dependent parents, found by walking the graph upward) are pruned
# from the mergelist and reported via dropped_tasks instead of raising.
# NOTE(review): decimated listing — indentation stripped, upstream line
# numbers embedded, and several structural lines are missing (e.g. 12701,
# 12704, 12706 around the docstring; 12712 `try:`; 12717-12718 re-raise;
# 12727 `continue`; 12737-12738; 12744; 12748; 12753; 12765-12767).
# Restore from upstream portage before executing.
12699 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner,
12700 skip_masked=False, skip_unsatisfied=False):
12702 Construct a depgraph for the given resume list. This will raise
12703 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
12705 @returns: (success, depgraph, dropped_tasks)
12707 mergelist = mtimedb["resume"]["mergelist"]
12708 dropped_tasks = set()
12710 mydepgraph = depgraph(settings, trees,
12711 myopts, myparams, spinner)
12713 success = mydepgraph.loadResumeCommand(mtimedb["resume"],
12714 skip_masked=skip_masked)
12715 except depgraph.UnsatisfiedResumeDep, e:
12716 if not skip_unsatisfied:
12719 graph = mydepgraph.digraph
# Seed the worklist with the direct parents of the unsatisfied deps.
12720 unsatisfied_parents = dict((dep.parent, dep.parent) \
12721 for dep in e.value)
12722 traversed_nodes = set()
12723 unsatisfied_stack = list(unsatisfied_parents)
12724 while unsatisfied_stack:
12725 pkg = unsatisfied_stack.pop()
12726 if pkg in traversed_nodes:
12728 traversed_nodes.add(pkg)
12730 # If this package was pulled in by a parent
12731 # package scheduled for merge, removing this
12732 # package may cause the parent package's
12733 # dependency to become unsatisfied.
12734 for parent_node in graph.parent_nodes(pkg):
12735 if not isinstance(parent_node, Package) \
12736 or parent_node.operation not in ("merge", "nomerge"):
12739 graph.child_nodes(parent_node,
12740 ignore_priority=DepPriority.SOFT)
12741 if pkg in unsatisfied:
# Propagate: the parent becomes unsatisfied too and is walked in turn.
12742 unsatisfied_parents[parent_node] = parent_node
12743 unsatisfied_stack.append(parent_node)
12745 pruned_mergelist = [x for x in mergelist \
12746 if isinstance(x, list) and \
12747 tuple(x) not in unsatisfied_parents]
12749 # If the mergelist doesn't shrink then this loop is infinite.
12750 if len(pruned_mergelist) == len(mergelist):
12751 # This happens if a package can't be dropped because
12752 # it's already installed, but it has unsatisfied PDEPEND.
12754 mergelist[:] = pruned_mergelist
12756 # Exclude installed packages that have been removed from the graph due
12757 # to failure to build/install runtime dependencies after the dependent
12758 # package has already been installed.
12759 dropped_tasks.update(pkg for pkg in \
12760 unsatisfied_parents if pkg.operation != "nomerge")
12761 mydepgraph.break_refs(unsatisfied_parents)
12763 del e, graph, traversed_nodes, \
12764 unsatisfied_parents, unsatisfied_stack
12768 return (success, mydepgraph, dropped_tasks)
# Main entry point for the merge/build action of emerge: validates any
# saved resume data in mtimedb, optionally resumes a previous merge list
# (merging the resumed options into myopts), otherwise computes a fresh
# depgraph from myfiles, handles --pretend/--ask display and prompting,
# and finally runs the Scheduler to merge, optionally auto-cleaning after.
# NOTE(review): decimated listing — upstream line numbers are embedded as
# prefixes, indentation is stripped, and a large number of interior lines
# are missing (every gap in the 127xx-131xx numbering), including most
# `else:`/`try:`/`continue`/`return` lines. Restore the block from
# upstream portage before executing or modifying.
12770 def action_build(settings, trees, mtimedb,
12771 myopts, myaction, myfiles, spinner):
12773 # validate the state of the resume data
12774 # so that we can make assumptions later.
12775 for k in ("resume", "resume_backup"):
12776 if k not in mtimedb:
12778 resume_data = mtimedb[k]
12779 if not isinstance(resume_data, dict):
12782 mergelist = resume_data.get("mergelist")
12783 if not isinstance(mergelist, list):
# Each mergelist entry is a 4-tuple-shaped list: type, root, key, action.
12786 for x in mergelist:
12787 if not (isinstance(x, list) and len(x) == 4):
12789 pkg_type, pkg_root, pkg_key, pkg_action = x
12790 if pkg_root not in trees:
12791 # Current $ROOT setting differs,
12792 # so the list must be stale.
12798 resume_opts = resume_data.get("myopts")
12799 if not isinstance(resume_opts, (dict, list)):
12802 favorites = resume_data.get("favorites")
12803 if not isinstance(favorites, list):
# Resume path: promote the backup if the primary resume entry is gone.
12808 if "--resume" in myopts and \
12809 ("resume" in mtimedb or
12810 "resume_backup" in mtimedb):
12812 if "resume" not in mtimedb:
12813 mtimedb["resume"] = mtimedb["resume_backup"]
12814 del mtimedb["resume_backup"]
12816 # "myopts" is a list for backward compatibility.
12817 resume_opts = mtimedb["resume"].get("myopts", [])
12818 if isinstance(resume_opts, list):
12819 resume_opts = dict((k,True) for k in resume_opts)
# One-shot interactive options must not survive into a resumed run.
12820 for opt in ("--skipfirst", "--ask", "--tree"):
12821 resume_opts.pop(opt, None)
12822 myopts.update(resume_opts)
12824 if "--debug" in myopts:
12825 writemsg_level("myopts %s\n" % (myopts,))
12827 # Adjust config according to options of the command being resumed.
12828 for myroot in trees:
12829 mysettings = trees[myroot]["vartree"].settings
12830 mysettings.unlock()
12831 adjust_config(myopts, mysettings)
12833 del myroot, mysettings
12835 ldpath_mtimes = mtimedb["ldpath"]
12838 buildpkgonly = "--buildpkgonly" in myopts
12839 pretend = "--pretend" in myopts
12840 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
12841 ask = "--ask" in myopts
12842 nodeps = "--nodeps" in myopts
12843 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
12844 tree = "--tree" in myopts
12845 if nodeps and tree:
12847 del myopts["--tree"]
12848 portage.writemsg(colorize("WARN", " * ") + \
12849 "--tree is broken with --nodeps. Disabling...\n")
12850 debug = "--debug" in myopts
12851 verbose = "--verbose" in myopts
12852 quiet = "--quiet" in myopts
12853 if pretend or fetchonly:
12854 # make the mtimedb readonly
12855 mtimedb.filename = None
12856 if "--digest" in myopts:
12857 msg = "The --digest option can prevent corruption from being" + \
12858 " noticed. The `repoman manifest` command is the preferred" + \
12859 " way to generate manifests and it is capable of doing an" + \
12860 " entire repository or category at once."
12861 prefix = bad(" * ")
12862 writemsg(prefix + "\n")
12863 from textwrap import wrap
12864 for line in wrap(msg, 72):
12865 writemsg("%s%s\n" % (prefix, line))
12866 writemsg(prefix + "\n")
12868 if "--quiet" not in myopts and \
12869 ("--pretend" in myopts or "--ask" in myopts or \
12870 "--tree" in myopts or "--verbose" in myopts):
12872 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
12874 elif "--buildpkgonly" in myopts:
12878 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
12880 print darkgreen("These are the packages that would be %s, in reverse order:") % action
12884 print darkgreen("These are the packages that would be %s, in order:") % action
12887 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
12888 if not show_spinner:
12889 spinner.update = spinner.update_quiet
12892 favorites = mtimedb["resume"].get("favorites")
12893 if not isinstance(favorites, list):
12897 print "Calculating dependencies ",
12898 myparams = create_depgraph_params(myopts, myaction)
12900 resume_data = mtimedb["resume"]
12901 mergelist = resume_data["mergelist"]
# --skipfirst: locate the first "merge" task in the saved list.
12902 if mergelist and "--skipfirst" in myopts:
12903 for i, task in enumerate(mergelist):
12904 if isinstance(task, list) and \
12905 task and task[-1] == "merge":
12909 skip_masked = "--skipfirst" in myopts
12910 skip_unsatisfied = "--skipfirst" in myopts
12914 success, mydepgraph, dropped_tasks = resume_depgraph(
12915 settings, trees, mtimedb, myopts, myparams, spinner,
12916 skip_masked=skip_masked, skip_unsatisfied=skip_unsatisfied)
12917 except (portage.exception.PackageNotFound,
12918 depgraph.UnsatisfiedResumeDep), e:
12919 if isinstance(e, depgraph.UnsatisfiedResumeDep):
12920 mydepgraph = e.depgraph
12923 from textwrap import wrap
12924 from portage.output import EOutput
12927 resume_data = mtimedb["resume"]
12928 mergelist = resume_data.get("mergelist")
12929 if not isinstance(mergelist, list):
12931 if mergelist and debug or (verbose and not quiet):
12932 out.eerror("Invalid resume list:")
12935 for task in mergelist:
12936 if isinstance(task, list):
12937 out.eerror(indent + str(tuple(task)))
12940 if isinstance(e, depgraph.UnsatisfiedResumeDep):
12941 out.eerror("One or more packages are either masked or " + \
12942 "have missing dependencies:")
12945 for dep in e.value:
12946 if dep.atom is None:
12947 out.eerror(indent + "Masked package:")
12948 out.eerror(2 * indent + str(dep.parent))
12951 out.eerror(indent + str(dep.atom) + " pulled in by:")
12952 out.eerror(2 * indent + str(dep.parent))
12954 msg = "The resume list contains packages " + \
12955 "that are either masked or have " + \
12956 "unsatisfied dependencies. " + \
12957 "Please restart/continue " + \
12958 "the operation manually, or use --skipfirst " + \
12959 "to skip the first package in the list and " + \
12960 "any other packages that may be " + \
12961 "masked or have missing dependencies."
12962 for line in wrap(msg, 72):
12964 elif isinstance(e, portage.exception.PackageNotFound):
12965 out.eerror("An expected package is " + \
12966 "not available: %s" % str(e))
12968 msg = "The resume list contains one or more " + \
12969 "packages that are no longer " + \
12970 "available. Please restart/continue " + \
12971 "the operation manually."
12972 for line in wrap(msg, 72):
12976 print "\b\b... done!"
# Report packages that resume_depgraph() pruned from the resume list.
12980 portage.writemsg("!!! One or more packages have been " + \
12981 "dropped due to\n" + \
12982 "!!! masking or unsatisfied dependencies:\n\n",
12984 for task in dropped_tasks:
12985 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
12986 portage.writemsg("\n", noiselevel=-1)
12989 if mydepgraph is not None:
12990 mydepgraph.display_problems()
12991 if not (ask or pretend):
12992 # delete the current list and also the backup
12993 # since it's probably stale too.
12994 for k in ("resume", "resume_backup"):
12995 mtimedb.pop(k, None)
13000 if ("--resume" in myopts):
13001 print darkgreen("emerge: It seems we have nothing to resume...")
# Non-resume path: build a fresh depgraph from the requested files/atoms.
13004 myparams = create_depgraph_params(myopts, myaction)
13005 if "--quiet" not in myopts and "--nodeps" not in myopts:
13006 print "Calculating dependencies ",
13008 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
13010 retval, favorites = mydepgraph.select_files(myfiles)
13011 except portage.exception.PackageNotFound, e:
13012 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
13014 except portage.exception.PackageSetNotFound, e:
13015 root_config = trees[settings["ROOT"]]["root_config"]
13016 display_missing_pkg_set(root_config, e.value)
13019 print "\b\b... done!"
13021 mydepgraph.display_problems()
# Interactive/verbose display path (everything except plain --pretend).
13024 if "--pretend" not in myopts and \
13025 ("--ask" in myopts or "--tree" in myopts or \
13026 "--verbose" in myopts) and \
13027 not ("--quiet" in myopts and "--ask" not in myopts):
13028 if "--resume" in myopts:
13029 mymergelist = mydepgraph.altlist()
13030 if len(mymergelist) == 0:
13031 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
13033 favorites = mtimedb["resume"]["favorites"]
13034 retval = mydepgraph.display(
13035 mydepgraph.altlist(reversed=tree),
13036 favorites=favorites)
13037 mydepgraph.display_problems()
13038 if retval != os.EX_OK:
13040 prompt="Would you like to resume merging these packages?"
13042 retval = mydepgraph.display(
13043 mydepgraph.altlist(reversed=("--tree" in myopts)),
13044 favorites=favorites)
13045 mydepgraph.display_problems()
13046 if retval != os.EX_OK:
13049 for x in mydepgraph.altlist():
13050 if isinstance(x, Package) and x.operation == "merge":
13054 sets = trees[settings["ROOT"]]["root_config"].sets
13055 world_candidates = None
13056 if "--noreplace" in myopts and \
13057 not oneshot and favorites:
13058 # Sets that are not world candidates are filtered
13059 # out here since the favorites list needs to be
13060 # complete for depgraph.loadResumeCommand() to
13061 # operate correctly.
13062 world_candidates = [x for x in favorites \
13063 if not (x.startswith(SETPREFIX) and \
13064 not sets[x[1:]].world_candidate)]
13065 if "--noreplace" in myopts and \
13066 not oneshot and world_candidates:
13068 for x in world_candidates:
13069 print " %s %s" % (good("*"), x)
13070 prompt="Would you like to add these packages to your world favorites?"
13071 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
13072 prompt="Nothing to merge; would you like to auto-clean packages?"
13075 print "Nothing to merge; quitting."
13078 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
13079 prompt="Would you like to fetch the source files for these packages?"
13081 prompt="Would you like to merge these packages?"
13083 if "--ask" in myopts and userquery(prompt) == "No":
13088 # Don't ask again (e.g. when auto-cleaning packages after merge)
13089 myopts.pop("--ask", None)
# Pure --pretend display path (no fetch variants).
13091 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
13092 if ("--resume" in myopts):
13093 mymergelist = mydepgraph.altlist()
13094 if len(mymergelist) == 0:
13095 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
13097 favorites = mtimedb["resume"]["favorites"]
13098 retval = mydepgraph.display(
13099 mydepgraph.altlist(reversed=tree),
13100 favorites=favorites)
13101 mydepgraph.display_problems()
13102 if retval != os.EX_OK:
13105 retval = mydepgraph.display(
13106 mydepgraph.altlist(reversed=("--tree" in myopts)),
13107 favorites=favorites)
13108 mydepgraph.display_problems()
13109 if retval != os.EX_OK:
# --buildpkgonly sanity check: all non-Package nodes are stripped and the
# remaining graph must have no ordering constraints at MEDIUM priority.
13111 if "--buildpkgonly" in myopts:
13112 graph_copy = mydepgraph.digraph.clone()
13113 for node in list(graph_copy.order):
13114 if not isinstance(node, Package):
13115 graph_copy.remove(node)
13116 if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
13117 print "\n!!! --buildpkgonly requires all dependencies to be merged."
13118 print "!!! You have to merge the dependencies before you can build this package.\n"
13121 if "--buildpkgonly" in myopts:
13122 graph_copy = mydepgraph.digraph.clone()
13123 for node in list(graph_copy.order):
13124 if not isinstance(node, Package):
13125 graph_copy.remove(node)
13126 if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
13127 print "\n!!! --buildpkgonly requires all dependencies to be merged."
13128 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
# Actual merge: resumed list first, otherwise the freshly computed list.
13131 if ("--resume" in myopts):
13132 favorites=mtimedb["resume"]["favorites"]
13133 mymergelist = mydepgraph.altlist()
13134 mydepgraph.break_refs(mymergelist)
13135 mergetask = Scheduler(settings, trees, mtimedb, myopts,
13136 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
13137 del mydepgraph, mymergelist
13138 clear_caches(trees)
13140 retval = mergetask.merge()
13141 merge_count = mergetask.curval
13143 if "resume" in mtimedb and \
13144 "mergelist" in mtimedb["resume"] and \
13145 len(mtimedb["resume"]["mergelist"]) > 1:
13146 mtimedb["resume_backup"] = mtimedb["resume"]
13147 del mtimedb["resume"]
13149 mtimedb["resume"]={}
13150 # Stored as a dict starting with portage-2.1.6_rc1, and supported
13151 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
13152 # a list type for options.
13153 mtimedb["resume"]["myopts"] = myopts.copy()
13155 # Convert Atom instances to plain str since the mtimedb loader
13156 # sets unpickler.find_global = None which causes unpickler.load()
13157 # to raise the following exception:
13159 # cPickle.UnpicklingError: Global and instance pickles are not supported.
13161 # TODO: Maybe stop setting find_global = None, or find some other
13162 # way to avoid accidental triggering of the above UnpicklingError.
13163 mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
13165 if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
13166 for pkgline in mydepgraph.altlist():
13167 if pkgline[0]=="ebuild" and pkgline[3]=="merge":
13168 y = trees[pkgline[1]]["porttree"].dbapi.findname(pkgline[2])
13169 tmpsettings = portage.config(clone=settings)
13171 if settings.get("PORTAGE_DEBUG", "") == "1":
13173 retval = portage.doebuild(
13174 y, "digest", settings["ROOT"], tmpsettings, edebug,
13175 ("--pretend" in myopts),
13176 mydbapi=trees[pkgline[1]]["porttree"].dbapi,
13179 pkglist = mydepgraph.altlist()
13180 mydepgraph.saveNomergeFavorites()
13181 mydepgraph.break_refs(pkglist)
13182 mergetask = Scheduler(settings, trees, mtimedb, myopts,
13183 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
13184 del mydepgraph, pkglist
13185 clear_caches(trees)
13187 retval = mergetask.merge()
13188 merge_count = mergetask.curval
13190 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
13191 if "yes" == settings.get("AUTOCLEAN"):
13192 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
13193 unmerge(trees[settings["ROOT"]]["root_config"],
13194 myopts, "clean", [],
13195 ldpath_mtimes, autoclean=1)
13197 portage.writemsg_stdout(colorize("WARN", "WARNING:")
13198 + " AUTOCLEAN is disabled. This can cause serious"
13199 + " problems due to overlapping packages.\n")
# NOTE(review): the trailing lines (presumably the final `return retval`)
# are missing from this listing.
# Reports on stderr that two mutually exclusive emerge actions were given.
# NOTE(review): line 13206 is missing from this listing — upstream it
# presumably terminates the process after printing; confirm before use.
13203 def multiple_actions(action1, action2):
13204 sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
13205 sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
# Pre-processes the raw argv so that options with an optional value
# (-j/--jobs) always carry an explicit value before optparse sees them,
# since optparse has no native support for optional option arguments.
# NOTE(review): decimated listing — lines 13209, 13214-13216 (docstring
# close / new_args init), 13220, 13222, 13226-13227, 13230, 13232-13233,
# 13236, 13238, 13240-13241, 13243, 13245, 13247-13249, 13252-13253,
# 13257, 13259, 13262-13263 are missing. Restore from upstream.
13208 def insert_optional_args(args):
13210 Parse optional arguments and insert a value if one has
13211 not been provided. This is done before feeding the args
13212 to the optparse parser since that parser does not support
13213 this feature natively.
13217 jobs_opts = ("-j", "--jobs")
13218 arg_stack = args[:]
13219 arg_stack.reverse()
13221 arg = arg_stack.pop()
# A short option cluster containing "j", e.g. "-vj" or "-j4" (not "--...").
13223 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
13224 if not (short_job_opt or arg in jobs_opts):
13225 new_args.append(arg)
13228 # Insert an empty placeholder in order to
13229 # satisfy the requirements of optparse.
13231 new_args.append("--jobs")
13234 if short_job_opt and len(arg) > 2:
13235 if arg[:2] == "-j":
13237 job_count = int(arg[2:])
13239 saved_opts = arg[2:]
13242 saved_opts = arg[1:].replace("j", "")
# The count may also follow as a separate token, e.g. "-j 4".
13244 if job_count is None and arg_stack:
13246 job_count = int(arg_stack[-1])
13250 # Discard the job count from the stack
13251 # since we're consuming it here.
13254 if job_count is None:
13255 # unlimited number of jobs
13256 new_args.append("True")
13258 new_args.append(str(job_count))
13260 if saved_opts is not None:
13261 new_args.append("-" + saved_opts)
# Parses the emerge command line with optparse: registers the module-level
# action flags, boolean options, short-option mapping and long aliases,
# then post-validates --jobs (int or True) and --load-average (float > 0)
# and folds results into (myaction, myopts, myfiles).
# NOTE(review): decimated listing — many lines are missing, including the
# dict keys of argument_options (13274, 13277-13278, etc.), the
# try/except scaffolding around the int()/float() conversions, and the
# myopts/myfiles initialization. Restore from upstream before editing.
13265 def parse_opts(tmpcmdline, silent=False):
13270 global actions, options, shortmapping
13272 longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
13273 argument_options = {
13275 "help":"specify the location for portage configuration files",
13279 "help":"enable or disable color output",
13281 "choices":("y", "n")
13286 "help" : "Specifies the number of packages to build " + \
13292 "--load-average": {
13294 "help" :"Specifies that no new builds should be started " + \
13295 "if there are other builds running and the load average " + \
13296 "is at least LOAD (a floating-point number).",
13302 "help":"include unnecessary build time dependencies",
13304 "choices":("y", "n")
13307 "help":"specify conditions to trigger package reinstallation",
13309 "choices":["changed-use"]
13313 from optparse import OptionParser
13314 parser = OptionParser()
13315 if parser.has_option("--help"):
13316 parser.remove_option("--help")
13318 for action_opt in actions:
13319 parser.add_option("--" + action_opt, action="store_true",
13320 dest=action_opt.replace("-", "_"), default=False)
13321 for myopt in options:
13322 parser.add_option(myopt, action="store_true",
13323 dest=myopt.lstrip("--").replace("-", "_"), default=False)
13324 for shortopt, longopt in shortmapping.iteritems():
13325 parser.add_option("-" + shortopt, action="store_true",
13326 dest=longopt.lstrip("--").replace("-", "_"), default=False)
13327 for myalias, myopt in longopt_aliases.iteritems():
13328 parser.add_option(myalias, action="store_true",
13329 dest=myopt.lstrip("--").replace("-", "_"), default=False)
13331 for myopt, kwargs in argument_options.iteritems():
13332 parser.add_option(myopt,
13333 dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
# Normalize optional-value options (see insert_optional_args) first.
13335 tmpcmdline = insert_optional_args(tmpcmdline)
13337 myoptions, myargs = parser.parse_args(args=tmpcmdline)
13341 if myoptions.jobs == "True":
13345 jobs = int(myoptions.jobs)
13349 if jobs is not True and \
13353 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
13354 (myoptions.jobs,), noiselevel=-1)
13356 myoptions.jobs = jobs
13358 if myoptions.load_average:
13360 load_average = float(myoptions.load_average)
13364 if load_average <= 0.0:
13365 load_average = None
13367 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
13368 (myoptions.load_average,), noiselevel=-1)
13370 myoptions.load_average = load_average
13372 for myopt in options:
13373 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
13375 myopts[myopt] = True
13377 for myopt in argument_options:
13378 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
13382 for action_opt in actions:
13383 v = getattr(myoptions, action_opt.replace("-", "_"))
13386 multiple_actions(myaction, action_opt)
13388 myaction = action_opt
13392 return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
	"""Validate the configuration of every ROOT in *trees*.

	Iterates over each root in the trees mapping and calls validate()
	on that root's vartree settings object; validate() is expected to
	raise if the configuration is broken.

	@param trees: mapping of root paths to per-root tree dicts, each
		containing a "vartree" whose .settings supports validate()
	"""
	# Upstream line numbers 13394-13397 are contiguous in the source
	# listing, so no code is reconstructed here — only the embedded
	# line-number prefixes and lost indentation are repaired.
	for myroot in trees:
		settings = trees[myroot]["vartree"].settings
		settings.validate()
def clear_caches(trees):
	"""Discard per-tree metadata caches to free memory.

	For every root's tree dict this melts the porttree dbapi (undoing
	a prior freeze, per the portage dbapi freeze/melt API) and clears
	the auxiliary metadata caches of both the porttree and bintree
	dbapi objects, then clears portage's global directory cache.

	@param trees: mapping of root paths to per-root tree dicts with
		"porttree" and "bintree" entries exposing a dbapi
	"""
	# Upstream line numbers 13399-13405 are contiguous in the source
	# listing, so no code is reconstructed here — only the embedded
	# line-number prefixes and lost indentation are repaired.
	for d in trees.itervalues():
		d["porttree"].dbapi.melt()
		d["porttree"].dbapi._aux_cache.clear()
		d["bintree"].dbapi._aux_cache.clear()
		d["bintree"].dbapi._clear_cache()
	portage.dircache.clear()
# Builds the (settings, trees, mtimedb) triple used everywhere in emerge:
# honors PORTAGE_CONFIGROOT/ROOT from the environment, creates the trees
# via portage.create_trees(), attaches a RootConfig (with default set
# configuration) to every root, and opens the persistent mtimedb.
# NOTE(review): decimated listing — lines 13409 (presumably kwargs init),
# 13413, 13415, 13420, 13422, 13424, 13426-13427, 13430 are missing;
# indentation stripped and upstream numbers embedded. Restore upstream.
13408 def load_emerge_config(trees=None):
13410 for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
13411 v = os.environ.get(envvar, None)
13412 if v and v.strip():
13414 trees = portage.create_trees(trees=trees, **kwargs)
13416 for root, root_trees in trees.iteritems():
13417 settings = root_trees["vartree"].settings
13418 setconfig = load_default_config(settings, root_trees)
13419 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
13421 settings = trees["/"]["vartree"].settings
13423 for myroot in trees:
13425 settings = trees[myroot]["vartree"].settings
# The mtimedb lives under portage's cache path on the host root.
13428 mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
13429 mtimedb = portage.MtimeDB(mtimedbfile)
13431 return settings, trees, mtimedb
# Applies emerge-specific tweaks to a portage config object: normalizes
# case-insensitive vars, strips the "noauto" FEATURE, validates integer
# delays (CLEAN_DELAY, EMERGE_WARNING_DELAY, PORTAGE_DEBUG), and maps
# command-line options (--quiet/--verbose/--noconfmem/--debug/--color)
# onto settings, including color handling via NOCOLOR and isatty().
# NOTE(review): decimated listing — several lines are missing (e.g.
# 13435, 13437, 13442-13443, 13450-13452 including the CLEAN_DELAY
# default and `try:`, 13460/13462, 13471, 13475, 13479, 13484-13487,
# 13493-13494, 13499, 13501, 13504, 13507, 13514, 13522). Restore
# from upstream portage before executing.
13433 def adjust_config(myopts, settings):
13434 """Make emerge specific adjustments to the config."""
13436 # To enhance usability, make some vars case insensitive by forcing them to
13438 for myvar in ("AUTOCLEAN", "NOCOLOR"):
13439 if myvar in settings:
13440 settings[myvar] = settings[myvar].lower()
13441 settings.backup_changes(myvar)
13444 # Kill noauto as it will break merges otherwise.
13445 if "noauto" in settings.features:
13446 while "noauto" in settings.features:
13447 settings.features.remove("noauto")
13448 settings["FEATURES"] = " ".join(settings.features)
13449 settings.backup_changes("FEATURES")
# Fall back to the previous value when CLEAN_DELAY is not an integer.
13453 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
13454 except ValueError, e:
13455 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
13456 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
13457 settings["CLEAN_DELAY"], noiselevel=-1)
13458 settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
13459 settings.backup_changes("CLEAN_DELAY")
13461 EMERGE_WARNING_DELAY = 10
13463 EMERGE_WARNING_DELAY = int(settings.get(
13464 "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
13465 except ValueError, e:
13466 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
13467 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
13468 settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
13469 settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
13470 settings.backup_changes("EMERGE_WARNING_DELAY")
13472 if "--quiet" in myopts:
13473 settings["PORTAGE_QUIET"]="1"
13474 settings.backup_changes("PORTAGE_QUIET")
13476 if "--verbose" in myopts:
13477 settings["PORTAGE_VERBOSE"] = "1"
13478 settings.backup_changes("PORTAGE_VERBOSE")
13480 # Set so that configs will be merged regardless of remembered status
13481 if ("--noconfmem" in myopts):
13482 settings["NOCONFMEM"]="1"
13483 settings.backup_changes("NOCONFMEM")
13485 # Set various debug markers... They should be merged somehow.
13488 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
13489 if PORTAGE_DEBUG not in (0, 1):
13490 portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
13491 PORTAGE_DEBUG, noiselevel=-1)
13492 portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
13495 except ValueError, e:
13496 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
13497 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
13498 settings["PORTAGE_DEBUG"], noiselevel=-1)
13500 if "--debug" in myopts:
13502 settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
13503 settings.backup_changes("PORTAGE_DEBUG")
13505 if settings.get("NOCOLOR") not in ("yes","true"):
13506 portage.output.havecolor = 1
13508 """The explicit --color < y | n > option overrides the NOCOLOR environment
13509 variable and stdout auto-detection."""
13510 if "--color" in myopts:
13511 if "y" == myopts["--color"]:
13512 portage.output.havecolor = 1
13513 settings["NOCOLOR"] = "false"
13515 portage.output.havecolor = 0
13516 settings["NOCOLOR"] = "true"
13517 settings.backup_changes("NOCOLOR")
# Auto-disable color when stdout is not a terminal, unless NOCOLOR="no".
13518 elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
13519 portage.output.havecolor = 0
13520 settings["NOCOLOR"] = "true"
13521 settings.backup_changes("NOCOLOR")
# Applies process scheduling priorities from the config. The body
# (upstream lines 13524-13526) is missing from this listing — presumably
# calls to the nice()/ionice() helpers defined below; confirm upstream.
13523 def apply_priorities(settings):
# Renices the current process to PORTAGE_NICENESS (default "0"),
# reporting failures via EOutput instead of raising.
# NOTE(review): line 13528 (the `try:` that the visible `except` pairs
# with) is missing from this listing; indentation was stripped.
13527 def nice(settings):
13529 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
# OSError: os.nice failed; ValueError: PORTAGE_NICENESS is not an int.
13530 except (OSError, ValueError), e:
13531 out = portage.output.EOutput()
13532 out.eerror("Failed to change nice value to '%s'" % \
13533 settings["PORTAGE_NICENESS"])
13534 out.eerror("%s\n" % str(e))
# Runs the user-supplied PORTAGE_IONICE_COMMAND (with ${PID} expanded to
# the current pid) to set the I/O priority of this process. A missing
# ionice binary is ignored silently; a nonzero exit is reported.
# NOTE(review): decimated listing — lines 13537, 13539, 13541-13543,
# 13547-13548, 13553-13554 (including the early-return and `try:`
# scaffolding) are missing; indentation stripped. Restore upstream.
13536 def ionice(settings):
13538 ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
13540 ionice_cmd = shlex.split(ionice_cmd)
13544 from portage.util import varexpand
13545 variables = {"PID" : str(os.getpid())}
13546 cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
13549 rval = portage.process.spawn(cmd, env=os.environ)
13550 except portage.exception.CommandNotFound:
13551 # The OS kernel probably doesn't support ionice,
13552 # so return silently.
13555 if rval != os.EX_OK:
13556 out = portage.output.EOutput()
13557 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
13558 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
# Print an error telling the user that the requested package set does not
# exist, followed by the names of all sets that do exist in root_config.
13560 def display_missing_pkg_set(root_config, set_name):
# NOTE(review): the `msg = []` initializer is elided from this listing.
13563 msg.append(("emerge: There are no sets to satisfy '%s'. " + \
13564 "The following sets exist:") % \
13565 colorize("INFORM", set_name))
# List every known set name, sorted for stable output.
13568 for s in sorted(root_config.sets):
13569 msg.append(" %s" % s)
# Emit the whole message through the logging-aware writer at ERROR level.
13572 writemsg_level("".join("%s\n" % l for l in msg),
13573 level=logging.ERROR, noiselevel=-1)
# Validate/expand set-style arguments (names prefixed with SETPREFIX) in
# myfiles. Returns a (myfiles, exit_status) tuple: status 1 on error,
# os.EX_OK on success.
# NOTE(review): many interior lines are elided from this listing (the loop
# header over myfiles, the expansion itself, and the "world" branch header);
# the hedged comments below should be confirmed against the full source.
13575 def expand_set_arguments(myfiles, myaction, root_config):
# Sets are only meaningful for package-argument actions, not for search.
13577 if myaction != "search":
13583 if x[:1] == SETPREFIX:
# An argument that looks like a set but is invalid is reported as a
# bad package atom, and the whole call fails with status 1.
13585 msg.append("'%s' is not a valid package atom." % (x,))
13586 msg.append("Please check ebuild(5) for full details.")
13587 writemsg_level("".join("!!! %s\n" % line for line in msg),
13588 level=logging.ERROR, noiselevel=-1)
13589 return (myfiles, 1)
# Bare "system"/"world" arguments conflict with an explicit action.
13590 elif x == "system":
13595 if myaction is not None:
13597 multiple_actions("system", myaction)
13598 return (myfiles, 1)
# NOTE(review): the `elif x == "world":` branch header appears elided here.
13600 multiple_actions("world", myaction)
13601 return (myfiles, 1)
# "system" and "world" are mutually exclusive targets.
13603 if system and world:
13604 multiple_actions("system", "world")
13605 return (myfiles, 1)
13607 return (myfiles, os.EX_OK)
# Warn about portage tree directories that lack a profiles/repo_name entry.
# Returns True when at least one repository is missing its name file.
13609 def repo_name_check(trees):
13610 missing_repo_names = set()
13611 for root, root_trees in trees.iteritems():
13612 if "porttree" in root_trees:
13613 portdb = root_trees["porttree"].dbapi
# Start from all tree paths, then discard the ones with a known repo name.
13614 missing_repo_names.update(portdb.porttrees)
13615 repos = portdb.getRepositories()
# NOTE(review): the `for r in repos:` loop header is elided from this listing.
13617 missing_repo_names.discard(portdb.getRepositoryPath(r))
13619 if missing_repo_names:
# NOTE(review): the `msg = []` initializer is elided from this listing.
13621 msg.append("WARNING: One or more repositories " + \
13622 "have missing repo_name entries:")
13624 for p in missing_repo_names:
13625 msg.append("\t%s/profiles/repo_name" % (p,))
# Explain how to fix the problem, wrapped to 70 columns.
13627 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
13628 "should be a plain text file containing a unique " + \
13629 "name for the repository on the first line.", 70))
13630 writemsg_level("".join("%s\n" % l for l in msg),
13631 level=logging.WARNING, noiselevel=-1)
13633 return bool(missing_repo_names)
# Warn (per ROOT) when CONFIG_PROTECT is empty, since that disables
# protection of configuration files during merges.
13635 def config_protect_check(trees):
13636 for root, root_trees in trees.iteritems():
13637 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
13638 msg = "!!! CONFIG_PROTECT is empty"
# NOTE(review): an elided conditional here presumably restricts the
# " for '<root>'" suffix to non-default roots -- confirm against full source.
13640 msg += " for '%s'" % root
13641 writemsg_level(msg, level=logging.WARN, noiselevel=-1)
# Report that a short ebuild name matched several category/package pairs.
# In --quiet mode just list the candidates; otherwise run a search so the
# user sees full descriptions of each candidate.
13643 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
13645 if "--quiet" in myopts:
13646 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
13647 print "!!! one of the following fully-qualified ebuild names instead:\n"
# De-duplicate the cp (category/package) keys of the matched atoms.
13648 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
13649 print " " + colorize("INFORM", cp)
# NOTE(review): the `return` and `else:` lines of this branch are elided.
# Non-quiet path: construct a search object mirroring the user's options.
13652 s = search(root_config, spinner, "--searchdesc" in myopts,
13653 "--quiet" not in myopts, "--usepkg" in myopts,
13654 "--usepkgonly" in myopts)
# NOTE(review): the continuation of this call (its argument and closing
# parenthesis) is elided from this listing.
13655 null_cp = portage.dep_getkey(insert_category_into_atom(
13657 cat, atom_pn = portage.catsplit(null_cp)
13658 s.searchkey = atom_pn
# Show full search output for each distinct candidate cp.
13659 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
13662 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
13663 print "!!! one of the above fully-qualified ebuild names instead.\n"
# Verify that every ROOT has a valid profile configured. Actions that work
# without a profile (info, sync, --version, --help) are exempted; otherwise
# an invalid profile produces an error message restricting allowed actions.
13665 def profile_check(trees, myaction, myopts):
13666 if myaction in ("info", "sync"):
# NOTE(review): the `return os.EX_OK` lines of these exempt branches are
# elided from this listing -- confirm against full source.
13668 elif "--version" in myopts or "--help" in myopts:
13670 for root, root_trees in trees.iteritems():
# A non-empty profiles list means this root's profile is usable.
13671 if root_trees["root_config"].settings.profiles:
13673 # generate some profile related warning messages
13674 validate_ebuild_environment(trees)
13675 msg = "If you have just changed your profile configuration, you " + \
13676 "should revert back to the previous configuration. Due to " + \
13677 "your current profile being invalid, allowed actions are " + \
13678 "limited to --help, --info, --sync, and --version."
13679 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
13680 level=logging.ERROR, noiselevel=-1)
13685 global portage # NFC why this is necessary now - genone
13686 portage._disable_legacy_globals()
13687 # Disable color until we're sure that it should be enabled (after
13688 # EMERGE_DEFAULT_OPTS has been parsed).
13689 portage.output.havecolor = 0
13690 # This first pass is just for options that need to be known as early as
13691 # possible, such as --config-root. They will be parsed again later,
13692 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
13693 # the value of --config-root).
13694 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
13695 if "--debug" in myopts:
13696 os.environ["PORTAGE_DEBUG"] = "1"
13697 if "--config-root" in myopts:
13698 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
13700 # Portage needs to ensure a sane umask for the files it creates.
13702 settings, trees, mtimedb = load_emerge_config()
13703 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13704 rval = profile_check(trees, myaction, myopts)
13705 if rval != os.EX_OK:
13708 if portage._global_updates(trees, mtimedb["updates"]):
13710 # Reload the whole config from scratch.
13711 settings, trees, mtimedb = load_emerge_config(trees=trees)
13712 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13714 xterm_titles = "notitles" not in settings.features
13717 if "--ignore-default-opts" not in myopts:
13718 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
13719 tmpcmdline.extend(sys.argv[1:])
13720 myaction, myopts, myfiles = parse_opts(tmpcmdline)
13722 if "--digest" in myopts:
13723 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
13724 # Reload the whole config from scratch so that the portdbapi internal
13725 # config is updated with new FEATURES.
13726 settings, trees, mtimedb = load_emerge_config(trees=trees)
13727 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13729 for myroot in trees:
13730 mysettings = trees[myroot]["vartree"].settings
13731 mysettings.unlock()
13732 adjust_config(myopts, mysettings)
13733 mysettings["PORTAGE_COUNTER_HASH"] = \
13734 trees[myroot]["vartree"].dbapi._counter_hash()
13735 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
13737 del myroot, mysettings
13739 apply_priorities(settings)
13741 spinner = stdout_spinner()
13742 if "candy" in settings.features:
13743 spinner.update = spinner.update_scroll
13745 if "--quiet" not in myopts:
13746 portage.deprecated_profile_check()
13747 #repo_name_check(trees)
13748 config_protect_check(trees)
13750 eclasses_overridden = {}
13751 for mytrees in trees.itervalues():
13752 mydb = mytrees["porttree"].dbapi
13753 # Freeze the portdbapi for performance (memoize all xmatch results).
13755 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
13758 if eclasses_overridden and \
13759 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
13760 prefix = bad(" * ")
13761 if len(eclasses_overridden) == 1:
13762 writemsg(prefix + "Overlay eclass overrides " + \
13763 "eclass from PORTDIR:\n", noiselevel=-1)
13765 writemsg(prefix + "Overlay eclasses override " + \
13766 "eclasses from PORTDIR:\n", noiselevel=-1)
13767 writemsg(prefix + "\n", noiselevel=-1)
13768 for eclass_name in sorted(eclasses_overridden):
13769 writemsg(prefix + " '%s/%s.eclass'\n" % \
13770 (eclasses_overridden[eclass_name], eclass_name),
13772 writemsg(prefix + "\n", noiselevel=-1)
13773 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
13774 "because it will trigger invalidation of cached ebuild metadata " + \
13775 "that is distributed with the portage tree. If you must " + \
13776 "override eclasses from PORTDIR then you are advised to add " + \
13777 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
13778 "`emerge --regen` after each time that you run `emerge --sync`. " + \
13779 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
13780 "you would like to disable this warning."
13781 from textwrap import wrap
13782 for line in wrap(msg, 72):
13783 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
13785 if "moo" in myfiles:
13788 Larry loves Gentoo (""" + platform.system() + """)
13790 _______________________
13791 < Have you mooed today? >
13792 -----------------------
13802 ext = os.path.splitext(x)[1]
13803 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
13804 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
13807 root_config = trees[settings["ROOT"]]["root_config"]
13809 # only expand sets for actions taking package arguments
13810 oldargs = myfiles[:]
13811 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
13812 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
13813 if retval != os.EX_OK:
13816 # Need to handle empty sets specially, otherwise emerge will react
13817 # with the help message for empty argument lists
13818 if oldargs and not myfiles:
13819 print "emerge: no targets left after set expansion"
13822 if ("--tree" in myopts) and ("--columns" in myopts):
13823 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
13826 if ("--quiet" in myopts):
13827 spinner.update = spinner.update_quiet
13828 portage.util.noiselimit = -1
13830 # Always create packages if FEATURES=buildpkg
13831 # Imply --buildpkg if --buildpkgonly
13832 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
13833 if "--buildpkg" not in myopts:
13834 myopts["--buildpkg"] = True
13836 # Also allow -S to invoke search action (-sS)
13837 if ("--searchdesc" in myopts):
13838 if myaction and myaction != "search":
13839 myfiles.append(myaction)
13840 if "--search" not in myopts:
13841 myopts["--search"] = True
13842 myaction = "search"
13844 # Always try and fetch binary packages if FEATURES=getbinpkg
13845 if ("getbinpkg" in settings.features):
13846 myopts["--getbinpkg"] = True
13848 if "--buildpkgonly" in myopts:
13849 # --buildpkgonly will not merge anything, so
13850 # it cancels all binary package options.
13851 for opt in ("--getbinpkg", "--getbinpkgonly",
13852 "--usepkg", "--usepkgonly"):
13853 myopts.pop(opt, None)
13855 if "--fetch-all-uri" in myopts:
13856 myopts["--fetchonly"] = True
13858 if "--skipfirst" in myopts and "--resume" not in myopts:
13859 myopts["--resume"] = True
13861 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
13862 myopts["--usepkgonly"] = True
13864 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
13865 myopts["--getbinpkg"] = True
13867 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
13868 myopts["--usepkg"] = True
13870 # Also allow -K to apply --usepkg/-k
13871 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
13872 myopts["--usepkg"] = True
13874 # Allow -p to remove --ask
13875 if ("--pretend" in myopts) and ("--ask" in myopts):
13876 print ">>> --pretend disables --ask... removing --ask from options."
13877 del myopts["--ask"]
13879 # forbid --ask when not in a terminal
13880 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
13881 if ("--ask" in myopts) and (not sys.stdin.isatty()):
13882 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
13886 if settings.get("PORTAGE_DEBUG", "") == "1":
13887 spinner.update = spinner.update_quiet
13889 if "python-trace" in settings.features:
13890 import portage.debug
13891 portage.debug.set_trace(True)
13893 if not ("--quiet" in myopts):
13894 if not sys.stdout.isatty() or ("--nospinner" in myopts):
13895 spinner.update = spinner.update_basic
13897 if "--version" in myopts:
13898 print getportageversion(settings["PORTDIR"], settings["ROOT"],
13899 settings.profile_path, settings["CHOST"],
13900 trees[settings["ROOT"]]["vartree"].dbapi)
13902 elif "--help" in myopts:
13903 _emerge.help.help(myaction, myopts, portage.output.havecolor)
13906 if "--debug" in myopts:
13907 print "myaction", myaction
13908 print "myopts", myopts
13910 if not myaction and not myfiles and "--resume" not in myopts:
13911 _emerge.help.help(myaction, myopts, portage.output.havecolor)
13914 pretend = "--pretend" in myopts
13915 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
13916 buildpkgonly = "--buildpkgonly" in myopts
13918 # check if root user is the current user for the actions where emerge needs this
13919 if portage.secpass < 2:
13920 # We've already allowed "--version" and "--help" above.
13921 if "--pretend" not in myopts and myaction not in ("search","info"):
13922 need_superuser = not \
13924 (buildpkgonly and secpass >= 1) or \
13925 myaction in ("metadata", "regen") or \
13926 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
13927 if portage.secpass < 1 or \
13930 access_desc = "superuser"
13932 access_desc = "portage group"
13933 # Always show portage_group_warning() when only portage group
13934 # access is required but the user is not in the portage group.
13935 from portage.data import portage_group_warning
13936 if "--ask" in myopts:
13937 myopts["--pretend"] = True
13938 del myopts["--ask"]
13939 print ("%s access is required... " + \
13940 "adding --pretend to options.\n") % access_desc
13941 if portage.secpass < 1 and not need_superuser:
13942 portage_group_warning()
13944 sys.stderr.write(("emerge: %s access is " + \
13945 "required.\n\n") % access_desc)
13946 if portage.secpass < 1 and not need_superuser:
13947 portage_group_warning()
13950 disable_emergelog = False
13951 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
13953 disable_emergelog = True
13955 if myaction in ("search", "info"):
13956 disable_emergelog = True
13957 if disable_emergelog:
13958 """ Disable emergelog for everything except build or unmerge
13959 operations. This helps minimize parallel emerge.log entries that can
13960 confuse log parsers. We especially want it disabled during
13961 parallel-fetch, which uses --resume --fetchonly."""
13963 def emergelog(*pargs, **kargs):
13966 if not "--pretend" in myopts:
13967 emergelog(xterm_titles, "Started emerge on: "+\
13968 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
13971 myelogstr=" ".join(myopts)
13973 myelogstr+=" "+myaction
13975 myelogstr += " " + " ".join(oldargs)
13976 emergelog(xterm_titles, " *** emerge " + myelogstr)
13979 def emergeexitsig(signum, frame):
13980 signal.signal(signal.SIGINT, signal.SIG_IGN)
13981 signal.signal(signal.SIGTERM, signal.SIG_IGN)
13982 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
13983 sys.exit(100+signum)
13984 signal.signal(signal.SIGINT, emergeexitsig)
13985 signal.signal(signal.SIGTERM, emergeexitsig)
13988 """This gets out final log message in before we quit."""
13989 if "--pretend" not in myopts:
13990 emergelog(xterm_titles, " *** terminating.")
13991 if "notitles" not in settings.features:
13993 portage.atexit_register(emergeexit)
13995 if myaction in ("config", "metadata", "regen", "sync"):
13996 if "--pretend" in myopts:
13997 sys.stderr.write(("emerge: The '%s' action does " + \
13998 "not support '--pretend'.\n") % myaction)
14001 if "sync" == myaction:
14002 return action_sync(settings, trees, mtimedb, myopts, myaction)
14003 elif "metadata" == myaction:
14004 action_metadata(settings, portdb, myopts)
14005 elif myaction=="regen":
14006 validate_ebuild_environment(trees)
14007 action_regen(settings, portdb, myopts.get("--jobs"),
14008 myopts.get("--load-average"))
14010 elif "config"==myaction:
14011 validate_ebuild_environment(trees)
14012 action_config(settings, trees, myopts, myfiles)
14015 elif "search"==myaction:
14016 validate_ebuild_environment(trees)
14017 action_search(trees[settings["ROOT"]]["root_config"],
14018 myopts, myfiles, spinner)
14019 elif myaction in ("clean", "unmerge") or \
14020 (myaction == "prune" and "--nodeps" in myopts):
14021 validate_ebuild_environment(trees)
14023 # Ensure atoms are valid before calling unmerge().
14024 # For backward compat, leading '=' is not required.
14026 if is_valid_package_atom(x) or \
14027 is_valid_package_atom("=" + x):
14030 msg.append("'%s' is not a valid package atom." % (x,))
14031 msg.append("Please check ebuild(5) for full details.")
14032 writemsg_level("".join("!!! %s\n" % line for line in msg),
14033 level=logging.ERROR, noiselevel=-1)
14036 # When given a list of atoms, unmerge
14037 # them in the order given.
14038 ordered = myaction == "unmerge"
14039 if 1 == unmerge(root_config, myopts, myaction, myfiles,
14040 mtimedb["ldpath"], ordered=ordered):
14041 if not (buildpkgonly or fetchonly or pretend):
14042 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
14044 elif myaction in ("depclean", "info", "prune"):
14046 # Ensure atoms are valid before calling unmerge().
14047 vardb = trees[settings["ROOT"]]["vartree"].dbapi
14050 if is_valid_package_atom(x):
14052 valid_atoms.append(
14053 portage.dep_expand(x, mydb=vardb, settings=settings))
14054 except portage.exception.AmbiguousPackageName, e:
14055 msg = "The short ebuild name \"" + x + \
14056 "\" is ambiguous. Please specify " + \
14057 "one of the following " + \
14058 "fully-qualified ebuild names instead:"
14059 for line in textwrap.wrap(msg, 70):
14060 writemsg_level("!!! %s\n" % (line,),
14061 level=logging.ERROR, noiselevel=-1)
14063 writemsg_level(" %s\n" % colorize("INFORM", i),
14064 level=logging.ERROR, noiselevel=-1)
14065 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
14069 msg.append("'%s' is not a valid package atom." % (x,))
14070 msg.append("Please check ebuild(5) for full details.")
14071 writemsg_level("".join("!!! %s\n" % line for line in msg),
14072 level=logging.ERROR, noiselevel=-1)
14075 if myaction == "info":
14076 return action_info(settings, trees, myopts, valid_atoms)
14078 validate_ebuild_environment(trees)
14079 action_depclean(settings, trees, mtimedb["ldpath"],
14080 myopts, myaction, valid_atoms, spinner)
14081 if not (buildpkgonly or fetchonly or pretend):
14082 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
14083 # "update", "system", or just process files:
14085 validate_ebuild_environment(trees)
14086 if "--pretend" not in myopts:
14087 display_news_notification(root_config, myopts)
14088 retval = action_build(settings, trees, mtimedb,
14089 myopts, myaction, myfiles, spinner)
14090 root_config = trees[settings["ROOT"]]["root_config"]
14091 post_emerge(root_config, myopts, mtimedb, retval)