2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
8 from collections import deque
28 from os import path as osp
29 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
32 from portage import digraph
33 from portage.const import NEWS_LIB_PATH
36 import portage.xpak, commands, errno, re, socket, time
37 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
38 nc_len, red, teal, turquoise, xtermTitle, \
39 xtermTitleReset, yellow
40 from portage.output import create_color_func
41 good = create_color_func("GOOD")
42 bad = create_color_func("BAD")
43 # white looks bad on terminals with white background
44 from portage.output import bold as white
48 portage.dep._dep_check_strict = True
51 import portage.exception
52 from portage.data import secpass
53 from portage.elog.messages import eerror
54 from portage.util import normalize_path as normpath
55 from portage.util import cmp_sort_key, writemsg, writemsg_level
56 from portage.sets import load_default_config, SETPREFIX
57 from portage.sets.base import InternalPackageSet
59 from itertools import chain, izip
62 import cPickle as pickle
67 from cStringIO import StringIO
69 from StringIO import StringIO
# A terminal progress spinner. Three display modes are visible below:
# update_basic (dots), update_scroll (scrolling message), update_twirl
# (rotating /-\| character), plus update_quiet (no output).
# NOTE(review): the embedded original line numbers jump (72, 90-95, 99, ...),
# so source lines are missing here (e.g. the scroll_msgs list opener, the
# __init__ header, return statements and stdout.flush() calls). Only what is
# visible is documented.
71 class stdout_spinner(object):
# Candidate messages for scroll mode; one is picked pseudo-randomly below.
73 "Gentoo Rocks ("+platform.system()+")",
74 "Thank you for using Gentoo. :)",
75 "Are you actually trying to read this?",
76 "How many times have you stared at this?",
77 "We are generating the cache right now",
78 "You are paying too much attention.",
79 "A theory is better than its explanation.",
80 "Phasers locked on target, Captain.",
81 "Thrashing is just virtual crashing.",
82 "To be is to program.",
83 "Real Users hate Real Programmers.",
84 "When all else fails, read the instructions.",
85 "Functionality breeds Contempt.",
86 "The future lies ahead.",
87 "3.1415926535897932384626433832795028841971694",
88 "Sometimes insanity is the only alternative.",
89 "Inaccuracy saves a world of explanation.",
# Characters cycled through by update_twirl (backslashes escaped).
92 twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
# Default update mode is the twirl spinner; the scroll message is chosen
# from the current time so it varies between invocations.
96 self.update = self.update_twirl
97 self.scroll_sequence = self.scroll_msgs[
98 int(time.time() * 100) % len(self.scroll_msgs)]
# Minimum seconds between visible updates (rate limit for _return_early).
100 self.min_display_latency = 0.05
102 def _return_early(self):
# (docstring fragment; opening/closing quotes are on missing lines)
104 Flushing output to the tty too frequently wastes cpu time. Therefore,
105 each update* method should return without doing any output when this
# Rate limiter: presumably returns True when called again within
# min_display_latency seconds of the last accepted update — the
# return statements are on missing lines; TODO confirm.
108 cur_time = time.time()
109 if cur_time - self.last_update < self.min_display_latency:
111 self.last_update = cur_time
# Dot-based progress: prints ". " at each multiple-of-100 wraparound,
# "." otherwise (the else: line is missing from this extraction).
114 def update_basic(self):
115 self.spinpos = (self.spinpos + 1) % 500
116 if self._return_early():
118 if (self.spinpos % 100) == 0:
119 if self.spinpos == 0:
120 sys.stdout.write(". ")
122 sys.stdout.write(".")
# Scrolling-message progress: second half of the cycle scrolls the
# message back (dark green), first half scrolls forward (green).
125 def update_scroll(self):
126 if self._return_early():
128 if(self.spinpos >= len(self.scroll_sequence)):
129 sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
130 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
132 sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
134 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
# Rotating-character progress: backspaces over the previous char and
# writes the next one from twirl_sequence.
136 def update_twirl(self):
137 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
138 if self._return_early():
140 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
# No-op spinner for --quiet mode (body is on a missing line).
143 def update_quiet(self):
# Interactive yes/no (or custom) prompt helper.
# NOTE(review): embedded line numbers jump (151-152, 159, 163, ...); the
# try:, while loop header, return statements and the SystemExit conversion
# are on missing lines. Only visible behavior is documented.
146 def userquery(prompt, responses=None, colours=None):
147 """Displays a prompt and a set of responses, then waits for a response
148 which is checked against the responses and the first to match is
149 returned. An empty response will match the first value in responses. The
150 input buffer is *not* cleared prior to the prompt!
153 responses: a List of Strings.
154 colours: a List of Functions taking and returning a String, used to
155 process the responses for display. Typically these will be functions
156 like red() but could be e.g. lambda x: "DisplayString".
157 If responses is omitted, defaults to ["Yes", "No"], [green, red].
158 If only colours is omitted, defaults to [bold, ...].
160 Returns a member of the List responses. (If called without optional
161 arguments, returns "Yes" or "No".)
162 KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
# Default prompt is Yes/No with the configured prompt-choice colors.
164 if responses is None:
165 responses = ["Yes", "No"]
167 create_color_func("PROMPT_CHOICE_DEFAULT"),
168 create_color_func("PROMPT_CHOICE_OTHER")
170 elif colours is None:
# Repeat the colour list so there is one colour function per response.
172 colours=(colours*len(responses))[:len(responses)]
# Show all responses, colourized, e.g. "[Yes/No] ", and read user input.
176 response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
177 for key in responses:
178 # An empty response will match the first value in responses.
# Case-insensitive prefix match against each candidate response.
179 if response.upper()==key[:len(response)].upper():
181 print "Sorry, response '%s' not understood." % response,
182 except (EOFError, KeyboardInterrupt):
# Command-line vocabulary for the emerge front-end.
# NOTE(review): the embedded numbering jumps (191-192, 196-199, ...); the
# closing bracket of `actions`, the opener/closer of the long-option
# container and of the short-option mapping are on missing lines.
# Recognized emerge action words (non-option arguments).
186 actions = frozenset([
187 "clean", "config", "depclean",
188 "info", "list-sets", "metadata",
189 "prune", "regen", "search",
190 "sync", "unmerge", "version",
# Recognized long options (container's opening line is missing here).
193 "--ask", "--alphabetical",
194 "--buildpkg", "--buildpkgonly",
195 "--changelog", "--columns",
200 "--fetchonly", "--fetch-all-uri",
201 "--getbinpkg", "--getbinpkgonly",
202 "--help", "--ignore-default-opts",
206 "--nodeps", "--noreplace",
207 "--nospinner", "--oneshot",
208 "--onlydeps", "--pretend",
209 "--quiet", "--resume",
210 "--searchdesc", "--selective",
214 "--usepkg", "--usepkgonly",
# Single-letter option -> long option mapping (dict opener is missing;
# presumably named `shortmapping` — TODO confirm against full source).
221 "b":"--buildpkg", "B":"--buildpkgonly",
222 "c":"--clean", "C":"--unmerge",
223 "d":"--debug", "D":"--deep",
225 "f":"--fetchonly", "F":"--fetch-all-uri",
226 "g":"--getbinpkg", "G":"--getbinpkgonly",
228 "k":"--usepkg", "K":"--usepkgonly",
230 "n":"--noreplace", "N":"--newuse",
231 "o":"--onlydeps", "O":"--nodeps",
232 "p":"--pretend", "P":"--prune",
234 "s":"--search", "S":"--searchdesc",
237 "v":"--verbose", "V":"--version"
# Append a timestamped message to /var/log/emerge.log, optionally echoing
# a short form to the xterm title bar.
# NOTE(review): embedded numbers jump (245, 250-252, 256, ...); the try:,
# seek-to-end, close and finally lines are missing from this extraction.
240 def emergelog(xterm_titles, mystr, short_msg=None):
241 if xterm_titles and short_msg:
# Prefix the title message with the host name when available.
242 if "HOSTNAME" in os.environ:
243 short_msg = os.environ["HOSTNAME"]+": "+short_msg
244 xtermTitle(short_msg)
246 file_path = "/var/log/emerge.log"
247 mylogfile = open(file_path, "a")
# Make sure the log stays writable by the portage user/group.
248 portage.util.apply_secpass_permissions(file_path,
249 uid=portage.portage_uid, gid=portage.portage_gid,
# Serialize concurrent emerge processes writing to the same log.
253 mylock = portage.locks.lockfile(mylogfile)
254 # seek because we may have gotten held up by the lock.
255 # if so, we may not be positioned at the end of the file.
# Unix timestamp truncated to whole seconds (first 10 chars).
257 mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
261 portage.locks.unlockfile(mylock)
# Best-effort logging: failures are reported to stderr, not raised.
263 except (IOError,OSError,portage.exception.PortageException), e:
265 print >> sys.stderr, "emergelog():",e
# Print a visible countdown before a potentially destructive action so the
# user has a chance to Control-C out.
# NOTE(review): lines are missing here (the `if secs:` guard, the countdown
# loop header and the sleep/final-print lines are not in this extraction).
267 def countdown(secs=5, doing="Starting"):
268 print ">>> Waiting",secs,"seconds before starting..."
270 print ">>> (Control-C to abort)...\n"+doing+" in: ",
# One colorized number per remaining second (loop header is missing).
274 sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
279 # formats a size given in bytes nicely
# NOTE(review): lines are missing (the basestring early-return, the
# thousands-separator loop header and the final `return mystr+" kB"`).
280 def format_size(mysize):
# Non-numeric input is presumably returned/handled as-is on a missing line.
281 if isinstance(mysize, basestring):
283 if 0 != mysize % 1024:
284 # Always round up to the next kB so that it doesn't show 0 kB when
285 # some small file still needs to be fetched.
286 mysize += 1024 - mysize % 1024
# Integer division: size in whole kilobytes.
287 mystr=str(mysize/1024)
# Insert a comma as thousands separator (loop header on a missing line).
291 mystr=mystr[:mycount]+","+mystr[mycount:]
# Determine the active gcc version, trying progressively less specific
# sources: gcc-config, the CHOST-prefixed gcc, then plain `gcc`.
# NOTE(review): some lines are missing here per the embedded numbering
# (docstring quotes and blank separators).
295 def getgccversion(chost):
298 return: the current in-use gcc version
301 gcc_ver_command = 'gcc -dumpversion'
302 gcc_ver_prefix = 'gcc-'
304 gcc_not_found_error = red(
305 "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
306 "!!! to update the environment of this terminal and possibly\n" +
307 "!!! other terminals also.\n"
# 1) Ask gcc-config for the current profile, e.g. "x86_64-pc-linux-gnu-4.1.2";
#    rewrite the CHOST prefix into the "gcc-" prefix.
310 mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
311 if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
312 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
# 2) Fall back to the CHOST-prefixed compiler binary.
314 mystatus, myoutput = commands.getstatusoutput(
315 chost + "-" + gcc_ver_command)
316 if mystatus == os.EX_OK:
317 return gcc_ver_prefix + myoutput
# 3) Last resort: whatever `gcc` is on PATH.
319 mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
320 if mystatus == os.EX_OK:
321 return gcc_ver_prefix + myoutput
# Nothing worked: warn loudly and return a placeholder.
323 portage.writemsg(gcc_not_found_error, noiselevel=-1)
324 return "[unavailable]"
# Build the "Portage x.y.z (profile, gcc, libc, kernel arch)" banner string
# shown by `emerge --version` / `emerge --info`.
# NOTE(review): embedded numbering jumps (328, 333-334, 336-337, ...); the
# try/except around readlink, the libc loop header and the else branches
# are on missing lines.
326 def getportageversion(portdir, target_root, profile, chost, vardb):
327 profilever = "unavailable"
# Express the profile as a path relative to $PORTDIR/profiles when possible.
329 realpath = os.path.realpath(profile)
330 basepath = os.path.realpath(os.path.join(portdir, "profiles"))
331 if realpath.startswith(basepath):
332 profilever = realpath[1 + len(basepath):]
# Otherwise fall back to the raw symlink target, marked with "!".
335 profilever = "!" + os.readlink(profile)
338 del realpath, basepath
# Determine the installed libc version from the virtual/libc (and legacy
# virtual/glibc) providers in the vardb.
341 libclist = vardb.match("virtual/libc")
342 libclist += vardb.match("virtual/glibc")
343 libclist = portage.util.unique_array(libclist)
# Loop over libclist (loop header is on a missing line); join the
# version parts of each match, comma-separating multiple providers.
345 xs=portage.catpkgsplit(x)
347 libcver+=","+"-".join(xs[1:])
349 libcver="-".join(xs[1:])
351 libcver="unavailable"
353 gccver = getgccversion(chost)
# Kernel release plus machine architecture, e.g. "2.6.25 x86_64".
354 unameout=platform.release()+" "+platform.machine()
356 return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
# Translate emerge command-line options/action into the parameter set that
# drives the dependency graph calculation.
# NOTE(review): embedded numbering jumps (369, 373-374, 386, 389-390); the
# --deep handling body and the final `return myparams` are on missing lines.
358 def create_depgraph_params(myopts, myaction):
359 #configure emerge engine parameters
361 # self: include _this_ package regardless of if it is merged.
362 # selective: exclude the package if it is merged
363 # recurse: go into the dependencies
364 # deep: go into the dependencies of already merged packages
365 # empty: pretend nothing is merged
366 # complete: completely account for all known dependencies
367 # remove: build graph for use in removing packages
# Recursion into dependencies is on by default.
368 myparams = set(["recurse"])
# Removal actions need a complete graph so reverse deps are accounted for.
370 if myaction == "remove":
371 myparams.add("remove")
372 myparams.add("complete")
# Update-style options skip packages that are already merged.
375 if "--update" in myopts or \
376 "--newuse" in myopts or \
377 "--reinstall" in myopts or \
378 "--noreplace" in myopts:
379 myparams.add("selective")
# --emptytree overrides selective: pretend nothing is installed.
380 if "--emptytree" in myopts:
381 myparams.add("empty")
382 myparams.discard("selective")
383 if "--nodeps" in myopts:
384 myparams.discard("recurse")
385 if "--deep" in myopts:
387 if "--complete-graph" in myopts:
388 myparams.add("complete")
391 # search functionality
# Package search engine behind `emerge --search` / `--searchdesc`.
# NOTE(review): this class is heavily fragmented in this extraction — the
# embedded original line numbers jump throughout (393-402, 416-419, ...).
# Missing pieces include the class docstring/constants (VERSION_RELEASE is
# referenced below but defined on a missing line), the fake_portdb setup,
# several loop headers, try/except lines, return and continue statements.
# Only what is visible is documented.
392 class search(object):
# Aggregates one or more package databases (portdb/bindb/vardb) behind a
# single fake-portdb facade so searches can span ebuilds, binary packages
# and installed packages.
403 def __init__(self, root_config, spinner, searchdesc,
404 verbose, usepkg, usepkgonly):
405 """Searches the available and installed packages for the supplied search key.
406 The list of available and installed packages is created at object instantiation.
407 This makes successive searches faster."""
408 self.settings = root_config.settings
409 self.vartree = root_config.trees["vartree"]
410 self.spinner = spinner
411 self.verbose = verbose
412 self.searchdesc = searchdesc
413 self.root_config = root_config
414 self.setconfig = root_config.setconfig
415 self.matches = {"pkg" : []}
# fake_portdb (constructed on a missing line) delegates the portdb API
# to this object's _-prefixed multi-db implementations below.
420 self.portdb = fake_portdb
421 for attrib in ("aux_get", "cp_all",
422 "xmatch", "findname", "getFetchMap"):
423 setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
427 portdb = root_config.trees["porttree"].dbapi
428 bindb = root_config.trees["bintree"].dbapi
429 vardb = root_config.trees["vartree"].dbapi
# Choose which databases participate based on --usepkg/--usepkgonly.
431 if not usepkgonly and portdb._have_root_eclass_dir:
432 self._dbs.append(portdb)
434 if (usepkg or usepkgonly) and bindb.cp_all():
435 self._dbs.append(bindb)
437 self._dbs.append(vardb)
438 self._portdb = portdb
# _cp_all: union of cp_all() across all participating dbs, sorted.
# (The def line and set construction are on missing lines.)
443 cp_all.update(db.cp_all())
444 return list(sorted(cp_all))
# aux_get facade: presumably tries each db in turn until one succeeds
# (the loop and exception handling are on missing lines — TODO confirm).
446 def _aux_get(self, *args, **kwargs):
449 return db.aux_get(*args, **kwargs)
454 def _findname(self, *args, **kwargs):
456 if db is not self._portdb:
457 # We don't want findname to return anything
458 # unless it's an ebuild in a portage tree.
459 # Otherwise, it's already built and we don't
462 func = getattr(db, "findname", None)
464 value = func(*args, **kwargs)
# getFetchMap facade over the participating dbs (loop header missing).
469 def _getFetchMap(self, *args, **kwargs):
471 func = getattr(db, "getFetchMap", None)
473 value = func(*args, **kwargs)
# Visibility check: wraps the cpv in a Package so the shared visible()
# helper can apply mask/keyword logic per package type.
478 def _visible(self, db, cpv, metadata):
479 installed = db is self.vartree.dbapi
480 built = installed or db is not self._portdb
483 pkg_type = "installed"
486 return visible(self.settings,
487 Package(type_name=pkg_type, root_config=self.root_config,
488 cpv=cpv, built=built, installed=installed, metadata=metadata))
490 def _xmatch(self, level, atom):
492 This method does not expand old-style virtuals because it
493 is restricted to returning matches for a single ${CATEGORY}/${PN}
494 and old-style virtual matches are unreliable for that when querying
495 multiple package databases. If necessary, old-style virtuals
496 can be performed on atoms prior to calling this method.
498 cp = portage.dep_getkey(atom)
# "match-all": union of matches from every db, restricted to this cp.
499 if level == "match-all":
502 if hasattr(db, "xmatch"):
503 matches.update(db.xmatch(level, atom))
505 matches.update(db.match(atom))
506 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
507 db._cpv_sort_ascending(result)
# "match-visible": same union, but dbs without xmatch are filtered
# through the _visible() check per cpv.
508 elif level == "match-visible":
511 if hasattr(db, "xmatch"):
512 matches.update(db.xmatch(level, atom))
514 db_keys = list(db._aux_cache_keys)
515 for cpv in db.match(atom):
516 metadata = izip(db_keys,
517 db.aux_get(cpv, db_keys))
518 if not self._visible(db, cpv, metadata):
521 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
522 db._cpv_sort_ascending(result)
# "bestmatch-visible": keep the single highest visible match across
# all dbs (portage.best picks the greater of two versions).
523 elif level == "bestmatch-visible":
526 if hasattr(db, "xmatch"):
527 cpv = db.xmatch("bestmatch-visible", atom)
528 if not cpv or portage.cpv_getkey(cpv) != cp:
530 if not result or cpv == portage.best([cpv, result]):
533 db_keys = Package.metadata_keys
534 # break out of this loop with highest visible
535 # match, checked in descending order
536 for cpv in reversed(db.match(atom)):
537 if portage.cpv_getkey(cpv) != cp:
539 metadata = izip(db_keys,
540 db.aux_get(cpv, db_keys))
541 if not self._visible(db, cpv, metadata):
543 if not result or cpv == portage.best([cpv, result]):
# Any other level is unsupported by this facade.
547 raise NotImplementedError(level)
550 def execute(self,searchkey)
551 """Performs the search for the supplied search key"""
553 self.searchkey=searchkey
554 self.packagematches = []
# With --searchdesc descriptions are searched too, so a "desc" result
# bucket exists; otherwise only "pkg" and "set" buckets.
557 self.matches = {"pkg":[], "desc":[], "set":[]}
560 self.matches = {"pkg":[], "set":[]}
561 print "Searching... ",
# Leading '%' means the key is a regular expression; leading '@'
# restricts matching to the category part (flag assignments are on
# missing lines — TODO confirm flag names against full source).
564 if self.searchkey.startswith('%'):
566 self.searchkey = self.searchkey[1:]
567 if self.searchkey.startswith('@'):
569 self.searchkey = self.searchkey[1:]
571 self.searchre=re.compile(self.searchkey,re.I)
# Non-regex keys are escaped for a literal, case-insensitive match.
573 self.searchre=re.compile(re.escape(self.searchkey), re.I)
# Scan every category/package name in the aggregated dbs.
574 for package in self.portdb.cp_all():
575 self.spinner.update()
578 match_string = package[:]
# Otherwise match against the package name only (no category).
580 match_string = package.split("/")[-1]
583 if self.searchre.search(match_string):
# No visible version => the package is masked.
584 if not self.portdb.xmatch("match-visible", package):
586 self.matches["pkg"].append([package,masked])
587 elif self.searchdesc: # DESCRIPTION searching
588 full_package = self.portdb.xmatch("bestmatch-visible", package)
590 #no match found; we don't want to query description
591 full_package = portage.best(
592 self.portdb.xmatch("match-all", package))
598 full_desc = self.portdb.aux_get(
599 full_package, ["DESCRIPTION"])[0]
601 print "emerge: search: aux_get() failed, skipping"
603 if self.searchre.search(full_desc):
604 self.matches["desc"].append([full_package,masked])
# Also search package sets (names and, with --searchdesc, their
# DESCRIPTION metadata). Sets are never flagged masked.
606 self.sdict = self.setconfig.getSets()
607 for setname in self.sdict:
608 self.spinner.update()
610 match_string = setname
612 match_string = setname.split("/")[-1]
614 if self.searchre.search(match_string):
615 self.matches["set"].append([setname, False])
616 elif self.searchdesc:
617 if self.searchre.search(
618 self.sdict[setname].getMetadata("DESCRIPTION")):
619 self.matches["set"].append([setname, False])
# Sort each result bucket and total the match count.
622 for mtype in self.matches:
623 self.matches[mtype].sort()
624 self.mlen += len(self.matches[mtype])
# Helper fragment (its def line is missing): classify a single cp as
# existing/masked via xmatch, appending to the "pkg" bucket.
627 if not self.portdb.xmatch("match-all", cp):
630 if not self.portdb.xmatch("bestmatch-visible", cp):
632 self.matches["pkg"].append([cp, masked])
# output() — the def line is on a missing source line.
636 """Outputs the results of the search."""
637 print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
638 print "[ Applications found : "+white(str(self.mlen))+" ]"
640 vardb = self.vartree.dbapi
641 for mtype in self.matches:
642 for match,masked in self.matches[mtype]:
646 full_package = self.portdb.xmatch(
647 "bestmatch-visible", match)
649 #no match found; we don't want to query description
651 full_package = portage.best(
652 self.portdb.xmatch("match-all",match))
# For description matches the stored value is a full cpv; reduce
# it to category/package for display.
653 elif mtype == "desc":
655 match = portage.cpv_getkey(match)
# Set results: name plus set DESCRIPTION only.
657 print green("*")+" "+white(match)
658 print " ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION")
662 desc, homepage, license = self.portdb.aux_get(
663 full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
665 print "emerge: search: aux_get() failed, skipping"
# Masked packages are labelled in red.
668 print green("*")+" "+white(match)+" "+red("[ Masked ]")
670 print green("*")+" "+white(match)
671 myversion = self.getVersion(full_package, search.VERSION_RELEASE)
675 mycat = match.split("/")[0]
676 mypkg = match.split("/")[1]
677 mycpv = match + "-" + myversion
678 myebuild = self.portdb.findname(mycpv)
# For ebuilds, compute the download size from the Manifest digests.
680 pkgdir = os.path.dirname(myebuild)
681 from portage import manifest
682 mf = manifest.Manifest(
683 pkgdir, self.settings["DISTDIR"])
685 uri_map = self.portdb.getFetchMap(mycpv)
686 except portage.exception.InvalidDependString, e:
687 file_size_str = "Unknown (%s)" % (e,)
691 mysum[0] = mf.getDistfilesSize(uri_map)
693 file_size_str = "Unknown (missing " + \
694 "digest for %s)" % (e,)
# For binary packages, fall back to the size of the package file.
699 if db is not vardb and \
700 db.cpv_exists(mycpv):
702 if not myebuild and hasattr(db, "bintree"):
703 myebuild = db.bintree.getname(mycpv)
705 mysum[0] = os.stat(myebuild).st_size
# Format the size in kB with a thousands separator (the loop that
# computes mycount is on missing lines).
710 if myebuild and file_size_str is None:
711 mystr = str(mysum[0] / 1024)
715 mystr = mystr[:mycount] + "," + mystr[mycount:]
716 file_size_str = mystr + " kB"
720 print " ", darkgreen("Latest version available:"),myversion
721 print " ", self.getInstallationStatus(mycat+'/'+mypkg)
724 (darkgreen("Size of files:"), file_size_str)
725 print " ", darkgreen("Homepage:")+" ",homepage
726 print " ", darkgreen("Description:")+" ",desc
727 print " ", darkgreen("License:")+" ",license
# Report the installed version of `package` (category/pn), or a
# "[ Not Installed ]" marker. Return statement is on a missing line.
732 def getInstallationStatus(self,package):
733 installed_package = self.vartree.dep_bestmatch(package)
735 version = self.getVersion(installed_package,search.VERSION_RELEASE)
737 result = darkgreen("Latest version installed:")+" "+version
739 result = darkgreen("Latest version installed:")+" [ Not Installed ]"
# Extract the version (optionally with the -rN revision suffix when
# detail == VERSION_RELEASE and the revision is not r0) from a cpv.
742 def getVersion(self,full_package,detail):
743 if len(full_package) > 1:
744 package_parts = portage.catpkgsplit(full_package)
745 if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
746 result = package_parts[2]+ "-" + package_parts[3]
748 result = package_parts[2]
# NOTE(review): fragmented — the docstring closer, the pkg_tree_map/
# tree_pkg_map setup lines and the setconfig fallback body are missing.
753 class RootConfig(object):
754 """This is used internally by depgraph to track information about a
# Maps Package.type_name values to the tree name holding that type.
758 "ebuild" : "porttree",
759 "binary" : "bintree",
760 "installed" : "vartree"
# Build the inverse mapping (tree name -> package type).
764 for k, v in pkg_tree_map.iteritems():
767 def __init__(self, settings, trees, setconfig):
769 self.settings = settings
770 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
771 self.root = self.settings["ROOT"]
772 self.setconfig = setconfig
# A fallback is presumably constructed when setconfig is None (the
# body is on missing lines — TODO confirm).
773 if setconfig is None:
776 self.sets = self.setconfig.getSets()
777 self.visible_pkgs = PackageVirtualDbapi(self.settings)
# Decide which atom (if any) should be recorded in the world file for a
# newly merged package.
# NOTE(review): fragmented — early returns, the `new_world_atom` default
# assignment, several if/else lines and loop headers are missing per the
# embedded numbering jumps.
779 def create_world_atom(pkg, args_set, root_config):
780 """Create a new atom for the world file if one does not exist. If the
781 argument atom is precise enough to identify a specific slot then a slot
782 atom will be returned. Atoms that are in the system set may also be stored
783 in world since system atoms can only match one slot while world atoms can
784 be greedy with respect to slots. Unslotted system packages will not be
# Find which argument atom selected this package.
787 arg_atom = args_set.findAtomForPackage(pkg)
790 cp = portage.dep_getkey(arg_atom)
792 sets = root_config.sets
793 portdb = root_config.trees["porttree"].dbapi
794 vardb = root_config.trees["vartree"].dbapi
# A package is "slotted" when more than one SLOT exists for its cp, or
# its single SLOT is not the default "0".
795 available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
796 for cpv in portdb.match(cp))
797 slotted = len(available_slots) > 1 or \
798 (len(available_slots) == 1 and "0" not in available_slots)
800 # check the vdb in case this is multislot
801 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
802 for cpv in vardb.match(cp))
803 slotted = len(available_slots) > 1 or \
804 (len(available_slots) == 1 and "0" not in available_slots)
805 if slotted and arg_atom != cp:
806 # If the user gave a specific atom, store it as a
807 # slot atom in the world file.
808 slot_atom = pkg.slot_atom
810 # For USE=multislot, there are a couple of cases to
813 # 1) SLOT="0", but the real SLOT spontaneously changed to some
814 # unknown value, so just record an unslotted atom.
816 # 2) SLOT comes from an installed package and there is no
817 # matching SLOT in the portage tree.
819 # Make sure that the slot atom is available in either the
820 # portdb or the vardb, since otherwise the user certainly
821 # doesn't want the SLOT atom recorded in the world file
822 # (case 1 above). If it's only available in the vardb,
823 # the user may be trying to prevent a USE=multislot
824 # package from being removed by --depclean (case 2 above).
827 if not portdb.match(slot_atom):
828 # SLOT seems to come from an installed multislot package
830 # If there is no installed package matching the SLOT atom,
831 # it probably changed SLOT spontaneously due to USE=multislot,
832 # so just record an unslotted atom.
833 if vardb.match(slot_atom):
834 # Now verify that the argument is precise
835 # enough to identify a specific slot.
# mydb is bound on a missing line (presumably the vardb here).
836 matches = mydb.match(arg_atom)
837 matched_slots = set()
# Loop header over `matches` is on a missing line.
839 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
840 if len(matched_slots) == 1:
841 new_world_atom = slot_atom
# Avoid duplicating an atom already present in world.
843 if new_world_atom == sets["world"].findAtomForPackage(pkg):
844 # Both atoms would be identical, so there's nothing to add.
847 # Unlike world atoms, system atoms are not greedy for slots, so they
848 # can't be safely excluded from world if they are slotted.
849 system_atom = sets["system"].findAtomForPackage(pkg)
851 if not portage.dep_getkey(system_atom).startswith("virtual/"):
853 # System virtuals aren't safe to exclude from world since they can
854 # match multiple old-style virtuals but only one of them will be
855 # pulled in by update or depclean.
856 providers = portdb.mysettings.getvirtuals().get(
857 portage.dep_getkey(system_atom))
858 if providers and len(providers) == 1 and providers[0] == cp:
860 return new_world_atom
# Strip the "+"/"-" default prefixes from IUSE flags.
# NOTE(review): the loop header and the yield statements are on missing
# lines; presumably a generator over `iuse` yielding flag[1:] or flag.
862 def filter_iuse_defaults(iuse):
864 if flag.startswith("+") or flag.startswith("-"):
# Lightweight base class: subclasses declare attributes via __slots__ and
# this base assigns them from keyword arguments, walking the MRO manually.
# NOTE(review): fragmented — the MRO-walk loop headers, the `if slots is
# None: continue` guards, the inner per-slot loops and the copy() def line
# are missing per the embedded numbering.
869 class SlotObject(object):
870 __slots__ = ("__weakref__",)
872 def __init__(self, **kwargs):
# Breadth-first walk over the class and its bases, collecting each
# class's own __slots__ and initializing them from kwargs.
873 classes = [self.__class__]
878 classes.extend(c.__bases__)
879 slots = getattr(c, "__slots__", None)
# Missing kwargs default the slot to None.
883 myvalue = kwargs.get(myattr, None)
884 setattr(self, myattr, myvalue)
# copy(): the def line is on a missing source line.
888 Create a new instance and copy all attributes
889 defined from __slots__ (including those from
892 obj = self.__class__()
# Same MRO walk as __init__, copying attribute values over.
894 classes = [self.__class__]
899 classes.extend(c.__bases__)
900 slots = getattr(c, "__slots__", None)
904 setattr(obj, myattr, getattr(self, myattr))
# Base class for dependency priorities. Subclasses define __int__ (not
# visible here); all rich comparisons delegate to that integer value so
# priorities compare directly against ints and each other.
# NOTE(review): the def line of the copy method (and blank separators) are
# on missing source lines per the embedded numbering.
908 class AbstractDepPriority(SlotObject):
909 __slots__ = ("buildtime", "runtime", "runtime_post")
911 def __lt__(self, other):
912 return self.__int__() < other
914 def __le__(self, other):
915 return self.__int__() <= other
917 def __eq__(self, other):
918 return self.__int__() == other
920 def __ne__(self, other):
921 return self.__int__() != other
923 def __gt__(self, other):
924 return self.__int__() > other
926 def __ge__(self, other):
927 return self.__int__() >= other
# Shallow copy via the copy module (its def line is missing here).
931 return copy.copy(self)
# Concrete dependency priority. NOTE(review): heavily fragmented — the
# __int__ implementation and most of __str__ (lines 936-946, 949-950) are
# missing; only the runtime_post branch of the string form is visible.
933 class DepPriority(AbstractDepPriority):
935 __slots__ = ("satisfied", "optional", "rebuild")
947 if self.runtime_post:
948 return "runtime_post"
# Priority used for blocker ("!atom") dependencies; the class body
# (lines 952-958) is missing from this extraction. A shared singleton
# is exposed as BlockerDepPriority.instance.
951 class BlockerDepPriority(DepPriority):
959 BlockerDepPriority.instance = BlockerDepPriority()
# Priority scheme used when ordering unmerges. NOTE(review): fragmented —
# the priority table body, __int__, and most of __str__ are on missing
# lines; SOFT is referenced below but its definition is not visible.
961 class UnmergeDepPriority(AbstractDepPriority):
962 __slots__ = ("optional", "satisfied",)
# Visible fragment of the docstring's priority table:
964 Combination of properties Priority Category
969 (none of the above) -2 SOFT
979 if self.runtime_post:
# __str__ fragment: values above SOFT presumably stringify differently
# (surrounding lines are missing — TODO confirm).
986 myvalue = self.__int__()
987 if myvalue > self.SOFT:
# Ignore-priority ladder for normal (non --buildpkgonly) graph traversal:
# each _ignore_* predicate says whether an edge of the given priority may
# be ignored at that softness level. The ignore_priority tuple maps a
# numeric hardness index to the matching predicate.
# NOTE(review): the @classmethod decorator lines, the early `return False`
# lines for non-DepPriority values, and part of the docstring table are
# missing per the embedded numbering.
991 class DepPriorityNormalRange(object):
# Visible fragment of the docstring's index table:
993 DepPriority properties Index Category
997 runtime_post 2 MEDIUM_SOFT
999 (none of the above) 0 NONE
# Softest: ignore only optional deps.
1007 def _ignore_optional(cls, priority):
1008 if priority.__class__ is not DepPriority:
1010 return bool(priority.optional)
# Also ignore runtime_post deps.
1013 def _ignore_runtime_post(cls, priority):
1014 if priority.__class__ is not DepPriority:
1016 return bool(priority.optional or priority.runtime_post)
# Hardest ignorable level: ignore everything except buildtime deps.
1019 def _ignore_runtime(cls, priority):
1020 if priority.__class__ is not DepPriority:
1022 return not priority.buildtime
# Named aliases used by callers that think in soft/medium terms.
1024 ignore_medium = _ignore_runtime
1025 ignore_medium_soft = _ignore_runtime_post
1026 ignore_soft = _ignore_optional
# Index 0 (None, presumably on a missing line) means "ignore nothing".
1028 DepPriorityNormalRange.ignore_priority = (
1030 DepPriorityNormalRange._ignore_optional,
1031 DepPriorityNormalRange._ignore_runtime_post,
1032 DepPriorityNormalRange._ignore_runtime
# Finer-grained ignore-priority ladder that additionally distinguishes
# satisfied deps (already-installed providers) from unsatisfied ones, so
# the resolver can break cycles by dropping satisfied edges first.
# NOTE(review): the @classmethod decorator lines and the early
# `return False` / `return True` lines are on missing source lines.
1035 class DepPrioritySatisfiedRange(object):
# Visible fragment of the docstring's index table:
1037 DepPriority Index Category
1039 not satisfied and buildtime HARD
1040 not satisfied and runtime 7 MEDIUM
1041 not satisfied and runtime_post 6 MEDIUM_SOFT
1042 satisfied and buildtime and rebuild 5 SOFT
1043 satisfied and buildtime 4 SOFT
1044 satisfied and runtime 3 SOFT
1045 satisfied and runtime_post 2 SOFT
1047 (none of the above) 0 NONE
# Softest: ignore only optional deps.
1055 def _ignore_optional(cls, priority):
1056 if priority.__class__ is not DepPriority:
1058 return bool(priority.optional)
# Also ignore satisfied runtime_post deps.
1061 def _ignore_satisfied_runtime_post(cls, priority):
1062 if priority.__class__ is not DepPriority:
1064 if priority.optional:
1066 if not priority.satisfied:
1068 return bool(priority.runtime_post)
# Also ignore satisfied non-buildtime deps.
1071 def _ignore_satisfied_runtime(cls, priority):
1072 if priority.__class__ is not DepPriority:
1074 if priority.optional:
1076 if not priority.satisfied:
1078 return not priority.buildtime
# Also ignore satisfied buildtime deps that don't require a rebuild.
1081 def _ignore_satisfied_buildtime(cls, priority):
1082 if priority.__class__ is not DepPriority:
1084 if priority.optional:
1086 if not priority.satisfied:
1088 if priority.buildtime:
1089 return not priority.rebuild
# Ignore any satisfied dep, rebuild or not.
1093 def _ignore_satisfied_buildtime_rebuild(cls, priority):
1094 if priority.__class__ is not DepPriority:
1096 if priority.optional:
1098 return bool(priority.satisfied)
# Also ignore unsatisfied runtime_post deps.
1101 def _ignore_runtime_post(cls, priority):
1102 if priority.__class__ is not DepPriority:
1104 return bool(priority.optional or \
1105 priority.satisfied or \
1106 priority.runtime_post)
# Hardest ignorable level: keep only unsatisfied buildtime deps.
1109 def _ignore_runtime(cls, priority):
1110 if priority.__class__ is not DepPriority:
1112 return bool(priority.satisfied or \
1113 not priority.buildtime)
# Named aliases mirroring DepPriorityNormalRange's naming.
1115 ignore_medium = _ignore_runtime
1116 ignore_medium_soft = _ignore_runtime_post
1117 ignore_soft = _ignore_satisfied_buildtime_rebuild
# Index 0 (None, presumably on a missing line) means "ignore nothing".
1119 DepPrioritySatisfiedRange.ignore_priority = (
1121 DepPrioritySatisfiedRange._ignore_optional,
1122 DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1123 DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1124 DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1125 DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1126 DepPrioritySatisfiedRange._ignore_runtime_post,
1127 DepPrioritySatisfiedRange._ignore_runtime
# Compute the transitive closure of runtime dependencies of system-set
# packages in the graph (used so depclean/--complete treat deep system
# runtime deps specially).
# NOTE(review): fragmented — the seed loop header over graph nodes, the
# continue/return statements inside both loops, the while-loop header and
# ignore_priority's return lines are on missing source lines.
1130 def _find_deep_system_runtime_deps(graph):
1131 deep_system_deps = set()
# Seed phase: collect installed (non-uninstall) Package nodes that belong
# to the system set (the loop header is on a missing line).
1134 if not isinstance(node, Package) or \
1135 node.operation == 'uninstall':
1137 if node.root_config.sets['system'].findAtomForPackage(node):
1138 node_stack.append(node)
1140 def ignore_priority(priority):
1142 Ignore non-runtime priorities.
# Presumably returns False for runtime/runtime_post DepPriority edges
# and True otherwise (returns are on missing lines — TODO confirm).
1144 if isinstance(priority, DepPriority) and \
1145 (priority.runtime or priority.runtime_post):
# Traversal phase: depth-first over runtime children, accumulating into
# deep_system_deps (the while header is on a missing line).
1150 node = node_stack.pop()
1151 if node in deep_system_deps:
1153 deep_system_deps.add(node)
1154 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1155 if not isinstance(child, Package) or \
1156 child.operation == 'uninstall':
1158 node_stack.append(child)
1160 return deep_system_deps
1162 class FakeVartree(portage.vartree):
1163 """This is implements an in-memory copy of a vartree instance that provides
1164 all the interfaces required for use by the depgraph. The vardb is locked
1165 during the constructor call just long enough to read a copy of the
1166 installed package information. This allows the depgraph to do it's
1167 dependency calculations without holding a lock on the vardb. It also
1168 allows things like vardb global updates to be done in memory so that the
1169 user doesn't necessarily need write access to the vardb in cases where
1170 global updates are necessary (updates are performed when necessary if there
1171 is not a matching ebuild in the tree)."""
1172 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1173 self._root_config = root_config
1174 if pkg_cache is None:
1176 real_vartree = root_config.trees["vartree"]
1177 portdb = root_config.trees["porttree"].dbapi
1178 self.root = real_vartree.root
1179 self.settings = real_vartree.settings
1180 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1181 if "_mtime_" not in mykeys:
1182 mykeys.append("_mtime_")
1183 self._db_keys = mykeys
1184 self._pkg_cache = pkg_cache
1185 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1186 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1188 # At least the parent needs to exist for the lock file.
1189 portage.util.ensure_dirs(vdb_path)
1190 except portage.exception.PortageException:
1194 if acquire_lock and os.access(vdb_path, os.W_OK):
1195 vdb_lock = portage.locks.lockdir(vdb_path)
1196 real_dbapi = real_vartree.dbapi
1198 for cpv in real_dbapi.cpv_all():
1199 cache_key = ("installed", self.root, cpv, "nomerge")
1200 pkg = self._pkg_cache.get(cache_key)
1202 metadata = pkg.metadata
1204 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1205 myslot = metadata["SLOT"]
1206 mycp = portage.dep_getkey(cpv)
1207 myslot_atom = "%s:%s" % (mycp, myslot)
1209 mycounter = long(metadata["COUNTER"])
1212 metadata["COUNTER"] = str(mycounter)
1213 other_counter = slot_counters.get(myslot_atom, None)
1214 if other_counter is not None:
1215 if other_counter > mycounter:
1217 slot_counters[myslot_atom] = mycounter
1219 pkg = Package(built=True, cpv=cpv,
1220 installed=True, metadata=metadata,
1221 root_config=root_config, type_name="installed")
1222 self._pkg_cache[pkg] = pkg
1223 self.dbapi.cpv_inject(pkg)
1224 real_dbapi.flush_cache()
1227 portage.locks.unlockdir(vdb_lock)
1228 # Populate the old-style virtuals using the cached values.
1229 if not self.settings.treeVirtuals:
1230 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1231 portage.getCPFromCPV, self.get_all_provides())
1233 # Intialize variables needed for lazy cache pulls of the live ebuild
1234 # metadata. This ensures that the vardb lock is released ASAP, without
1235 # being delayed in case cache generation is triggered.
1236 self._aux_get = self.dbapi.aux_get
1237 self.dbapi.aux_get = self._aux_get_wrapper
1238 self._match = self.dbapi.match
1239 self.dbapi.match = self._match_wrapper
1240 self._aux_get_history = set()
1241 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1242 self._portdb = portdb
1243 self._global_updates = None
1245 def _match_wrapper(self, cpv, use_cache=1):
1247 Make sure the metadata in Package instances gets updated for any
1248 cpv that is returned from a match() call, since the metadata can
1249 be accessed directly from the Package instance instead of via
1252 matches = self._match(cpv, use_cache=use_cache)
1254 if cpv in self._aux_get_history:
1256 self._aux_get_wrapper(cpv, [])
1259 def _aux_get_wrapper(self, pkg, wants):
1260 if pkg in self._aux_get_history:
1261 return self._aux_get(pkg, wants)
1262 self._aux_get_history.add(pkg)
1264 # Use the live ebuild metadata if possible.
1265 live_metadata = dict(izip(self._portdb_keys,
1266 self._portdb.aux_get(pkg, self._portdb_keys)))
1267 if not portage.eapi_is_supported(live_metadata["EAPI"]):
1269 self.dbapi.aux_update(pkg, live_metadata)
1270 except (KeyError, portage.exception.PortageException):
1271 if self._global_updates is None:
1272 self._global_updates = \
1273 grab_global_updates(self._portdb.porttree_root)
1274 perform_global_updates(
1275 pkg, self.dbapi, self._global_updates)
1276 return self._aux_get(pkg, wants)
def sync(self, acquire_lock=1):
	"""
	Call this method to synchronize state with the real vardb
	after one or more packages may have been installed or
	uninstalled (NOTE(review): docstring tail truncated in this
	excerpt).

	NOTE(review): this excerpt is missing interleaved source lines
	throughout this method (try/finally lock scaffolding, local
	inits for "root" and "slot_counters", parts of conditions); the
	code below "real_vardb = ..." may belong to a separate helper
	method whose "def" line is not visible -- confirm.
	"""
	vdb_path = os.path.join(self.root, portage.VDB_PATH)
	# At least the parent needs to exist for the lock file.
	portage.util.ensure_dirs(vdb_path)
	except portage.exception.PortageException:
	# Take the vardb lock only when the directory is writable, so a
	# read-only invocation does not fail on the lock file.
	if acquire_lock and os.access(vdb_path, os.W_OK):
		vdb_lock = portage.locks.lockdir(vdb_path)
	portage.locks.unlockdir(vdb_lock)
	real_vardb = self._root_config.trees["vartree"].dbapi
	current_cpv_set = frozenset(real_vardb.cpv_all())
	pkg_vardb = self.dbapi
	aux_get_history = self._aux_get_history
	# Remove any packages that have been uninstalled.
	for pkg in list(pkg_vardb):
		if pkg.cpv not in current_cpv_set:
			pkg_vardb.cpv_remove(pkg)
			aux_get_history.discard(pkg.cpv)
	# Validate counters and timestamps.
	validation_keys = ["COUNTER", "_mtime_"]
	for cpv in current_cpv_set:
		pkg_hash_key = ("installed", root, cpv, "nomerge")
		pkg = pkg_vardb.get(pkg_hash_key)
		counter, mtime = real_vardb.aux_get(cpv, validation_keys)
		counter = long(counter)
		# Drop stale cache entries whose COUNTER/mtime no longer
		# match the real vardb (condition partially missing here).
		if counter != pkg.counter or \
			pkg_vardb.cpv_remove(pkg)
			aux_get_history.discard(pkg.cpv)
		pkg = self._pkg(cpv)
		# Track the highest COUNTER seen per slot atom.
		other_counter = slot_counters.get(pkg.slot_atom)
		if other_counter is not None:
			if other_counter > pkg.counter:
		slot_counters[pkg.slot_atom] = pkg.counter
		pkg_vardb.cpv_inject(pkg)
	real_vardb.flush_cache()
def _pkg(self, cpv):
	"""
	Build an installed-package Package instance for cpv using metadata
	from the real vardb, normalizing COUNTER to a string integer.
	NOTE(review): the excerpt omits the try/except presumably wrapped
	around the COUNTER conversion and the final return.
	"""
	root_config = self._root_config
	real_vardb = root_config.trees["vartree"].dbapi
	pkg = Package(cpv=cpv, installed=True,
		metadata=izip(self._db_keys,
			real_vardb.aux_get(cpv, self._db_keys)),
		root_config=root_config,
		type_name="installed")
	# Normalize COUNTER: parse it as an integer and store it back as
	# its canonical string form.
	mycounter = long(pkg.metadata["COUNTER"])
	pkg.metadata["COUNTER"] = str(mycounter)
def grab_global_updates(portdir):
	"""
	Collect the update commands from every file under
	$PORTDIR/profiles/updates and return them as one list.
	NOTE(review): excerpt omits the try scaffolding, the handler body
	for DirectoryNotFound, the initialization of upd_commands and the
	final return.
	"""
	from portage.update import grab_updates, parse_updates
	updpath = os.path.join(portdir, "profiles", "updates")
	rawupdates = grab_updates(updpath)
	except portage.exception.DirectoryNotFound:
	for mykey, mystat, mycontent in rawupdates:
		# parse_updates() yields (commands, errors); errors are not
		# used in the visible code.  Note "commands" shadows the
		# module-level "commands" import.
		commands, errors = parse_updates(mycontent)
		upd_commands.extend(commands)
def perform_global_updates(mycpv, mydb, mycommands):
	"""
	Apply the given update commands (as produced by
	grab_global_updates()) to the *DEPEND entries of mycpv in mydb.
	NOTE(review): the excerpt omits one line between update_dbentries()
	and aux_update() -- likely an "if updates:" guard; confirm before
	assuming aux_update() is called unconditionally.
	"""
	from portage.update import update_dbentries
	aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
	aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
	updates = update_dbentries(mycommands, aux_dict)
	mydb.aux_update(mycpv, updates)
def visible(pkgsettings, pkg):
	"""
	Check if a package is visible. This can raise an InvalidDependString
	exception if LICENSE is invalid.
	TODO: optionally generate a list of masking reasons

	@returns: True if the package is visible, False otherwise.

	NOTE(review): this excerpt omits the "return" lines of the checks
	below (each failed check presumably returns False) and the try
	opener for the InvalidDependString handler.
	"""
	# A package with no SLOT is invalid.
	if not pkg.metadata["SLOT"]:
	# CHOST acceptance only applies to packages not yet installed.
	if not pkg.installed:
		if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
	eapi = pkg.metadata["EAPI"]
	if not portage.eapi_is_supported(eapi):
	if not pkg.installed:
		if portage._eapi_is_deprecated(eapi):
	# Keyword / package.mask / profile mask checks.
	if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
	if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
	if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
	# LICENSE parsing may raise InvalidDependString (see docstring).
	if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
	except portage.exception.InvalidDependString:
def get_masking_status(pkg, pkgsettings, root_config):
	"""
	Return the list of masking reasons for pkg: the output of
	portage.getmaskingstatus() extended with a CHOST-acceptance check
	(uninstalled packages only) and a missing-SLOT check.
	NOTE(review): the final "return mreasons" is not visible in this
	excerpt.
	"""
	mreasons = portage.getmaskingstatus(
		pkg, settings=pkgsettings,
		portdb=root_config.trees["porttree"].dbapi)
	# Installed packages keep whatever CHOST they were built with.
	if not pkg.installed:
		if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
			mreasons.append("CHOST: %s" % \
				pkg.metadata["CHOST"])
	if not pkg.metadata["SLOT"]:
		mreasons.append("invalid: SLOT is undefined")
def get_mask_info(root_config, cpv, pkgsettings,
	db, pkg_type, built, installed, db_keys):
	"""
	Collect (metadata, mreasons) for cpv from db.  metadata is None
	when aux_get fails ("corruption" is reported); otherwise masking
	reasons come from the EAPI-support check or get_masking_status().
	NOTE(review): excerpt omits the try/except around aux_get and some
	connective lines between the branches below.
	"""
	metadata = dict(izip(db_keys,
		db.aux_get(cpv, db_keys)))
	if metadata and not built:
		# Unbuilt ebuilds: derive USE/CHOST from the config instance.
		pkgsettings.setcpv(cpv, mydb=metadata)
		metadata["USE"] = pkgsettings["PORTAGE_USE"]
		metadata['CHOST'] = pkgsettings.get('CHOST', '')
	if metadata is None:
		mreasons = ["corruption"]
	eapi = metadata['EAPI']
	if not portage.eapi_is_supported(eapi):
		mreasons = ['EAPI %s' % eapi]
	pkg = Package(type_name=pkg_type, root_config=root_config,
		cpv=cpv, built=built, installed=installed, metadata=metadata)
	mreasons = get_masking_status(pkg, pkgsettings, root_config)
	return metadata, mreasons
def show_masked_packages(masked_packages):
	"""
	Print one "- cpv (masked by: ...)" line per masked package, plus
	any package.mask comment and license-file locations (each comment
	and license shown at most once).  Returns True if any package was
	masked by an unsupported EAPI.
	NOTE(review): excerpt omits the shown_cpvs initialization, several
	"continue" lines, the try opener for the license lookup, and the
	print statements for comment/msg output.
	"""
	shown_licenses = set()
	shown_comments = set()
	# Maybe there is both an ebuild and a binary. Only
	# show one of them to avoid redundant appearance.
	have_eapi_mask = False
	for (root_config, pkgsettings, cpv,
		metadata, mreasons) in masked_packages:
		if cpv in shown_cpvs:
		comment, filename = None, None
		if "package.mask" in mreasons:
			# Locate the package.mask entry (and its comment block)
			# responsible for masking this cpv.
			comment, filename = \
				portage.getmaskingreason(
				cpv, metadata=metadata,
				settings=pkgsettings,
				portdb=root_config.trees["porttree"].dbapi,
				return_location=True)
		missing_licenses = []
		if not portage.eapi_is_supported(metadata["EAPI"]):
			have_eapi_mask = True
		missing_licenses = \
			pkgsettings._getMissingLicenses(
		except portage.exception.InvalidDependString:
			# This will have already been reported
			# above via mreasons.
		print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
		if comment and comment not in shown_comments:
			shown_comments.add(comment)
		portdb = root_config.trees["porttree"].dbapi
		for l in missing_licenses:
			l_path = portdb.findLicensePath(l)
			if l in shown_licenses:
			msg = ("A copy of the '%s' license" + \
				" is located at '%s'.") % (l, l_path)
			shown_licenses.add(l)
	return have_eapi_mask
class Task(SlotObject):
	"""
	Base class for hashable task objects.  Identity is defined by a
	subclass-provided _hash_key tuple; equality, hashing, indexing,
	iteration, containment and str() all delegate to it, with the
	hash value cached in _hash_value.
	NOTE(review): this excerpt omits several "def" lines -- the
	orphaned statement groups below belong to __hash__, __len__,
	__iter__ and __str__ in the full file -- and the final
	"return hash_key" of _get_hash_key().
	"""
	__slots__ = ("_hash_key", "_hash_value")

	def _get_hash_key(self):
		# Subclasses are responsible for populating self._hash_key.
		hash_key = getattr(self, "_hash_key", None)
		if hash_key is None:
			raise NotImplementedError(self)

	def __eq__(self, other):
		return self._get_hash_key() == other

	def __ne__(self, other):
		return self._get_hash_key() != other

		# (belongs to __hash__ -- caches the tuple hash)
		hash_value = getattr(self, "_hash_value", None)
		if hash_value is None:
			self._hash_value = hash(self._get_hash_key())
		return self._hash_value

		return len(self._get_hash_key())

	def __getitem__(self, key):
		return self._get_hash_key()[key]

		return iter(self._get_hash_key())

	def __contains__(self, key):
		return key in self._get_hash_key()

		return str(self._get_hash_key())
class Blocker(Task):
	"""
	Task representing a blocker atom for a given root; the hash key is
	the tuple ("blocks", root, atom, eapi).
	NOTE(review): the "self._hash_key =" assignment line preceding the
	tuple in _get_hash_key() is not visible in this excerpt.
	"""

	__hash__ = Task.__hash__
	__slots__ = ("root", "atom", "cp", "eapi", "satisfied")

	def __init__(self, **kwargs):
		Task.__init__(self, **kwargs)
		# Cache the category/package key of the blocker atom.
		self.cp = portage.dep_getkey(self.atom)

	def _get_hash_key(self):
		hash_key = getattr(self, "_hash_key", None)
		if hash_key is None:
			("blocks", self.root, self.atom, self.eapi)
		return self._hash_key
class Package(Task):
	"""
	Task representing one package instance (ebuild, binary or
	installed).  The hash key is (type_name, root, cpv, operation).
	Metadata access goes through _PackageMetadataWrapper so derived
	attributes (cp, slot_atom, iuse, use, counter, ...) stay in sync
	with metadata writes.
	NOTE(review): this excerpt is missing many interleaved lines: the
	opening of the metadata_keys list, the "slot" local and inner
	"_use" class header in/around __init__, loop scaffolding in
	_iuse.__init__, the "if name == .../try:" wrapper in
	__getattribute__, the "self._hash_key =" assignment, and the
	return lines of every comparison operator.  Tokens below are
	reproduced as-is.
	"""

	__hash__ = Task.__hash__
	__slots__ = ("built", "cpv", "depth",
		"installed", "metadata", "onlydeps", "operation",
		"root_config", "type_name",
		"category", "counter", "cp", "cpv_split",
		"inherited", "iuse", "mtime",
		"pf", "pv_split", "root", "slot", "slot_atom", "use")

	# (the "metadata_keys = [" opener is not visible in this excerpt)
		"CHOST", "COUNTER", "DEPEND", "EAPI",
		"INHERITED", "IUSE", "KEYWORDS",
		"LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
		"repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]

	def __init__(self, **kwargs):
		Task.__init__(self, **kwargs)
		self.root = self.root_config.root
		# Wrap metadata so attribute mirrors are maintained on every
		# __setitem__ (see _PackageMetadataWrapper).
		self.metadata = _PackageMetadataWrapper(self, self.metadata)
		self.cp = portage.cpv_getkey(self.cpv)
		# Avoid an InvalidAtom exception when creating slot_atom.
		# This package instance will be masked due to empty SLOT.
		self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
		self.category, self.pf = portage.catsplit(self.cpv)
		self.cpv_split = portage.catpkgsplit(self.cpv)
		self.pv_split = self.cpv_split[1:]

	# (inner class header for "_use" is not visible in this excerpt)
		__slots__ = ("__weakref__", "enabled")

		def __init__(self, use):
			# Immutable set of enabled USE flags.
			self.enabled = frozenset(use)

	class _iuse(object):
		# IUSE token container exposing enabled/disabled/all views and
		# a lazily-compiled validity regex.

		__slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")

		def __init__(self, tokens, iuse_implicit):
			self.tokens = tuple(tokens)
			self.iuse_implicit = iuse_implicit
			# (token-classification loop scaffolding not visible here;
			# "+x"/"-x" prefixes are stripped into the two lists)
				enabled.append(x[1:])
				disabled.append(x[1:])
			self.enabled = frozenset(enabled)
			self.disabled = frozenset(disabled)
			self.all = frozenset(chain(enabled, disabled, other))

		def __getattribute__(self, name):
			# Lazily compile self.regex on first access.
			# (the "if name == ..." / "try:" wrapper is not visible in
			# this excerpt)
				return object.__getattribute__(self, "regex")
			except AttributeError:
				all = object.__getattribute__(self, "all")
				iuse_implicit = object.__getattribute__(self, "iuse_implicit")
				# Escape anything except ".*" which is supposed
				# to pass through from _get_implicit_iuse()
				regex = (re.escape(x) for x in chain(all, iuse_implicit))
				regex = "^(%s)$" % "|".join(regex)
				regex = regex.replace("\\.\\*", ".*")
				self.regex = re.compile(regex)
			return object.__getattribute__(self, name)

	def _get_hash_key(self):
		hash_key = getattr(self, "_hash_key", None)
		if hash_key is None:
			# Default the operation: "merge" unless the package is
			# installed or deps-only, in which case "nomerge".
			if self.operation is None:
				self.operation = "merge"
				if self.onlydeps or self.installed:
					self.operation = "nomerge"
			(self.type_name, self.root, self.cpv, self.operation)
		return self._hash_key

	def __lt__(self, other):
		if other.cp != self.cp:
		if portage.pkgcmp(self.pv_split, other.pv_split) < 0:

	def __le__(self, other):
		if other.cp != self.cp:
		if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:

	def __gt__(self, other):
		if other.cp != self.cp:
		if portage.pkgcmp(self.pv_split, other.pv_split) > 0:

	def __ge__(self, other):
		if other.cp != self.cp:
		if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
# Full set of metadata keys tracked for Package instances: every
# auxdb key except the UNUSED_* placeholders and CDEPEND, plus the
# keys Package.metadata_keys requires.
_all_metadata_keys = set(x for x in portage.auxdbkeys \
	if not x.startswith("UNUSED_"))
_all_metadata_keys.discard("CDEPEND")
_all_metadata_keys.update(Package.metadata_keys)

from portage.cache.mappings import slot_dict_class
# Slot-based dict class (keys restricted to _all_metadata_keys) used
# as the base of _PackageMetadataWrapper below.
_PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
	"""
	Detect metadata updates and synchronize Package attributes.
	Writing any key listed in _wrapped_keys dispatches to the matching
	_set_<key>() hook, which mirrors the value onto the owning Package
	instance.
	NOTE(review): excerpt omits a few lines -- the "self._pkg = pkg"
	assignment in __init__, the body of _set_slot, and the integer
	conversion scaffolding in _set_counter/_set__mtime_.
	"""

	__slots__ = ("_pkg",)
	# Keys whose writes must be mirrored onto the Package instance.
	_wrapped_keys = frozenset(
		["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])

	def __init__(self, pkg, metadata):
		_PackageMetadataWrapperBase.__init__(self)
		# update() routes through __setitem__, firing the hooks.
		self.update(metadata)

	def __setitem__(self, k, v):
		_PackageMetadataWrapperBase.__setitem__(self, k, v)
		if k in self._wrapped_keys:
			# Dispatch to _set_counter, _set_iuse, etc.
			getattr(self, "_set_" + k.lower())(k, v)

	def _set_inherited(self, k, v):
		# INHERITED may arrive as a whitespace-separated string.
		if isinstance(v, basestring):
			v = frozenset(v.split())
		self._pkg.inherited = v

	def _set_iuse(self, k, v):
		self._pkg.iuse = self._pkg._iuse(
			v.split(), self._pkg.root_config.iuse_implicit)

	def _set_slot(self, k, v):
	# (body not visible in this excerpt)

	def _set_use(self, k, v):
		self._pkg.use = self._pkg._use(v.split())

	def _set_counter(self, k, v):
		if isinstance(v, basestring):
		# (string-to-integer conversion lines not visible here)
		self._pkg.counter = v

	def _set__mtime_(self, k, v):
		if isinstance(v, basestring):
		# (remainder of this method not visible in this excerpt)
class EbuildFetchonly(SlotObject):
	"""
	Fetch (or pretend-fetch) the distfiles of a single ebuild, as used
	by fetch-only operation modes.
	NOTE(review): this excerpt omits several lines, including the
	"def" headers of the two entry methods whose bodies appear below,
	try/except and try/finally scaffolding in
	_execute_with_builddir(), the branch opener selecting between the
	builddir and direct doebuild paths, and various returns.
	"""

	__slots__ = ("fetch_all", "pkg", "pretend", "settings")

	# (method header not visible in this excerpt; "pkg" is presumably
	# bound from self.pkg on a missing line -- confirm)
		settings = self.settings
		portdb = pkg.root_config.trees["porttree"].dbapi
		ebuild_path = portdb.findname(pkg.cpv)
		settings.setcpv(pkg)
		debug = settings.get("PORTAGE_DEBUG") == "1"
		use_cache = 1 # always true
		portage.doebuild_environment(ebuild_path, "fetch",
			settings["ROOT"], settings, debug, use_cache, portdb)
		restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()

			rval = self._execute_with_builddir()
			rval = portage.doebuild(ebuild_path, "fetch",
				settings["ROOT"], settings, debug=debug,
				listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
				mydbapi=portdb, tree="porttree")

		if rval != os.EX_OK:
			msg = "Fetch failed for '%s'" % (pkg.cpv,)
			eerror(msg, phase="unpack", key=pkg.cpv)

	def _execute_with_builddir(self):
		# To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
		# ensuring sane $PWD (bug #239560) and storing elog
		# messages. Use a private temp directory, in order
		# to avoid locking the main one.
		settings = self.settings
		global_tmpdir = settings["PORTAGE_TMPDIR"]
		from tempfile import mkdtemp
			private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
			if e.errno != portage.exception.PermissionDenied.errno:
			raise portage.exception.PermissionDenied(global_tmpdir)
		# Point PORTAGE_TMPDIR at the private directory for the
		# duration of the fetch, restoring it afterwards.
		settings["PORTAGE_TMPDIR"] = private_tmpdir
		settings.backup_changes("PORTAGE_TMPDIR")
			retval = self._execute()
			settings["PORTAGE_TMPDIR"] = global_tmpdir
			settings.backup_changes("PORTAGE_TMPDIR")
			shutil.rmtree(private_tmpdir)

	# (method header for _execute is not visible in this excerpt)
		settings = self.settings
		root_config = pkg.root_config
		portdb = root_config.trees["porttree"].dbapi
		ebuild_path = portdb.findname(pkg.cpv)
		debug = settings.get("PORTAGE_DEBUG") == "1"
		retval = portage.doebuild(ebuild_path, "fetch",
			self.settings["ROOT"], self.settings, debug=debug,
			listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
			mydbapi=portdb, tree="porttree")

		if retval != os.EX_OK:
			msg = "Fetch failed for '%s'" % (pkg.cpv,)
			eerror(msg, phase="unpack", key=pkg.cpv)
		# Flush any elog messages produced during the fetch.
		portage.elog.elog_process(self.pkg.cpv, self.settings)
class PollConstants(object):

	"""
	Provides POLL* constants that are equivalent to those from the
	select module, for use by PollSelectAdapter.
	NOTE(review): the loop that enumerates "names" (binding k and a
	fallback value v) is not visible in this excerpt.
	"""

	names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
	# Prefer the real select-module constant when available, falling
	# back to the locally generated value v otherwise.
		locals()[k] = getattr(select, k, v)
class AsynchronousTask(SlotObject):
	"""
	Subclasses override _wait() and _poll() so that calls
	to public methods can be wrapped for implementing
	hooks such as exit listener notification.

	Sublasses should call self.wait() to notify exit listeners after
	the task is complete and self.returncode has been set.

	NOTE(review): this excerpt omits many "def" lines (the public
	start/isAlive/poll/wait/cancel methods and private hooks); the
	orphaned statement groups below belong to those methods in the
	full file.
	"""

	__slots__ = ("background", "cancelled", "returncode") + \
		("_exit_listeners", "_exit_listener_stack", "_start_listeners")

		"""
		Start an asynchronous task and then return as soon as possible.
		"""

		raise NotImplementedError(self)

		return self.returncode is None

		return self.returncode

		if self.returncode is None:
		return self.returncode

		return self.returncode

		self.cancelled = True

	def addStartListener(self, f):
		"""
		The function will be called with one argument, a reference to self.
		"""
		if self._start_listeners is None:
			self._start_listeners = []
		self._start_listeners.append(f)

	def removeStartListener(self, f):
		if self._start_listeners is None:
		self._start_listeners.remove(f)

	def _start_hook(self):
		# Fire each registered start listener once, clearing the list
		# first so listeners cannot be double-fired re-entrantly.
		if self._start_listeners is not None:
			start_listeners = self._start_listeners
			self._start_listeners = None

			for f in start_listeners:

	def addExitListener(self, f):
		"""
		The function will be called with one argument, a reference to self.
		"""
		if self._exit_listeners is None:
			self._exit_listeners = []
		self._exit_listeners.append(f)

	def removeExitListener(self, f):
		# Listeners may live either in the normal list or, during
		# _wait_hook(), in the consumption stack.
		if self._exit_listeners is None:
			if self._exit_listener_stack is not None:
				self._exit_listener_stack.remove(f)
		self._exit_listeners.remove(f)

	def _wait_hook(self):
		"""
		Call this method after the task completes, just before returning
		the returncode from wait() or poll(). This hook is
		used to trigger exit listeners when the returncode first
		becomes available.
		"""
		if self.returncode is not None and \
			self._exit_listeners is not None:

			# This prevents recursion, in case one of the
			# exit handlers triggers this method again by
			# calling wait(). Use a stack that gives
			# removeExitListener() an opportunity to consume
			# listeners from the stack, before they can get
			# called below. This is necessary because a call
			# to one exit listener may result in a call to
			# removeExitListener() for another listener on
			# the stack. That listener needs to be removed
			# from the stack since it would be inconsistent
			# to call it after it has been been passed into
			# removeExitListener().
			self._exit_listener_stack = self._exit_listeners
			self._exit_listeners = None

			self._exit_listener_stack.reverse()
			while self._exit_listener_stack:
				self._exit_listener_stack.pop()(self)
class AbstractPollTask(AsynchronousTask):
	"""
	Base class for tasks driven by a scheduler poll() loop.
	NOTE(review): this excerpt omits the continuations of both
	class-attribute expressions below and the bodies of the
	_unregister_if_appropriate() branches.
	"""

	__slots__ = ("scheduler",) + \
	_exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
	_registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
	def _unregister(self):
		# Subclasses must implement scheduler deregistration.
		raise NotImplementedError(self)

	def _unregister_if_appropriate(self, event):
		# Deregister on exceptional events (POLLERR/POLLNVAL) or on
		# hangup; branch bodies are not visible in this excerpt.
		if self._registered:
			if event & self._exceptional_events:
			elif event & PollConstants.POLLHUP:
class PipeReader(AbstractPollTask):

	"""
	Reads output from one or more files and saves it in memory,
	for retrieval via the getvalue() method. This is driven by
	the scheduler's poll() loop.
	NOTE(review): this excerpt omits several "def" lines; the orphaned
	statement groups below belong to _start/isAlive/cancel/_wait and
	the getvalue/close methods in the full file, and some branch
	bodies are missing.
	"""

	__slots__ = ("input_files",) + \
		("_read_data", "_reg_ids")

	# (method header not visible -- registration logic for _start)
		self._reg_ids = set()
		self._read_data = []
		for k, f in self.input_files.iteritems():
			# Switch every input to non-blocking mode before handing
			# its fd to the scheduler.
			fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
				fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
			self._reg_ids.add(self.scheduler.register(f.fileno(),
				self._registered_events, self._output_handler))
		self._registered = True

		return self._registered

		if self.returncode is None:
			self.cancelled = True

		if self.returncode is not None:
			return self.returncode

		# Pump the scheduler until all registrations have drained.
		if self._registered:
			self.scheduler.schedule(self._reg_ids)

		self.returncode = os.EX_OK
		return self.returncode

		"""Retrieve the entire contents"""
		if sys.hexversion >= 0x3000000:
			return bytes().join(self._read_data)
		return "".join(self._read_data)

		"""Free the memory buffer."""
		self._read_data = None

	def _output_handler(self, fd, event):
		# Scheduler callback: drain readable data into _read_data.
		if event & PollConstants.POLLIN:

			# Find the file object that owns this fd.
			for f in self.input_files.itervalues():
				if fd == f.fileno():

			buf = array.array('B')
			buf.fromfile(f, self._bufsize)

			self._read_data.append(buf.tostring())

		self._unregister_if_appropriate(event)
		return self._registered

	def _unregister(self):
		"""
		Unregister from the scheduler and close open files.
		"""

		self._registered = False

		if self._reg_ids is not None:
			for reg_id in self._reg_ids:
				self.scheduler.unregister(reg_id)
			self._reg_ids = None

		if self.input_files is not None:
			for f in self.input_files.itervalues():
			# (the per-file close() call is not visible here)
			self.input_files = None
class CompositeTask(AsynchronousTask):
	"""
	AsynchronousTask composed of a chain of sub-tasks; exactly one
	sub-task (self._current_task) is in flight at a time.
	NOTE(review): this excerpt omits several "def" lines and
	connective statements; the orphaned fragments below belong to
	isAlive/cancel/_poll/_wait in the full file.
	"""

	__slots__ = ("scheduler",) + ("_current_task",)

		return self._current_task is not None

		self.cancelled = True
		if self._current_task is not None:
			self._current_task.cancel()

		"""
		This does a loop calling self._current_task.poll()
		repeatedly as long as the value of self._current_task
		keeps changing. It calls poll() a maximum of one time
		for a given self._current_task instance. This is useful
		since calling poll() on a task can trigger advance to
		the next task could eventually lead to the returncode
		being set in cases when polling only a single task would
		not have the same effect.
		"""

		task = self._current_task
		if task is None or task is prev:
			# don't poll the same task more than once

		return self.returncode

		task = self._current_task
		# don't wait for the same task more than once
		# Before the task.wait() method returned, an exit
		# listener should have set self._current_task to either
		# a different task or None. Something is wrong.
		raise AssertionError("self._current_task has not " + \
			"changed since calling wait", self, task)

		return self.returncode

	def _assert_current(self, task):
		"""
		Raises an AssertionError if the given task is not the
		same one as self._current_task. This can be useful
		for catching subclass bugs (NOTE(review): docstring tail not
		visible in this excerpt).
		"""
		if task is not self._current_task:
			raise AssertionError("Unrecognized task: %s" % (task,))

	def _default_exit(self, task):
		"""
		Calls _assert_current() on the given task and then sets the
		composite returncode attribute if task.returncode != os.EX_OK.
		If the task failed then self._current_task will be set to None.
		Subclasses can use this as a generic task exit callback.

		@returns: The task.returncode attribute.
		"""
		self._assert_current(task)
		if task.returncode != os.EX_OK:
			self.returncode = task.returncode
			self._current_task = None
		return task.returncode

	def _final_exit(self, task):
		"""
		Assumes that task is the final task of this composite task.
		Calls _default_exit() and sets self.returncode to the task's
		returncode and sets self._current_task to None.
		"""
		self._default_exit(task)
		self._current_task = None
		self.returncode = task.returncode
		return self.returncode

	def _default_final_exit(self, task):
		"""
		This calls _final_exit() and then wait().

		Subclasses can use this as a generic final task exit callback.
		"""
		self._final_exit(task)
		# (the wait() call is not visible in this excerpt)

	def _start_task(self, task, exit_handler):
		"""
		Register exit handler for the given task, set it
		as self._current_task, and call task.start().

		Subclasses can use this as a generic way to start
		a sub-task (NOTE(review): docstring tail and the task.start()
		call are not visible in this excerpt).
		"""
		task.addExitListener(exit_handler)
		self._current_task = task
class TaskSequence(CompositeTask):
	"""
	A collection of tasks that executes sequentially. Each task
	must have a addExitListener() method that can be used as
	a means to trigger movement from one task to the next.
	NOTE(review): the "def" lines of start() and cancel(), plus a
	couple of connective lines in _task_exit_handler(), are not
	visible in this excerpt.
	"""

	__slots__ = ("_task_queue",)

	def __init__(self, **kwargs):
		AsynchronousTask.__init__(self, **kwargs)
		# FIFO of pending tasks; deque gives O(1) popleft().
		self._task_queue = deque()

	def add(self, task):
		self._task_queue.append(task)

		self._start_next_task()

		self._task_queue.clear()
		CompositeTask.cancel(self)

	def _start_next_task(self):
		self._start_task(self._task_queue.popleft(),
			self._task_exit_handler)

	def _task_exit_handler(self, task):
		# Stop on first failure; otherwise advance through the queue
		# and finish via _final_exit() when it drains.
		if self._default_exit(task) != os.EX_OK:
		elif self._task_queue:
			self._start_next_task()
		# (the "else:" opener is not visible in this excerpt)
			self._final_exit(task)
class SubProcess(AbstractPollTask):
	"""
	AbstractPollTask that wraps a forked child process (self.pid).
	NOTE(review): this excerpt omits many "def" lines (poll, cancel,
	isAlive, _wait) and try/except scaffolding; the orphaned fragments
	below belong to those methods in the full file.
	"""

	__slots__ = ("pid",) + \
		("_files", "_reg_id")

	# A file descriptor is required for the scheduler to monitor changes from
	# inside a poll() loop. When logging is not enabled, create a pipe just to
	# serve this purpose alone.

		if self.returncode is not None:
			return self.returncode
		if self.pid is None:
			return self.returncode
		if self._registered:
			return self.returncode

		# Non-blocking reap; (0, 0) means the child is still running.
		retval = os.waitpid(self.pid, os.WNOHANG)
		if e.errno != errno.ECHILD:
		# Missing child: treat as a generic failure status.
		retval = (self.pid, 1)

		if retval == (0, 0):
		self._set_returncode(retval)
		return self.returncode

		os.kill(self.pid, signal.SIGTERM)
		if e.errno != errno.ESRCH:

		self.cancelled = True
		if self.pid is not None:
		return self.returncode

		return self.pid is not None and \
			self.returncode is None

		if self.returncode is not None:
			return self.returncode

		if self._registered:
			self.scheduler.schedule(self._reg_id)
			if self.returncode is not None:
				return self.returncode

		# Blocking reap of the child.
		wait_retval = os.waitpid(self.pid, 0)
		if e.errno != errno.ECHILD:
		self._set_returncode((self.pid, 1))
		self._set_returncode(wait_retval)

		return self.returncode

	def _unregister(self):
		"""
		Unregister from the scheduler and close open files.
		"""

		self._registered = False

		if self._reg_id is not None:
			self.scheduler.unregister(self._reg_id)
			# (the "self._reg_id = None" reset is not visible here)

		if self._files is not None:
			for f in self._files.itervalues():
			# (the per-file close() call is not visible here)

	def _set_returncode(self, wait_retval):
		# Normalize an os.waitpid() status word into self.returncode.
		# (NOTE(review): the branch openers selecting between the two
		# conversions below are not visible in this excerpt.)
		retval = wait_retval[1]

		if retval != os.EX_OK:
			retval = (retval & 0xff) << 8
			retval = retval >> 8

		self.returncode = retval
class SpawnProcess(SubProcess):

	"""
	Constructor keyword args are passed into portage.process.spawn().
	The required "args" keyword argument will be passed as the first
	argument to spawn().

	NOTE(review): this excerpt is missing many interleaved lines
	(method headers, try/except scaffolding, else-branch openers and
	the continuation of __slots__); tokens below are reproduced as-is.
	"""

	# Attributes of the same name that are forwarded as keyword
	# arguments into spawn().
	_spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
		"uid", "gid", "groups", "umask", "logfile",
		"path_lookup", "pre_exec")

	__slots__ = ("args",) + \
	_file_names = ("log", "process", "stdout")
	_files_dict = slot_dict_class(_file_names, prefix="")

	# (method header for _start is not visible in this excerpt)
		if self.fd_pipes is None:
		fd_pipes = self.fd_pipes
		# Default the child's stdio to our own stdio.
		fd_pipes.setdefault(0, sys.stdin.fileno())
		fd_pipes.setdefault(1, sys.stdout.fileno())
		fd_pipes.setdefault(2, sys.stderr.fileno())

		# flush any pending output
		for fd in fd_pipes.itervalues():
			if fd == sys.stdout.fileno():
			if fd == sys.stderr.fileno():

		logfile = self.logfile
		self._files = self._files_dict()

		master_fd, slave_fd = self._pipe(fd_pipes)
		# The read end must be non-blocking for the poll loop.
		fcntl.fcntl(master_fd, fcntl.F_SETFL,
			fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

		fd_pipes_orig = fd_pipes.copy()

		# TODO: Use job control functions like tcsetpgrp() to control
		# access to stdin. Until then, use /dev/null so that any
		# attempts to read from stdin will immediately return EOF
		# instead of blocking indefinitely.
		null_input = open('/dev/null', 'rb')
		fd_pipes[0] = null_input.fileno()
		fd_pipes[0] = fd_pipes_orig[0]

		files.process = os.fdopen(master_fd, 'rb')
		if logfile is not None:

			fd_pipes[1] = slave_fd
			fd_pipes[2] = slave_fd

			files.log = open(logfile, mode='ab')
			portage.util.apply_secpass_permissions(logfile,
				uid=portage.portage_uid, gid=portage.portage_gid,

			if not self.background:
				# Duplicate the original stdout for teeing output.
				files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')

			output_handler = self._output_handler

			# Create a dummy pipe so the scheduler can monitor
			# the process from inside a poll() loop.
			fd_pipes[self._dummy_pipe_fd] = slave_fd
			fd_pipes[1] = slave_fd
			fd_pipes[2] = slave_fd
			output_handler = self._dummy_handler

		# (the "kwargs = {}" initialization is not visible here)
		for k in self._spawn_kwarg_names:
			v = getattr(self, k)

		kwargs["fd_pipes"] = fd_pipes
		kwargs["returnpid"] = True
		kwargs.pop("logfile", None)

		self._reg_id = self.scheduler.register(files.process.fileno(),
			self._registered_events, output_handler)
		self._registered = True

		retval = self._spawn(self.args, **kwargs)

		if null_input is not None:

		# An int return from spawn indicates failure (no pid list).
		if isinstance(retval, int):
			self.returncode = retval

		self.pid = retval[0]
		# Remove the pid from portage's global bookkeeping
		# (presumably because this class reaps it -- TODO confirm).
		portage.process.spawned_pids.remove(self.pid)

	def _pipe(self, fd_pipes):
		"""
		@type fd_pipes: dict
		@param fd_pipes: pipes from which to copy terminal size if desired.
		NOTE(review): the return statement is not visible in this
		excerpt; subclasses (see EbuildFetcher._pipe) return a
		(master_fd, slave_fd) pair.
		"""

	def _spawn(self, args, **kwargs):
		# Overridable spawn hook (see MiscFunctionsProcess._spawn).
		return portage.process.spawn(args, **kwargs)

	def _output_handler(self, fd, event):
		# Scheduler callback: copy child output to our saved stdout
		# (unless backgrounded) and to the log file.
		if event & PollConstants.POLLIN:

			buf = array.array('B')
			buf.fromfile(files.process, self._bufsize)

			if not self.background:
				write_successful = False
				if not write_successful:
					buf.tofile(files.stdout)
					write_successful = True
					files.stdout.flush()
				if e.errno != errno.EAGAIN:
				# Avoid a potentially infinite loop. In
				# most cases, the failure count is zero
				# and it's unlikely to exceed 1.
				# This means that a subprocess has put an inherited
				# stdio file descriptor (typically stdin) into
				# O_NONBLOCK mode. This is not acceptable (see bug
				# #264435), so revert it. We need to use a loop
				# here since there's a race condition due to
				# parallel processes being able to change the
				# flags on the inherited file descriptor.
				# TODO: When possible, avoid having child processes
				# inherit stdio file descriptors from portage
				# (maybe it can't be avoided with
				# PROPERTIES=interactive).
				fcntl.fcntl(files.stdout.fileno(), fcntl.F_SETFL,
					fcntl.fcntl(files.stdout.fileno(),
					fcntl.F_GETFL) ^ os.O_NONBLOCK)

			buf.tofile(files.log)

		self._unregister_if_appropriate(event)
		return self._registered

	def _dummy_handler(self, fd, event):
		"""
		This method is mainly interested in detecting EOF, since
		the only purpose of the pipe is to allow the scheduler to
		monitor the process from inside a poll() loop.
		"""

		if event & PollConstants.POLLIN:

			buf = array.array('B')
			buf.fromfile(self._files.process, self._bufsize)

		self._unregister_if_appropriate(event)
		return self._registered
class MiscFunctionsProcess(SpawnProcess):
	"""
	Spawns misc-functions.sh with an existing ebuild environment.
	NOTE(review): the "def _start" header above the first body
	fragment is not visible in this excerpt.
	"""

	__slots__ = ("commands", "phase", "pkg", "settings")

	# (method header for _start is not visible in this excerpt)
		settings = self.settings
		settings.pop("EBUILD_PHASE", None)
		portage_bin_path = settings["PORTAGE_BIN_PATH"]
		misc_sh_binary = os.path.join(portage_bin_path,
			os.path.basename(portage.const.MISC_SH_BINARY))

		# Build the shell command: quoted script path plus the
		# requested misc-function names.
		self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
		self.logfile = settings.get("PORTAGE_LOG_FILE")

		# Clear any stale exit-status file before spawning.
		portage._doebuild_exit_status_unlink(
			settings.get("EBUILD_EXIT_STATUS_FILE"))

		SpawnProcess._start(self)

	def _spawn(self, args, **kwargs):
		# Run through portage.spawn() so the ebuild environment
		# (settings) is applied to the shell command.
		settings = self.settings
		debug = settings.get("PORTAGE_DEBUG") == "1"
		return portage.spawn(" ".join(args), settings,
			debug=debug, **kwargs)

	def _set_returncode(self, wait_retval):
		SpawnProcess._set_returncode(self, wait_retval)
		# Reconcile the wait status with the phase's exit-status file.
		self.returncode = portage._doebuild_exit_status_check_and_log(
			self.settings, self.phase, self.returncode)
class EbuildFetcher(SpawnProcess):
	"""
	Fetches the distfiles for one ebuild by spawning the "ebuild"
	helper, optionally inside a locked build directory (non-prefetch
	mode) so pkg_nofetch/elog output can be captured.
	NOTE(review): this excerpt is missing interleaved lines -- the
	continuation of __slots__, the "def _start" header, the fetchonly
	branch opener, the local "phase" assignment, the debug branch
	opener and assorted connective lines; tokens below are reproduced
	as-is.
	"""

	__slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
		root_config = self.pkg.root_config
		portdb = root_config.trees["porttree"].dbapi
		ebuild_path = portdb.findname(self.pkg.cpv)
		settings = self.config_pool.allocate()
		settings.setcpv(self.pkg)

		# In prefetch mode, logging goes to emerge-fetch.log and the builddir
		# should not be touched since otherwise it could interfere with
		# another instance of the same cpv concurrently being built for a
		# different $ROOT (currently, builds only cooperate with prefetchers
		# that are spawned for the same $ROOT).
		if not self.prefetch:
			self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
			self._build_dir.lock()
			self._build_dir.clean_log()
			portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
			if self.logfile is None:
				self.logfile = settings.get("PORTAGE_LOG_FILE")

		# If any incremental variables have been overridden
		# via the environment, those values need to be passed
		# along here so that they are correctly considered by
		# the config instance in the subproccess.
		fetch_env = os.environ.copy()

		nocolor = settings.get("NOCOLOR")
		if nocolor is not None:
			fetch_env["NOCOLOR"] = nocolor

		fetch_env["PORTAGE_NICENESS"] = "0"
		fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"

		ebuild_binary = os.path.join(
			settings["PORTAGE_BIN_PATH"], "ebuild")

		fetch_args = [ebuild_binary, ebuild_path, phase]
		debug = settings.get("PORTAGE_DEBUG") == "1"
			fetch_args.append("--debug")

		self.args = fetch_args
		self.env = fetch_env
		SpawnProcess._start(self)

	def _pipe(self, fd_pipes):
		"""When appropriate, use a pty so that fetcher progress bars,
		like wget has, will work properly."""
		if self.background or not sys.stdout.isatty():
			# When the output only goes to a log file,
			# there's no point in creating a pty.
			# (the plain-pipe return is not visible in this excerpt)
		stdout_pipe = fd_pipes.get(1)
		got_pty, master_fd, slave_fd = \
			portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
		return (master_fd, slave_fd)

	def _set_returncode(self, wait_retval):
		SpawnProcess._set_returncode(self, wait_retval)
		# Collect elog messages that might have been
		# created by the pkg_nofetch phase.
		if self._build_dir is not None:
			# Skip elog messages for prefetch, in order to avoid duplicates.
			if not self.prefetch and self.returncode != os.EX_OK:
				if self.logfile is not None:
					# Append the failure message to the fetch log too.
					elog_out = open(self.logfile, 'a')
				msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
				if self.logfile is not None:
					msg += ", Log file:"
				eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
				if self.logfile is not None:
					eerror(" '%s'" % (self.logfile,),
						phase="unpack", key=self.pkg.cpv, out=elog_out)
				if elog_out is not None:
			if not self.prefetch:
				portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
			features = self._build_dir.settings.features
			if self.returncode == os.EX_OK:
				self._build_dir.clean_log()
			# Release the build dir and return the settings instance
			# to the shared pool.
			self._build_dir.unlock()
			self.config_pool.deallocate(self._build_dir.settings)
			self._build_dir = None
2627 class EbuildBuildDir(SlotObject):
2629 __slots__ = ("dir_path", "pkg", "settings",
2630 "locked", "_catdir", "_lock_obj")
2632 def __init__(self, **kwargs):
2633 SlotObject.__init__(self, **kwargs)
2638 This raises an AlreadyLocked exception if lock() is called
2639 while a lock is already held. In order to avoid this, call
2640 unlock() or check whether the "locked" attribute is True
2641 or False before calling lock().
2643 if self._lock_obj is not None:
2644 raise self.AlreadyLocked((self._lock_obj,))
2646 dir_path = self.dir_path
2647 if dir_path is None:
2648 root_config = self.pkg.root_config
2649 portdb = root_config.trees["porttree"].dbapi
2650 ebuild_path = portdb.findname(self.pkg.cpv)
2651 settings = self.settings
2652 settings.setcpv(self.pkg)
2653 debug = settings.get("PORTAGE_DEBUG") == "1"
2654 use_cache = 1 # always true
2655 portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2656 self.settings, debug, use_cache, portdb)
2657 dir_path = self.settings["PORTAGE_BUILDDIR"]
2659 catdir = os.path.dirname(dir_path)
2660 self._catdir = catdir
2662 portage.util.ensure_dirs(os.path.dirname(catdir),
2663 gid=portage.portage_gid,
2667 catdir_lock = portage.locks.lockdir(catdir)
2668 portage.util.ensure_dirs(catdir,
2669 gid=portage.portage_gid,
2671 self._lock_obj = portage.locks.lockdir(dir_path)
2673 self.locked = self._lock_obj is not None
2674 if catdir_lock is not None:
2675 portage.locks.unlockdir(catdir_lock)
2677 def clean_log(self):
2678 """Discard existing log."""
2679 settings = self.settings
2681 for x in ('.logid', 'temp/build.log'):
2683 os.unlink(os.path.join(settings["PORTAGE_BUILDDIR"], x))
2688 if self._lock_obj is None:
2691 portage.locks.unlockdir(self._lock_obj)
2692 self._lock_obj = None
2695 catdir = self._catdir
2698 catdir_lock = portage.locks.lockdir(catdir)
2704 if e.errno not in (errno.ENOENT,
2705 errno.ENOTEMPTY, errno.EEXIST):
2708 portage.locks.unlockdir(catdir_lock)
2710 class AlreadyLocked(portage.exception.PortageException):
2713 class EbuildBuild(CompositeTask):
2715 __slots__ = ("args_set", "config_pool", "find_blockers",
2716 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2717 "prefetcher", "settings", "world_atom") + \
2718 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2722 logger = self.logger
2725 settings = self.settings
2726 world_atom = self.world_atom
2727 root_config = pkg.root_config
2730 portdb = root_config.trees[tree].dbapi
2731 settings.setcpv(pkg)
2732 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2733 ebuild_path = portdb.findname(self.pkg.cpv)
2734 self._ebuild_path = ebuild_path
2736 prefetcher = self.prefetcher
2737 if prefetcher is None:
2739 elif not prefetcher.isAlive():
2741 elif prefetcher.poll() is None:
2743 waiting_msg = "Fetching files " + \
2744 "in the background. " + \
2745 "To view fetch progress, run `tail -f " + \
2746 "/var/log/emerge-fetch.log` in another " + \
2748 msg_prefix = colorize("GOOD", " * ")
2749 from textwrap import wrap
2750 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2751 for line in wrap(waiting_msg, 65))
2752 if not self.background:
2753 writemsg(waiting_msg, noiselevel=-1)
2755 self._current_task = prefetcher
2756 prefetcher.addExitListener(self._prefetch_exit)
2759 self._prefetch_exit(prefetcher)
2761 def _prefetch_exit(self, prefetcher):
2765 settings = self.settings
2768 fetcher = EbuildFetchonly(
2769 fetch_all=opts.fetch_all_uri,
2770 pkg=pkg, pretend=opts.pretend,
2772 retval = fetcher.execute()
2773 self.returncode = retval
2777 fetcher = EbuildFetcher(config_pool=self.config_pool,
2778 fetchall=opts.fetch_all_uri,
2779 fetchonly=opts.fetchonly,
2780 background=self.background,
2781 pkg=pkg, scheduler=self.scheduler)
2783 self._start_task(fetcher, self._fetch_exit)
2785 def _fetch_exit(self, fetcher):
2789 fetch_failed = False
2791 fetch_failed = self._final_exit(fetcher) != os.EX_OK
2793 fetch_failed = self._default_exit(fetcher) != os.EX_OK
2795 if fetch_failed and fetcher.logfile is not None and \
2796 os.path.exists(fetcher.logfile):
2797 self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2799 if not fetch_failed and fetcher.logfile is not None:
2800 # Fetch was successful, so remove the fetch log.
2802 os.unlink(fetcher.logfile)
2806 if fetch_failed or opts.fetchonly:
2810 logger = self.logger
2812 pkg_count = self.pkg_count
2813 scheduler = self.scheduler
2814 settings = self.settings
2815 features = settings.features
2816 ebuild_path = self._ebuild_path
2817 system_set = pkg.root_config.sets["system"]
2819 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2820 self._build_dir.lock()
2822 # Cleaning is triggered before the setup
2823 # phase, in portage.doebuild().
2824 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2825 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2826 short_msg = "emerge: (%s of %s) %s Clean" % \
2827 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2828 logger.log(msg, short_msg=short_msg)
2830 #buildsyspkg: Check if we need to _force_ binary package creation
2831 self._issyspkg = "buildsyspkg" in features and \
2832 system_set.findAtomForPackage(pkg) and \
2835 if opts.buildpkg or self._issyspkg:
2837 self._buildpkg = True
2839 msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2840 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2841 short_msg = "emerge: (%s of %s) %s Compile" % \
2842 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2843 logger.log(msg, short_msg=short_msg)
2846 msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2847 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2848 short_msg = "emerge: (%s of %s) %s Compile" % \
2849 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2850 logger.log(msg, short_msg=short_msg)
2852 build = EbuildExecuter(background=self.background, pkg=pkg,
2853 scheduler=scheduler, settings=settings)
2854 self._start_task(build, self._build_exit)
2856 def _unlock_builddir(self):
2857 portage.elog.elog_process(self.pkg.cpv, self.settings)
2858 self._build_dir.unlock()
2860 def _build_exit(self, build):
2861 if self._default_exit(build) != os.EX_OK:
2862 self._unlock_builddir()
2867 buildpkg = self._buildpkg
2870 self._final_exit(build)
2875 msg = ">>> This is a system package, " + \
2876 "let's pack a rescue tarball.\n"
2878 log_path = self.settings.get("PORTAGE_LOG_FILE")
2879 if log_path is not None:
2880 log_file = open(log_path, 'a')
2886 if not self.background:
2887 portage.writemsg_stdout(msg, noiselevel=-1)
2889 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2890 scheduler=self.scheduler, settings=self.settings)
2892 self._start_task(packager, self._buildpkg_exit)
2894 def _buildpkg_exit(self, packager):
2896 Released build dir lock when there is a failure or
2897 when in buildpkgonly mode. Otherwise, the lock will
2898 be released when merge() is called.
2901 if self._default_exit(packager) != os.EX_OK:
2902 self._unlock_builddir()
2906 if self.opts.buildpkgonly:
2907 # Need to call "clean" phase for buildpkgonly mode
2908 portage.elog.elog_process(self.pkg.cpv, self.settings)
2910 clean_phase = EbuildPhase(background=self.background,
2911 pkg=self.pkg, phase=phase,
2912 scheduler=self.scheduler, settings=self.settings,
2914 self._start_task(clean_phase, self._clean_exit)
2917 # Continue holding the builddir lock until
2918 # after the package has been installed.
2919 self._current_task = None
2920 self.returncode = packager.returncode
2923 def _clean_exit(self, clean_phase):
2924 if self._final_exit(clean_phase) != os.EX_OK or \
2925 self.opts.buildpkgonly:
2926 self._unlock_builddir()
2931 Install the package and then clean up and release locks.
2932 Only call this after the build has completed successfully
2933 and neither fetchonly nor buildpkgonly mode are enabled.
2936 find_blockers = self.find_blockers
2937 ldpath_mtimes = self.ldpath_mtimes
2938 logger = self.logger
2940 pkg_count = self.pkg_count
2941 settings = self.settings
2942 world_atom = self.world_atom
2943 ebuild_path = self._ebuild_path
2946 merge = EbuildMerge(find_blockers=self.find_blockers,
2947 ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2948 pkg_count=pkg_count, pkg_path=ebuild_path,
2949 scheduler=self.scheduler,
2950 settings=settings, tree=tree, world_atom=world_atom)
2952 msg = " === (%s of %s) Merging (%s::%s)" % \
2953 (pkg_count.curval, pkg_count.maxval,
2954 pkg.cpv, ebuild_path)
2955 short_msg = "emerge: (%s of %s) %s Merge" % \
2956 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2957 logger.log(msg, short_msg=short_msg)
2960 rval = merge.execute()
2962 self._unlock_builddir()
2966 class EbuildExecuter(CompositeTask):
2968 __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2970 _phases = ("prepare", "configure", "compile", "test", "install")
2972 _live_eclasses = frozenset([
2982 self._tree = "porttree"
2985 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2986 scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2987 self._start_task(clean_phase, self._clean_phase_exit)
2989 def _clean_phase_exit(self, clean_phase):
2991 if self._default_exit(clean_phase) != os.EX_OK:
2996 scheduler = self.scheduler
2997 settings = self.settings
3000 # This initializes PORTAGE_LOG_FILE.
3001 portage.prepare_build_dirs(pkg.root, settings, cleanup)
3003 setup_phase = EbuildPhase(background=self.background,
3004 pkg=pkg, phase="setup", scheduler=scheduler,
3005 settings=settings, tree=self._tree)
3007 setup_phase.addExitListener(self._setup_exit)
3008 self._current_task = setup_phase
3009 self.scheduler.scheduleSetup(setup_phase)
3011 def _setup_exit(self, setup_phase):
3013 if self._default_exit(setup_phase) != os.EX_OK:
3017 unpack_phase = EbuildPhase(background=self.background,
3018 pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
3019 settings=self.settings, tree=self._tree)
3021 if self._live_eclasses.intersection(self.pkg.inherited):
3022 # Serialize $DISTDIR access for live ebuilds since
3023 # otherwise they can interfere with eachother.
3025 unpack_phase.addExitListener(self._unpack_exit)
3026 self._current_task = unpack_phase
3027 self.scheduler.scheduleUnpack(unpack_phase)
3030 self._start_task(unpack_phase, self._unpack_exit)
3032 def _unpack_exit(self, unpack_phase):
3034 if self._default_exit(unpack_phase) != os.EX_OK:
3038 ebuild_phases = TaskSequence(scheduler=self.scheduler)
3041 phases = self._phases
3042 eapi = pkg.metadata["EAPI"]
3043 if eapi in ("0", "1"):
3044 # skip src_prepare and src_configure
3047 for phase in phases:
3048 ebuild_phases.add(EbuildPhase(background=self.background,
3049 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3050 settings=self.settings, tree=self._tree))
3052 self._start_task(ebuild_phases, self._default_final_exit)
3054 class EbuildMetadataPhase(SubProcess):
3057 Asynchronous interface for the ebuild "depend" phase which is
3058 used to extract metadata from the ebuild.
3061 __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3062 "ebuild_mtime", "metadata", "portdb", "repo_path", "settings") + \
3065 _file_names = ("ebuild",)
3066 _files_dict = slot_dict_class(_file_names, prefix="")
3070 settings = self.settings
3071 settings.setcpv(self.cpv)
3072 ebuild_path = self.ebuild_path
3075 if 'parse-eapi-glep-55' in settings.features:
3076 pf, eapi = portage._split_ebuild_name_glep55(
3077 os.path.basename(ebuild_path))
3078 if eapi is None and \
3079 'parse-eapi-ebuild-head' in settings.features:
3080 eapi = portage._parse_eapi_ebuild_head(codecs.open(ebuild_path,
3081 mode='r', encoding='utf_8', errors='replace'))
3083 if eapi is not None:
3084 if not portage.eapi_is_supported(eapi):
3085 self.metadata_callback(self.cpv, self.ebuild_path,
3086 self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
3087 self.returncode = os.EX_OK
3091 settings.configdict['pkg']['EAPI'] = eapi
3093 debug = settings.get("PORTAGE_DEBUG") == "1"
3097 if self.fd_pipes is not None:
3098 fd_pipes = self.fd_pipes.copy()
3102 fd_pipes.setdefault(0, sys.stdin.fileno())
3103 fd_pipes.setdefault(1, sys.stdout.fileno())
3104 fd_pipes.setdefault(2, sys.stderr.fileno())
3106 # flush any pending output
3107 for fd in fd_pipes.itervalues():
3108 if fd == sys.stdout.fileno():
3110 if fd == sys.stderr.fileno():
3113 fd_pipes_orig = fd_pipes.copy()
3114 self._files = self._files_dict()
3117 master_fd, slave_fd = os.pipe()
3118 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3119 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3121 fd_pipes[self._metadata_fd] = slave_fd
3123 self._raw_metadata = []
3124 files.ebuild = os.fdopen(master_fd, 'r')
3125 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3126 self._registered_events, self._output_handler)
3127 self._registered = True
3129 retval = portage.doebuild(ebuild_path, "depend",
3130 settings["ROOT"], settings, debug,
3131 mydbapi=self.portdb, tree="porttree",
3132 fd_pipes=fd_pipes, returnpid=True)
3136 if isinstance(retval, int):
3137 # doebuild failed before spawning
3139 self.returncode = retval
3143 self.pid = retval[0]
3144 portage.process.spawned_pids.remove(self.pid)
3146 def _output_handler(self, fd, event):
3148 if event & PollConstants.POLLIN:
3149 self._raw_metadata.append(self._files.ebuild.read())
3150 if not self._raw_metadata[-1]:
3154 self._unregister_if_appropriate(event)
3155 return self._registered
3157 def _set_returncode(self, wait_retval):
3158 SubProcess._set_returncode(self, wait_retval)
3159 if self.returncode == os.EX_OK:
3160 metadata_lines = "".join(self._raw_metadata).splitlines()
3161 if len(portage.auxdbkeys) != len(metadata_lines):
3162 # Don't trust bash's returncode if the
3163 # number of lines is incorrect.
3166 metadata = izip(portage.auxdbkeys, metadata_lines)
3167 self.metadata = self.metadata_callback(self.cpv,
3168 self.ebuild_path, self.repo_path, metadata,
class EbuildProcess(SpawnProcess):
	"""
	Spawn a single ebuild phase via portage.doebuild().
	"""

	__slots__ = ("phase", "pkg", "settings", "tree")

	def _start(self):
		# Don't open the log file during the clean phase since the
		# open file can result in an nfs lock on $T/build.log which
		# prevents the clean phase from removing $T.
		if self.phase not in ("clean", "cleanrm"):
			self.logfile = self.settings.get("PORTAGE_LOG_FILE")
		SpawnProcess._start(self)

	def _pipe(self, fd_pipes):
		# Use a pty when possible so that tools which draw progress
		# bars on stdout behave properly.
		stdout_pipe = fd_pipes.get(1)
		got_pty, master_fd, slave_fd = \
			portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
		return (master_fd, slave_fd)

	def _spawn(self, args, **kwargs):

		root_config = self.pkg.root_config
		tree = self.tree
		mydbapi = root_config.trees[tree].dbapi
		settings = self.settings
		ebuild_path = settings["EBUILD"]
		debug = settings.get("PORTAGE_DEBUG") == "1"

		rval = portage.doebuild(ebuild_path, self.phase,
			root_config.root, settings, debug,
			mydbapi=mydbapi, tree=tree, **kwargs)

		return rval

	def _set_returncode(self, wait_retval):
		SpawnProcess._set_returncode(self, wait_retval)

		if self.phase not in ("clean", "cleanrm"):
			self.returncode = portage._doebuild_exit_status_check_and_log(
				self.settings, self.phase, self.returncode)

		if self.phase == "test" and self.returncode != os.EX_OK and \
			"test-fail-continue" in self.settings.features:
			# FEATURES=test-fail-continue: treat test failures as success.
			self.returncode = os.EX_OK

		portage._post_phase_userpriv_perms(self.settings)
class EbuildPhase(CompositeTask):
	"""
	Run a single ebuild phase via EbuildProcess, followed by any
	associated "misc" shell functions (portage._post_phase_cmds).

	NOTE(review): several statements in this class were lost in
	extraction; the restored lines should be verified against
	upstream portage.
	"""

	__slots__ = ("background", "pkg", "phase",
		"scheduler", "settings", "tree")

	_post_phase_cmds = portage._post_phase_cmds

	def _start(self):

		ebuild_process = EbuildProcess(background=self.background,
			pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
			settings=self.settings, tree=self.tree)

		self._start_task(ebuild_process, self._ebuild_exit)

	def _ebuild_exit(self, ebuild_process):

		if self.phase == "install":
			# Scan the build log for suspicious messages; in the
			# background case the report goes to the log file itself.
			out = None
			log_path = self.settings.get("PORTAGE_LOG_FILE")
			log_file = None
			if self.background and log_path is not None:
				log_file = open(log_path, 'a')
				out = log_file
			try:
				portage._check_build_log(self.settings, out=out)
			finally:
				if log_file is not None:
					log_file.close()

		if self._default_exit(ebuild_process) != os.EX_OK:
			self.wait()
			return

		settings = self.settings

		if self.phase == "install":
			portage._post_src_install_chost_fix(settings)
			portage._post_src_install_uid_fix(settings)

		post_phase_cmds = self._post_phase_cmds.get(self.phase)
		if post_phase_cmds is not None:
			post_phase = MiscFunctionsProcess(background=self.background,
				commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
				scheduler=self.scheduler, settings=settings)
			self._start_task(post_phase, self._post_phase_exit)
			return

		self.returncode = ebuild_process.returncode
		self._current_task = None
		self.wait()

	def _post_phase_exit(self, post_phase):
		if self._final_exit(post_phase) != os.EX_OK:
			writemsg("!!! post %s failed; exiting.\n" % self.phase,
				noiselevel=-1)
		self._current_task = None
		self.wait()
		return
class EbuildBinpkg(EbuildProcess):
	"""
	Create a binary package (tbz2) from a completed build image.
	This assumes that src_install() has successfully completed.
	"""
	__slots__ = ("_binpkg_tmpfile",)

	def _start(self):
		self.phase = "package"
		self.tree = "porttree"
		pkg = self.pkg
		bintree = pkg.root_config.trees["bintree"]

		bintree.prevent_collision(pkg.cpv)
		# Write to a pid-unique temp file so that concurrent processes
		# can't clobber each other; inject() moves it into place on
		# success (see _set_returncode).
		binpkg_tmpfile = os.path.join(bintree.pkgdir,
			pkg.cpv + ".tbz2." + str(os.getpid()))
		self._binpkg_tmpfile = binpkg_tmpfile
		settings = self.settings
		settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
		settings.backup_changes("PORTAGE_BINPKG_TMPFILE")

		try:
			EbuildProcess._start(self)
		finally:
			# Don't leak the temp-file setting into later phases.
			settings.pop("PORTAGE_BINPKG_TMPFILE", None)

	def _set_returncode(self, wait_retval):
		EbuildProcess._set_returncode(self, wait_retval)

		pkg = self.pkg
		bintree = pkg.root_config.trees["bintree"]
		binpkg_tmpfile = self._binpkg_tmpfile
		if self.returncode == os.EX_OK:
			# Move the finished tbz2 into the tree and update the index.
			bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
class EbuildMerge(SlotObject):
	"""
	Synchronously merge a successfully built package into the live
	filesystem via portage.merge(), updating the world set and the
	merge log on success.
	"""

	__slots__ = ("find_blockers", "logger", "ldpath_mtimes",
		"pkg", "pkg_count", "pkg_path", "pretend",
		"scheduler", "settings", "tree", "world_atom")

	def execute(self):
		"""Run the merge and return its exit code (os.EX_OK on success)."""
		root_config = self.pkg.root_config
		settings = self.settings
		retval = portage.merge(settings["CATEGORY"],
			settings["PF"], settings["D"],
			os.path.join(settings["PORTAGE_BUILDDIR"],
			"build-info"), root_config.root, settings,
			myebuild=settings["EBUILD"],
			mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
			vartree=root_config.trees["vartree"],
			prev_mtimes=self.ldpath_mtimes,
			scheduler=self.scheduler,
			blockers=self.find_blockers)

		if retval == os.EX_OK:
			self.world_atom(self.pkg)
			self._log_success()

		return retval

	def _log_success(self):
		# Record the completed merge (and the upcoming post-build clean,
		# unless FEATURES=noclean) in the emerge log.
		pkg = self.pkg
		pkg_count = self.pkg_count
		pkg_path = self.pkg_path
		logger = self.logger
		if "noclean" not in self.settings.features:
			short_msg = "emerge: (%s of %s) %s Clean Post" % \
				(pkg_count.curval, pkg_count.maxval, pkg.cpv)
			logger.log((" === (%s of %s) " + \
				"Post-Build Cleaning (%s::%s)") % \
				(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
				short_msg=short_msg)
		logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3356 class PackageUninstall(AsynchronousTask):
3358 __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3362 unmerge(self.pkg.root_config, self.opts, "unmerge",
3363 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3364 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3365 writemsg_level=self._writemsg_level)
3366 except UninstallFailure, e:
3367 self.returncode = e.status
3369 self.returncode = os.EX_OK
3372 def _writemsg_level(self, msg, level=0, noiselevel=0):
3374 log_path = self.settings.get("PORTAGE_LOG_FILE")
3375 background = self.background
3377 if log_path is None:
3378 if not (background and level < logging.WARNING):
3379 portage.util.writemsg_level(msg,
3380 level=level, noiselevel=noiselevel)
3383 portage.util.writemsg_level(msg,
3384 level=level, noiselevel=noiselevel)
3386 f = open(log_path, 'a')
3392 class Binpkg(CompositeTask):
3394 __slots__ = ("find_blockers",
3395 "ldpath_mtimes", "logger", "opts",
3396 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3397 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3398 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3400 def _writemsg_level(self, msg, level=0, noiselevel=0):
3402 if not self.background:
3403 portage.util.writemsg_level(msg,
3404 level=level, noiselevel=noiselevel)
3406 log_path = self.settings.get("PORTAGE_LOG_FILE")
3407 if log_path is not None:
3408 f = open(log_path, 'a')
3417 settings = self.settings
3418 settings.setcpv(pkg)
3419 self._tree = "bintree"
3420 self._bintree = self.pkg.root_config.trees[self._tree]
3421 self._verify = not self.opts.pretend
3423 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3424 "portage", pkg.category, pkg.pf)
3425 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3426 pkg=pkg, settings=settings)
3427 self._image_dir = os.path.join(dir_path, "image")
3428 self._infloc = os.path.join(dir_path, "build-info")
3429 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3430 settings["EBUILD"] = self._ebuild_path
3431 debug = settings.get("PORTAGE_DEBUG") == "1"
3432 portage.doebuild_environment(self._ebuild_path, "setup",
3433 settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3434 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3436 # The prefetcher has already completed or it
3437 # could be running now. If it's running now,
3438 # wait for it to complete since it holds
3439 # a lock on the file being fetched. The
3440 # portage.locks functions are only designed
3441 # to work between separate processes. Since
3442 # the lock is held by the current process,
3443 # use the scheduler and fetcher methods to
3444 # synchronize with the fetcher.
3445 prefetcher = self.prefetcher
3446 if prefetcher is None:
3448 elif not prefetcher.isAlive():
3450 elif prefetcher.poll() is None:
3452 waiting_msg = ("Fetching '%s' " + \
3453 "in the background. " + \
3454 "To view fetch progress, run `tail -f " + \
3455 "/var/log/emerge-fetch.log` in another " + \
3456 "terminal.") % prefetcher.pkg_path
3457 msg_prefix = colorize("GOOD", " * ")
3458 from textwrap import wrap
3459 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3460 for line in wrap(waiting_msg, 65))
3461 if not self.background:
3462 writemsg(waiting_msg, noiselevel=-1)
3464 self._current_task = prefetcher
3465 prefetcher.addExitListener(self._prefetch_exit)
3468 self._prefetch_exit(prefetcher)
3470 def _prefetch_exit(self, prefetcher):
3473 pkg_count = self.pkg_count
3474 if not (self.opts.pretend or self.opts.fetchonly):
3475 self._build_dir.lock()
3476 # If necessary, discard old log so that we don't
3478 self._build_dir.clean_log()
3479 # Initialze PORTAGE_LOG_FILE.
3480 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3481 fetcher = BinpkgFetcher(background=self.background,
3482 logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3483 pretend=self.opts.pretend, scheduler=self.scheduler)
3484 pkg_path = fetcher.pkg_path
3485 self._pkg_path = pkg_path
3487 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3489 msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3490 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3491 short_msg = "emerge: (%s of %s) %s Fetch" % \
3492 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3493 self.logger.log(msg, short_msg=short_msg)
3494 self._start_task(fetcher, self._fetcher_exit)
3497 self._fetcher_exit(fetcher)
3499 def _fetcher_exit(self, fetcher):
3501 # The fetcher only has a returncode when
3502 # --getbinpkg is enabled.
3503 if fetcher.returncode is not None:
3504 self._fetched_pkg = True
3505 if self._default_exit(fetcher) != os.EX_OK:
3506 self._unlock_builddir()
3510 if self.opts.pretend:
3511 self._current_task = None
3512 self.returncode = os.EX_OK
3520 logfile = self.settings.get("PORTAGE_LOG_FILE")
3521 verifier = BinpkgVerifier(background=self.background,
3522 logfile=logfile, pkg=self.pkg)
3523 self._start_task(verifier, self._verifier_exit)
3526 self._verifier_exit(verifier)
3528 def _verifier_exit(self, verifier):
3529 if verifier is not None and \
3530 self._default_exit(verifier) != os.EX_OK:
3531 self._unlock_builddir()
3535 logger = self.logger
3537 pkg_count = self.pkg_count
3538 pkg_path = self._pkg_path
3540 if self._fetched_pkg:
3541 self._bintree.inject(pkg.cpv, filename=pkg_path)
3543 if self.opts.fetchonly:
3544 self._current_task = None
3545 self.returncode = os.EX_OK
3549 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3550 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3551 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3552 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3553 logger.log(msg, short_msg=short_msg)
3556 settings = self.settings
3557 ebuild_phase = EbuildPhase(background=self.background,
3558 pkg=pkg, phase=phase, scheduler=self.scheduler,
3559 settings=settings, tree=self._tree)
3561 self._start_task(ebuild_phase, self._clean_exit)
3563 def _clean_exit(self, clean_phase):
3564 if self._default_exit(clean_phase) != os.EX_OK:
3565 self._unlock_builddir()
3569 dir_path = self._build_dir.dir_path
3571 infloc = self._infloc
3573 pkg_path = self._pkg_path
3576 for mydir in (dir_path, self._image_dir, infloc):
3577 portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3578 gid=portage.data.portage_gid, mode=dir_mode)
3580 # This initializes PORTAGE_LOG_FILE.
3581 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3582 self._writemsg_level(">>> Extracting info\n")
3584 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3585 check_missing_metadata = ("CATEGORY", "PF")
3586 missing_metadata = set()
3587 for k in check_missing_metadata:
3588 v = pkg_xpak.getfile(k)
3590 missing_metadata.add(k)
3592 pkg_xpak.unpackinfo(infloc)
3593 for k in missing_metadata:
3601 f = open(os.path.join(infloc, k), 'wb')
3607 # Store the md5sum in the vdb.
3608 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3610 f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3614 # This gives bashrc users an opportunity to do various things
3615 # such as remove binary packages after they're installed.
3616 settings = self.settings
3617 settings.setcpv(self.pkg)
3618 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3619 settings.backup_changes("PORTAGE_BINPKG_FILE")
3622 setup_phase = EbuildPhase(background=self.background,
3623 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3624 settings=settings, tree=self._tree)
3626 setup_phase.addExitListener(self._setup_exit)
3627 self._current_task = setup_phase
3628 self.scheduler.scheduleSetup(setup_phase)
3630 def _setup_exit(self, setup_phase):
3631 if self._default_exit(setup_phase) != os.EX_OK:
3632 self._unlock_builddir()
3636 extractor = BinpkgExtractorAsync(background=self.background,
3637 image_dir=self._image_dir,
3638 pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3639 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3640 self._start_task(extractor, self._extractor_exit)
3642 def _extractor_exit(self, extractor):
3643 if self._final_exit(extractor) != os.EX_OK:
3644 self._unlock_builddir()
3645 writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3649 def _unlock_builddir(self):
3650 if self.opts.pretend or self.opts.fetchonly:
3652 portage.elog.elog_process(self.pkg.cpv, self.settings)
3653 self._build_dir.unlock()
3657 # This gives bashrc users an opportunity to do various things
3658 # such as remove binary packages after they're installed.
3659 settings = self.settings
3660 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3661 settings.backup_changes("PORTAGE_BINPKG_FILE")
3663 merge = EbuildMerge(find_blockers=self.find_blockers,
3664 ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3665 pkg=self.pkg, pkg_count=self.pkg_count,
3666 pkg_path=self._pkg_path, scheduler=self.scheduler,
3667 settings=settings, tree=self._tree, world_atom=self.world_atom)
3670 retval = merge.execute()
3672 settings.pop("PORTAGE_BINPKG_FILE", None)
3673 self._unlock_builddir()
3676 class BinpkgFetcher(SpawnProcess):
3678 __slots__ = ("pkg", "pretend",
3679 "locked", "pkg_path", "_lock_obj")
3681 def __init__(self, **kwargs):
3682 SpawnProcess.__init__(self, **kwargs)
3684 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3692 pretend = self.pretend
3693 bintree = pkg.root_config.trees["bintree"]
3694 settings = bintree.settings
3695 use_locks = "distlocks" in settings.features
3696 pkg_path = self.pkg_path
3699 portage.util.ensure_dirs(os.path.dirname(pkg_path))
3702 exists = os.path.exists(pkg_path)
3703 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3704 if not (pretend or resume):
3705 # Remove existing file or broken symlink.
3711 # urljoin doesn't work correctly with
3712 # unrecognized protocols like sftp
3713 if bintree._remote_has_index:
3714 rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3716 rel_uri = pkg.cpv + ".tbz2"
3717 uri = bintree._remote_base_uri.rstrip("/") + \
3718 "/" + rel_uri.lstrip("/")
3720 uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3721 "/" + pkg.pf + ".tbz2"
3724 portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3725 self.returncode = os.EX_OK
3729 protocol = urlparse.urlparse(uri)[0]
3730 fcmd_prefix = "FETCHCOMMAND"
3732 fcmd_prefix = "RESUMECOMMAND"
3733 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3735 fcmd = settings.get(fcmd_prefix)
3738 "DISTDIR" : os.path.dirname(pkg_path),
3740 "FILE" : os.path.basename(pkg_path)
3743 fetch_env = dict(settings.iteritems())
3744 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3745 for x in shlex.split(fcmd)]
3747 if self.fd_pipes is None:
3749 fd_pipes = self.fd_pipes
3751 # Redirect all output to stdout since some fetchers like
3752 # wget pollute stderr (if portage detects a problem then it
3753 # can send it's own message to stderr).
3754 fd_pipes.setdefault(0, sys.stdin.fileno())
3755 fd_pipes.setdefault(1, sys.stdout.fileno())
3756 fd_pipes.setdefault(2, sys.stdout.fileno())
3758 self.args = fetch_args
3759 self.env = fetch_env
3760 SpawnProcess._start(self)
# BinpkgFetcher hook: runs after the spawned fetch command exits.
# Propagates the child's exit status via the parent class, then on
# success tries to make the local binary package's mtime agree with
# the MTIME published in the remote binhost index.
3762 def _set_returncode(self, wait_retval):
3763 SpawnProcess._set_returncode(self, wait_retval)
3764 if self.returncode == os.EX_OK:
3765 # If possible, update the mtime to match the remote package if
3766 # the fetcher didn't already do it automatically.
3767 bintree = self.pkg.root_config.trees["bintree"]
3768 if bintree._remote_has_index:
3769 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3770 if remote_mtime is not None:
# NOTE(review): intervening lines are elided in this extract; the
# conversions below are presumably guarded by try/except for malformed
# MTIME values and stat failures -- confirm against the full source.
3772 remote_mtime = long(remote_mtime)
3777 local_mtime = long(os.stat(self.pkg_path).st_mtime)
3781 if remote_mtime != local_mtime:
# Sync both atime and mtime to the remote-advertised timestamp.
3783 os.utime(self.pkg_path,
3784 (remote_mtime, remote_mtime))
3793 This raises an AlreadyLocked exception if lock() is called
3794 while a lock is already held. In order to avoid this, call
3795 unlock() or check whether the "locked" attribute is True
3796 or False before calling lock().
3798 if self._lock_obj is not None:
3799 raise self.AlreadyLocked((self._lock_obj,))
3801 self._lock_obj = portage.locks.lockfile(
3802 self.pkg_path, wantnewlockfile=1)
3805 class AlreadyLocked(portage.exception.PortageException):
3809 if self._lock_obj is None:
3811 portage.locks.unlockfile(self._lock_obj)
3812 self._lock_obj = None
# Synchronously verifies a fetched binary package's digests via
# bintree.digestCheck(), optionally redirecting output to a log file
# when running in the background. On failure the bad file is renamed
# aside (checksum-failure temp file) so a re-fetch can replace it.
3815 class BinpkgVerifier(AsynchronousTask):
# logfile: optional path for background output; pkg: the Package to verify.
3816 __slots__ = ("logfile", "pkg",)
3820 Note: Unlike a normal AsynchronousTask.start() method,
3821 this one does all work is synchronously. The returncode
3822 attribute will be set before it returns.
3826 root_config = pkg.root_config
3827 bintree = root_config.trees["bintree"]
# Save the real streams so they can be restored after digestCheck(),
# whose messages go through sys.stdout/sys.stderr.
3829 stdout_orig = sys.stdout
3830 stderr_orig = sys.stderr
3832 if self.background and self.logfile is not None:
3833 log_file = open(self.logfile, 'a')
3835 if log_file is not None:
3836 sys.stdout = log_file
3837 sys.stderr = log_file
3839 bintree.digestCheck(pkg)
3840 except portage.exception.FileNotFound:
3841 writemsg("!!! Fetching Binary failed " + \
3842 "for '%s'\n" % pkg.cpv, noiselevel=-1)
# DigestException.value carries (filename, reason, got, expected),
# printed one field per line below.
3844 except portage.exception.DigestException, e:
3845 writemsg("\n!!! Digest verification failed:\n",
3847 writemsg("!!! %s\n" % e.value[0],
3849 writemsg("!!! Reason: %s\n" % e.value[1],
3851 writemsg("!!! Got: %s\n" % e.value[2],
3853 writemsg("!!! Expected: %s\n" % e.value[3],
3856 if rval != os.EX_OK:
# Move the corrupt file out of the way rather than deleting it.
3857 pkg_path = bintree.getname(pkg.cpv)
3858 head, tail = os.path.split(pkg_path)
3859 temp_filename = portage._checksum_failure_temp_file(head, tail)
3860 writemsg("File renamed to '%s'\n" % (temp_filename,),
# Always restore the original streams before returning.
3863 sys.stdout = stdout_orig
3864 sys.stderr = stderr_orig
3865 if log_file is not None:
3868 self.returncode = rval
# Composite pipeline that fetches a binary package, verifies its
# digests, then injects it into the local binary package tree:
# BinpkgFetcher -> BinpkgVerifier -> bintree.inject().
3871 class BinpkgPrefetcher(CompositeTask):
3873 __slots__ = ("pkg",) + \
3874 ("pkg_path", "_bintree",)
3877 self._bintree = self.pkg.root_config.trees["bintree"]
3878 fetcher = BinpkgFetcher(background=self.background,
3879 logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3880 scheduler=self.scheduler)
# Remember where the fetcher will place the file; used by inject() later.
3881 self.pkg_path = fetcher.pkg_path
3882 self._start_task(fetcher, self._fetcher_exit)
# CompositeTask callback: invoked when the fetch finishes.
3884 def _fetcher_exit(self, fetcher):
3886 if self._default_exit(fetcher) != os.EX_OK:
3890 verifier = BinpkgVerifier(background=self.background,
3891 logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3892 self._start_task(verifier, self._verifier_exit)
# CompositeTask callback: invoked when digest verification finishes.
3894 def _verifier_exit(self, verifier):
3895 if self._default_exit(verifier) != os.EX_OK:
# Verified OK: register the fetched file with the binary tree.
3899 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3901 self._current_task = None
3902 self.returncode = os.EX_OK
# Unpacks a fetched .tbz2 binary package into image_dir by spawning
# "bzip2 -dqc | tar -xp" through bash; both paths are shell-quoted.
3905 class BinpkgExtractorAsync(SpawnProcess):
3907 __slots__ = ("image_dir", "pkg", "pkg_path")
3909 _shell_binary = portage.const.BASH_BINARY
3912 self.args = [self._shell_binary, "-c",
3913 "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3914 (portage._shell_quote(self.pkg_path),
3915 portage._shell_quote(self.image_dir))]
# Run the extraction with the package's full configured environment.
3917 self.env = self.pkg.root_config.settings.environ()
3918 SpawnProcess._start(self)
# Drives the merge of a single entry from the merge list: prints the
# progress banner, then delegates to EbuildBuild (source packages),
# Binpkg (binary packages), or PackageUninstall (uninstall operations).
3920 class MergeListItem(CompositeTask):
3923 TODO: For parallel scheduling, everything here needs asynchronous
3924 execution support (start, poll, and wait methods).
3927 __slots__ = ("args_set",
3928 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3929 "find_blockers", "logger", "mtimedb", "pkg",
3930 "pkg_count", "pkg_to_replace", "prefetcher",
3931 "settings", "statusMessage", "world_atom") + \
3937 build_opts = self.build_opts
3940 # uninstall, executed by self.merge()
3941 self.returncode = os.EX_OK
3945 args_set = self.args_set
3946 find_blockers = self.find_blockers
3947 logger = self.logger
3948 mtimedb = self.mtimedb
3949 pkg_count = self.pkg_count
3950 scheduler = self.scheduler
3951 settings = self.settings
3952 world_atom = self.world_atom
3953 ldpath_mtimes = mtimedb["ldpath"]
# Build the "(N of M) category/pkg-version" status line.
3955 action_desc = "Emerging"
3957 if pkg.type_name == "binary":
3958 action_desc += " binary"
3960 if build_opts.fetchonly:
3961 action_desc = "Fetching"
3963 msg = "%s (%s of %s) %s" % \
3965 colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3966 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3967 colorize("GOOD", pkg.cpv))
# Mention the originating repository when it differs from PORTDIR's.
3969 portdb = pkg.root_config.trees["porttree"].dbapi
3970 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3971 if portdir_repo_name:
3972 pkg_repo_name = pkg.metadata.get("repository")
3973 if pkg_repo_name != portdir_repo_name:
3974 if not pkg_repo_name:
3975 pkg_repo_name = "unknown repo"
3976 msg += " from %s" % pkg_repo_name
3979 msg += " %s %s" % (preposition, pkg.root)
3981 if not build_opts.pretend:
3982 self.statusMessage(msg)
3983 logger.log(" >>> emerge (%s of %s) %s to %s" % \
3984 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# Dispatch on package type: build from source...
3986 if pkg.type_name == "ebuild":
3988 build = EbuildBuild(args_set=args_set,
3989 background=self.background,
3990 config_pool=self.config_pool,
3991 find_blockers=find_blockers,
3992 ldpath_mtimes=ldpath_mtimes, logger=logger,
3993 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3994 prefetcher=self.prefetcher, scheduler=scheduler,
3995 settings=settings, world_atom=world_atom)
3997 self._install_task = build
3998 self._start_task(build, self._default_final_exit)
# ...or install from a prebuilt binary package.
4001 elif pkg.type_name == "binary":
4003 binpkg = Binpkg(background=self.background,
4004 find_blockers=find_blockers,
4005 ldpath_mtimes=ldpath_mtimes, logger=logger,
4006 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
4007 prefetcher=self.prefetcher, settings=settings,
4008 scheduler=scheduler, world_atom=world_atom)
4010 self._install_task = binpkg
4011 self._start_task(binpkg, self._default_final_exit)
# poll()/wait() defer to whichever install task was started above.
4015 self._install_task.poll()
4016 return self.returncode
4019 self._install_task.wait()
4020 return self.returncode
4025 build_opts = self.build_opts
4026 find_blockers = self.find_blockers
4027 logger = self.logger
4028 mtimedb = self.mtimedb
4029 pkg_count = self.pkg_count
4030 prefetcher = self.prefetcher
4031 scheduler = self.scheduler
4032 settings = self.settings
4033 world_atom = self.world_atom
4034 ldpath_mtimes = mtimedb["ldpath"]
# Uninstall operations run directly here (not via _install_task),
# except under --buildpkgonly/--fetchonly/--pretend.
4037 if not (build_opts.buildpkgonly or \
4038 build_opts.fetchonly or build_opts.pretend):
4040 uninstall = PackageUninstall(background=self.background,
4041 ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
4042 pkg=pkg, scheduler=scheduler, settings=settings)
4045 retval = uninstall.wait()
4046 if retval != os.EX_OK:
4050 if build_opts.fetchonly or \
4051 build_opts.buildpkgonly:
4052 return self.returncode
4054 retval = self._install_task.install()
# Thin wrapper that performs the actual merge step for a MergeListItem,
# printing an "Installing"/"Uninstalling" status line first.
4057 class PackageMerge(AsynchronousTask):
4059 TODO: Implement asynchronous merge so that the scheduler can
4060 run while a merge is executing.
4063 __slots__ = ("merge",)
4067 pkg = self.merge.pkg
4068 pkg_count = self.merge.pkg_count
# Word choice depends on whether this is an uninstall or an install.
4071 action_desc = "Uninstalling"
4072 preposition = "from"
4075 action_desc = "Installing"
4077 counter_str = "(%s of %s) " % \
4078 (colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
4079 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)))
4084 colorize("GOOD", pkg.cpv))
4087 msg += " %s %s" % (preposition, pkg.root)
# Suppress the status line for modes that do not actually merge.
4089 if not self.merge.build_opts.fetchonly and \
4090 not self.merge.build_opts.pretend and \
4091 not self.merge.build_opts.buildpkgonly:
4092 self.merge.statusMessage(msg)
# Delegate the real work; its return value becomes our returncode.
4094 self.returncode = self.merge.merge()
# Base class for command-line dependency arguments (atoms, packages,
# sets). Stores the raw argument and its RootConfig context.
4097 class DependencyArg(object):
4098 def __init__(self, arg=None, root_config=None):
4100 self.root_config = root_config
# __str__ renders the original argument text.
4103 return str(self.arg)
# A dependency argument that is a single dependency atom; normalizes
# the value to a portage.dep.Atom instance.
4105 class AtomArg(DependencyArg):
4106 def __init__(self, atom=None, **kwargs):
4107 DependencyArg.__init__(self, **kwargs)
4109 if not isinstance(self.atom, portage.dep.Atom):
4110 self.atom = portage.dep.Atom(self.atom)
# "set" mirrors the SetArg/PackageArg interface: a tuple of atoms.
4111 self.set = (self.atom, )
class PackageArg(DependencyArg):
	"""A dependency argument that wraps an already-resolved Package
	instance; its atom pins the exact version (=cpv)."""

	def __init__(self, package=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		self.package = package
		# Build an exact-version atom for this package and expose it
		# through the same one-atom "set" interface as AtomArg/SetArg.
		exact_atom = portage.dep.Atom("=" + package.cpv)
		self.atom = exact_atom
		self.set = (exact_atom, )
# A dependency argument naming a package set (written with SETPREFIX,
# e.g. "@world"); strips the prefix to recover the set name.
4120 class SetArg(DependencyArg):
4121 def __init__(self, set=None, **kwargs):
4122 DependencyArg.__init__(self, **kwargs)
4124 self.name = self.arg[len(SETPREFIX):]
# Value object describing one edge in the dependency graph; an unset
# priority defaults to DepPriority() (depth defaulting follows below,
# elided in this extract).
4126 class Dependency(SlotObject):
4127 __slots__ = ("atom", "blocker", "depth",
4128 "parent", "onlydeps", "priority", "root")
4129 def __init__(self, **kwargs):
4130 SlotObject.__init__(self, **kwargs)
4131 if self.priority is None:
4132 self.priority = DepPriority()
4133 if self.depth is None:
4136 class BlockerCache(portage.cache.mappings.MutableMapping):
4137 """This caches blockers of installed packages so that dep_check does not
4138 have to be done for every single installed package on every invocation of
4139 emerge. The cache is invalidated whenever it is detected that something
4140 has changed that might alter the results of dep_check() calls:
4141 1) the set of installed packages (including COUNTER) has changed
4142 2) the old-style virtuals have changed
4145 # Number of uncached packages to trigger cache update, since
4146 # it's wasteful to update it for every vdb change.
4147 _cache_threshold = 5
# Lightweight record for one package's cached blockers.
4149 class BlockerData(object):
4151 __slots__ = ("__weakref__", "atoms", "counter")
4153 def __init__(self, counter, atoms):
4154 self.counter = counter
# Loads (and validates) the pickled cache file "vdb_blockers.pickle"
# under CACHE_PATH; falls back to a fresh empty cache when the file is
# missing, unreadable, stale, or corrupt.
4157 def __init__(self, myroot, vardb):
4159 self._virtuals = vardb.settings.getvirtuals()
4160 self._cache_filename = os.path.join(myroot,
4161 portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4162 self._cache_version = "1"
4163 self._cache_data = None
4164 self._modified = set()
4169 f = open(self._cache_filename, mode='rb')
4170 mypickle = pickle.Unpickler(f)
# Security: disable resolution of global names during unpickling.
4172 mypickle.find_global = None
4173 except AttributeError:
4174 # TODO: If py3k, override Unpickler.find_class().
4176 self._cache_data = mypickle.load()
4179 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4180 if isinstance(e, pickle.UnpicklingError):
4181 writemsg("!!! Error loading '%s': %s\n" % \
4182 (self._cache_filename, str(e)), noiselevel=-1)
# Structural sanity check of the unpickled payload.
4185 cache_valid = self._cache_data and \
4186 isinstance(self._cache_data, dict) and \
4187 self._cache_data.get("version") == self._cache_version and \
4188 isinstance(self._cache_data.get("blockers"), dict)
4190 # Validate all the atoms and counters so that
4191 # corruption is detected as soon as possible.
4192 invalid_items = set()
4193 for k, v in self._cache_data["blockers"].iteritems():
4194 if not isinstance(k, basestring):
4195 invalid_items.add(k)
4198 if portage.catpkgsplit(k) is None:
4199 invalid_items.add(k)
4201 except portage.exception.InvalidData:
4202 invalid_items.add(k)
4204 if not isinstance(v, tuple) or \
4206 invalid_items.add(k)
4209 if not isinstance(counter, (int, long)):
4210 invalid_items.add(k)
4212 if not isinstance(atoms, (list, tuple)):
4213 invalid_items.add(k)
4215 invalid_atom = False
4217 if not isinstance(atom, basestring):
4220 if atom[:1] != "!" or \
4221 not portage.isvalidatom(
4222 atom, allow_blockers=True):
4226 invalid_items.add(k)
# Drop every entry that failed validation above.
4229 for k in invalid_items:
4230 del self._cache_data["blockers"][k]
4231 if not self._cache_data["blockers"]:
# Cache invalid or empty: start over with a fresh structure.
4235 self._cache_data = {"version":self._cache_version}
4236 self._cache_data["blockers"] = {}
4237 self._cache_data["virtuals"] = self._virtuals
4238 self._modified.clear()
4241 """If the current user has permission and the internal blocker cache
4242 been updated, save it to disk and mark it unmodified. This is called
4243 by emerge after it has proccessed blockers for all installed packages.
4244 Currently, the cache is only written if the user has superuser
4245 privileges (since that's required to obtain a lock), but all users
4246 have read access and benefit from faster blocker lookups (as long as
4247 the entire cache is still valid). The cache is stored as a pickled
4248 dict object with the following format:
4252 "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4253 "virtuals" : vardb.settings.getvirtuals()
# Only rewrite the file once enough entries changed (_cache_threshold);
# writes go through an atomic temp stream and get group permissions.
4256 if len(self._modified) >= self._cache_threshold and \
4259 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4260 pickle.dump(self._cache_data, f, protocol=2)
4262 portage.util.apply_secpass_permissions(
4263 self._cache_filename, gid=portage.portage_gid, mode=0644)
4264 except (IOError, OSError), e:
4266 self._modified.clear()
4268 def __setitem__(self, cpv, blocker_data):
4270 Update the cache and mark it as modified for a future call to
4273 @param cpv: Package for which to cache blockers.
4275 @param blocker_data: An object with counter and atoms attributes.
4276 @type blocker_data: BlockerData
# Atoms are stored as plain strings so the pickle stays portable.
4278 self._cache_data["blockers"][cpv] = \
4279 (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4280 self._modified.add(cpv)
4283 if self._cache_data is None:
4284 # triggered by python-trace
4286 return iter(self._cache_data["blockers"])
4288 def __delitem__(self, cpv):
4289 del self._cache_data["blockers"][cpv]
4291 def __getitem__(self, cpv):
4294 @returns: An object with counter and atoms attributes.
4296 return self.BlockerData(*self._cache_data["blockers"][cpv])
# Computes, with caching via BlockerCache, which installed packages
# block (or are blocked by) a candidate package, using dep_check()
# over the *DEPEND variables.
4298 class BlockerDB(object):
4300 def __init__(self, root_config):
4301 self._root_config = root_config
4302 self._vartree = root_config.trees["vartree"]
4303 self._portdb = root_config.trees["porttree"].dbapi
# Both are created lazily by _get_fake_vartree().
4305 self._dep_check_trees = None
4306 self._fake_vartree = None
# Lazily builds (then syncs) a FakeVartree snapshot of the vdb, plus
# the trees mapping that dep_check() expects.
4308 def _get_fake_vartree(self, acquire_lock=0):
4309 fake_vartree = self._fake_vartree
4310 if fake_vartree is None:
4311 fake_vartree = FakeVartree(self._root_config,
4312 acquire_lock=acquire_lock)
4313 self._fake_vartree = fake_vartree
4314 self._dep_check_trees = { self._vartree.root : {
4315 "porttree" : fake_vartree,
4316 "vartree" : fake_vartree,
4319 fake_vartree.sync(acquire_lock=acquire_lock)
# Returns the set of installed packages that conflict with new_pkg,
# in either direction (they block it, or it blocks them).
4322 def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4323 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4324 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4325 settings = self._vartree.settings
# Entries still in stale_cache after the loop refer to packages that
# are no longer installed; they get purged below.
4326 stale_cache = set(blocker_cache)
4327 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4328 dep_check_trees = self._dep_check_trees
4329 vardb = fake_vartree.dbapi
4330 installed_pkgs = list(vardb)
4332 for inst_pkg in installed_pkgs:
4333 stale_cache.discard(inst_pkg.cpv)
4334 cached_blockers = blocker_cache.get(inst_pkg.cpv)
# A changed COUNTER means the package was re-merged; discard cache.
4335 if cached_blockers is not None and \
4336 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4337 cached_blockers = None
4338 if cached_blockers is not None:
4339 blocker_atoms = cached_blockers.atoms
4341 # Use aux_get() to trigger FakeVartree global
4342 # updates on *DEPEND when appropriate.
4343 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
# Temporarily relax strict dep checking for installed packages.
4345 portage.dep._dep_check_strict = False
4346 success, atoms = portage.dep_check(depstr,
4347 vardb, settings, myuse=inst_pkg.use.enabled,
4348 trees=dep_check_trees, myroot=inst_pkg.root)
4350 portage.dep._dep_check_strict = True
4352 pkg_location = os.path.join(inst_pkg.root,
4353 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4354 portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4355 (pkg_location, atoms), noiselevel=-1)
# Only the blocker atoms ("!...") are worth caching.
4358 blocker_atoms = [atom for atom in atoms \
4359 if atom.startswith("!")]
4360 blocker_atoms.sort()
4361 counter = long(inst_pkg.metadata["COUNTER"])
4362 blocker_cache[inst_pkg.cpv] = \
4363 blocker_cache.BlockerData(counter, blocker_atoms)
4364 for cpv in stale_cache:
4365 del blocker_cache[cpv]
4366 blocker_cache.flush()
# Direction 1: installed packages whose blockers match new_pkg.
4368 blocker_parents = digraph()
4370 for pkg in installed_pkgs:
4371 for blocker_atom in blocker_cache[pkg.cpv].atoms:
4372 blocker_atom = blocker_atom.lstrip("!")
4373 blocker_atoms.append(blocker_atom)
4374 blocker_parents.add(blocker_atom, pkg)
4376 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4377 blocking_pkgs = set()
4378 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4379 blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4381 # Check for blockers in the other direction.
4382 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4384 portage.dep._dep_check_strict = False
4385 success, atoms = portage.dep_check(depstr,
4386 vardb, settings, myuse=new_pkg.use.enabled,
4387 trees=dep_check_trees, myroot=new_pkg.root)
4389 portage.dep._dep_check_strict = True
4391 # We should never get this far with invalid deps.
4392 show_invalid_depstring_notice(new_pkg, depstr, atoms)
4395 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4398 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
# Any installed package matched by one of new_pkg's blockers conflicts.
4399 for inst_pkg in installed_pkgs:
4401 blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4402 except (portage.exception.InvalidDependString, StopIteration):
4404 blocking_pkgs.add(inst_pkg)
4406 return blocking_pkgs
# Prints a detailed error explaining an invalid/corrupt dependency
# string, with remediation advice that depends on whether the broken
# package is already installed ("nomerge") or a merge candidate.
4408 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4410 msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4411 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
# parent_node unpacks as a (type, root, cpv, status) tuple.
4412 p_type, p_root, p_key, p_status = parent_node
4414 if p_status == "nomerge":
# Installed package: point the user at its vdb *DEPEND files.
4415 category, pf = portage.catsplit(p_key)
4416 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4417 msg.append("Portage is unable to process the dependencies of the ")
4418 msg.append("'%s' package. " % p_key)
4419 msg.append("In order to correct this problem, the package ")
4420 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4421 msg.append("As a temporary workaround, the --nodeps option can ")
4422 msg.append("be used to ignore all dependencies. For reference, ")
4423 msg.append("the problematic dependencies can be found in the ")
4424 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4426 msg.append("This package can not be installed. ")
4427 msg.append("Please notify the '%s' package maintainer " % p_key)
4428 msg.append("about this problem.")
# Wrap the advice at 72 columns and log everything at ERROR level.
4430 msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4431 writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4433 class PackageVirtualDbapi(portage.dbapi):
4435 A dbapi-like interface class that represents the state of the installed
4436 package database as new packages are installed, replacing any packages
4437 that previously existed in the same slot. The main difference between
4438 this class and fakedbapi is that this one uses Package instances
4439 internally (passed in via cpv_inject() and cpv_remove() calls).
4441 def __init__(self, settings):
4442 portage.dbapi.__init__(self)
4443 self.settings = settings
# Memoizes match() results; invalidated by _clear_cache().
4444 self._match_cache = {}
4450 Remove all packages.
4454 self._cp_map.clear()
4455 self._cpv_map.clear()
# Copy: cp lists are duplicated so mutation of the copy does not
# affect this instance (the Package values themselves are shared).
4458 obj = PackageVirtualDbapi(self.settings)
4459 obj._match_cache = self._match_cache.copy()
4460 obj._cp_map = self._cp_map.copy()
4461 for k, v in obj._cp_map.iteritems():
4462 obj._cp_map[k] = v[:]
4463 obj._cpv_map = self._cpv_map.copy()
4467 return self._cpv_map.itervalues()
# Membership requires the same cpv AND matching Package instance.
4469 def __contains__(self, item):
4470 existing = self._cpv_map.get(item.cpv)
4471 if existing is not None and \
4476 def get(self, item, default=None):
4477 cpv = getattr(item, "cpv", None)
# Fall back to treating item as a (type_name, root, cpv, operation) tuple.
4481 type_name, root, cpv, operation = item
4483 existing = self._cpv_map.get(cpv)
4484 if existing is not None and \
4489 def match_pkgs(self, atom):
4490 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4492 def _clear_cache(self):
4493 if self._categories is not None:
4494 self._categories = None
4495 if self._match_cache:
4496 self._match_cache = {}
# Caching wrapper around the base class match().
4498 def match(self, origdep, use_cache=1):
4499 result = self._match_cache.get(origdep)
4500 if result is not None:
4502 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4503 self._match_cache[origdep] = result
4506 def cpv_exists(self, cpv):
4507 return cpv in self._cpv_map
4509 def cp_list(self, mycp, use_cache=1):
4510 cachelist = self._match_cache.get(mycp)
4511 # cp_list() doesn't expand old-style virtuals
4512 if cachelist and cachelist[0].startswith(mycp):
4514 cpv_list = self._cp_map.get(mycp)
4515 if cpv_list is None:
4518 cpv_list = [pkg.cpv for pkg in cpv_list]
4519 self._cpv_sort_ascending(cpv_list)
# Don't cache empty results for virtuals, which may expand later.
4520 if not (not cpv_list and mycp.startswith("virtual/")):
4521 self._match_cache[mycp] = cpv_list
4525 return list(self._cp_map)
4528 return list(self._cpv_map)
# Insert pkg, displacing any existing package with the same cpv or
# occupying the same slot.
4530 def cpv_inject(self, pkg):
4531 cp_list = self._cp_map.get(pkg.cp)
4534 self._cp_map[pkg.cp] = cp_list
4535 e_pkg = self._cpv_map.get(pkg.cpv)
4536 if e_pkg is not None:
4539 self.cpv_remove(e_pkg)
4540 for e_pkg in cp_list:
4541 if e_pkg.slot_atom == pkg.slot_atom:
4544 self.cpv_remove(e_pkg)
4547 self._cpv_map[pkg.cpv] = pkg
4550 def cpv_remove(self, pkg):
4551 old_pkg = self._cpv_map.get(pkg.cpv)
4554 self._cp_map[pkg.cp].remove(pkg)
4555 del self._cpv_map[pkg.cpv]
4558 def aux_get(self, cpv, wants):
4559 metadata = self._cpv_map[cpv].metadata
# Missing keys default to the empty string, mirroring dbapi convention.
4560 return [metadata.get(x, "") for x in wants]
4562 def aux_update(self, cpv, values):
4563 self._cpv_map[cpv].metadata.update(values)
4566 class depgraph(object):
4568 pkg_tree_map = RootConfig.pkg_tree_map
4570 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4572 def __init__(self, settings, trees, myopts, myparams, spinner):
4573 self.settings = settings
4574 self.target_root = settings["ROOT"]
4575 self.myopts = myopts
4576 self.myparams = myparams
4578 if settings.get("PORTAGE_DEBUG", "") == "1":
4580 self.spinner = spinner
4581 self._running_root = trees["/"]["root_config"]
4582 self._opts_no_restart = Scheduler._opts_no_restart
4583 self.pkgsettings = {}
4584 # Maps slot atom to package for each Package added to the graph.
4585 self._slot_pkg_map = {}
4586 # Maps nodes to the reasons they were selected for reinstallation.
4587 self._reinstall_nodes = {}
4590 self._trees_orig = trees
4592 # Contains a filtered view of preferred packages that are selected
4593 # from available repositories.
4594 self._filtered_trees = {}
4595 # Contains installed packages and new packages that have been added
4597 self._graph_trees = {}
4598 # All Package instances
4599 self._pkg_cache = {}
4600 for myroot in trees:
4601 self.trees[myroot] = {}
4602 # Create a RootConfig instance that references
4603 # the FakeVartree instead of the real one.
4604 self.roots[myroot] = RootConfig(
4605 trees[myroot]["vartree"].settings,
4607 trees[myroot]["root_config"].setconfig)
4608 for tree in ("porttree", "bintree"):
4609 self.trees[myroot][tree] = trees[myroot][tree]
4610 self.trees[myroot]["vartree"] = \
4611 FakeVartree(trees[myroot]["root_config"],
4612 pkg_cache=self._pkg_cache)
4613 self.pkgsettings[myroot] = portage.config(
4614 clone=self.trees[myroot]["vartree"].settings)
4615 self._slot_pkg_map[myroot] = {}
4616 vardb = self.trees[myroot]["vartree"].dbapi
4617 preload_installed_pkgs = "--nodeps" not in self.myopts and \
4618 "--buildpkgonly" not in self.myopts
4619 # This fakedbapi instance will model the state that the vdb will
4620 # have after new packages have been installed.
4621 fakedb = PackageVirtualDbapi(vardb.settings)
4622 if preload_installed_pkgs:
4624 self.spinner.update()
4625 # This triggers metadata updates via FakeVartree.
4626 vardb.aux_get(pkg.cpv, [])
4627 fakedb.cpv_inject(pkg)
4629 # Now that the vardb state is cached in our FakeVartree,
4630 # we won't be needing the real vartree cache for awhile.
4631 # To make some room on the heap, clear the vardbapi
4633 trees[myroot]["vartree"].dbapi._clear_cache()
4636 self.mydbapi[myroot] = fakedb
4639 graph_tree.dbapi = fakedb
4640 self._graph_trees[myroot] = {}
4641 self._filtered_trees[myroot] = {}
4642 # Substitute the graph tree for the vartree in dep_check() since we
4643 # want atom selections to be consistent with package selections
4644 # have already been made.
4645 self._graph_trees[myroot]["porttree"] = graph_tree
4646 self._graph_trees[myroot]["vartree"] = graph_tree
4647 def filtered_tree():
4649 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4650 self._filtered_trees[myroot]["porttree"] = filtered_tree
4652 # Passing in graph_tree as the vartree here could lead to better
4653 # atom selections in some cases by causing atoms for packages that
4654 # have been added to the graph to be preferred over other choices.
4655 # However, it can trigger atom selections that result in
4656 # unresolvable direct circular dependencies. For example, this
4657 # happens with gwydion-dylan which depends on either itself or
4658 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4659 # gwydion-dylan-bin needs to be selected in order to avoid a
4660 # an unresolvable direct circular dependency.
4662 # To solve the problem described above, pass in "graph_db" so that
4663 # packages that have been added to the graph are distinguishable
4664 # from other available packages and installed packages. Also, pass
4665 # the parent package into self._select_atoms() calls so that
4666 # unresolvable direct circular dependencies can be detected and
4667 # avoided when possible.
4668 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4669 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4672 portdb = self.trees[myroot]["porttree"].dbapi
4673 bindb = self.trees[myroot]["bintree"].dbapi
4674 vardb = self.trees[myroot]["vartree"].dbapi
4675 # (db, pkg_type, built, installed, db_keys)
4676 if "--usepkgonly" not in self.myopts:
4677 db_keys = list(portdb._aux_cache_keys)
4678 dbs.append((portdb, "ebuild", False, False, db_keys))
4679 if "--usepkg" in self.myopts:
4680 db_keys = list(bindb._aux_cache_keys)
4681 dbs.append((bindb, "binary", True, False, db_keys))
4682 db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4683 dbs.append((vardb, "installed", True, True, db_keys))
4684 self._filtered_trees[myroot]["dbs"] = dbs
4685 if "--usepkg" in self.myopts:
4686 self.trees[myroot]["bintree"].populate(
4687 "--getbinpkg" in self.myopts,
4688 "--getbinpkgonly" in self.myopts)
4691 self.digraph=portage.digraph()
4692 # contains all sets added to the graph
4694 # contains atoms given as arguments
4695 self._sets["args"] = InternalPackageSet()
4696 # contains all atoms from all sets added to the graph, including
4697 # atoms given as arguments
4698 self._set_atoms = InternalPackageSet()
4699 self._atom_arg_map = {}
4700 # contains all nodes pulled in by self._set_atoms
4701 self._set_nodes = set()
4702 # Contains only Blocker -> Uninstall edges
4703 self._blocker_uninstalls = digraph()
4704 # Contains only Package -> Blocker edges
4705 self._blocker_parents = digraph()
4706 # Contains only irrelevant Package -> Blocker edges
4707 self._irrelevant_blockers = digraph()
4708 # Contains only unsolvable Package -> Blocker edges
4709 self._unsolvable_blockers = digraph()
4710 # Contains all Blocker -> Blocked Package edges
4711 self._blocked_pkgs = digraph()
4712 # Contains world packages that have been protected from
4713 # uninstallation but may not have been added to the graph
4714 # if the graph is not complete yet.
4715 self._blocked_world_pkgs = {}
4716 self._slot_collision_info = {}
4717 # Slot collision nodes are not allowed to block other packages since
4718 # blocker validation is only able to account for one package per slot.
4719 self._slot_collision_nodes = set()
4720 self._parent_atoms = {}
4721 self._slot_conflict_parent_atoms = set()
4722 self._serialized_tasks_cache = None
4723 self._scheduler_graph = None
4724 self._displayed_list = None
4725 self._pprovided_args = []
4726 self._missing_args = []
4727 self._masked_installed = set()
4728 self._unsatisfied_deps_for_display = []
4729 self._unsatisfied_blockers_for_display = None
4730 self._circular_deps_for_display = None
4731 self._dep_stack = []
4732 self._unsatisfied_deps = []
4733 self._initially_unsatisfied_deps = []
4734 self._ignored_deps = []
4735 self._required_set_names = set(["system", "world"])
4736 self._select_atoms = self._select_atoms_highest_available
4737 self._select_package = self._select_pkg_highest_available
4738 self._highest_pkg_cache = {}
4740 def _show_slot_collision_notice(self):
4741 """Show an informational message advising the user to mask one of the
4742 the packages. In some cases it may be possible to resolve this
4743 automatically, but support for backtracking (removal nodes that have
4744 already been selected) will be required in order to handle all possible
4748 if not self._slot_collision_info:
4751 self._show_merge_list()
4754 msg.append("\n!!! Multiple package instances within a single " + \
4755 "package slot have been pulled\n")
4756 msg.append("!!! into the dependency graph, resulting" + \
4757 " in a slot conflict:\n\n")
4759 # Max number of parents shown, to avoid flooding the display.
4761 explanation_columns = 70
4763 for (slot_atom, root), slot_nodes \
4764 in self._slot_collision_info.iteritems():
4765 msg.append(str(slot_atom))
4768 for node in slot_nodes:
4770 msg.append(str(node))
4771 parent_atoms = self._parent_atoms.get(node)
4774 # Prefer conflict atoms over others.
4775 for parent_atom in parent_atoms:
4776 if len(pruned_list) >= max_parents:
4778 if parent_atom in self._slot_conflict_parent_atoms:
4779 pruned_list.add(parent_atom)
4781 # If this package was pulled in by conflict atoms then
4782 # show those alone since those are the most interesting.
4784 # When generating the pruned list, prefer instances
4785 # of DependencyArg over instances of Package.
4786 for parent_atom in parent_atoms:
4787 if len(pruned_list) >= max_parents:
4789 parent, atom = parent_atom
4790 if isinstance(parent, DependencyArg):
4791 pruned_list.add(parent_atom)
4792 # Prefer Packages instances that themselves have been
4793 # pulled into collision slots.
4794 for parent_atom in parent_atoms:
4795 if len(pruned_list) >= max_parents:
4797 parent, atom = parent_atom
4798 if isinstance(parent, Package) and \
4799 (parent.slot_atom, parent.root) \
4800 in self._slot_collision_info:
4801 pruned_list.add(parent_atom)
4802 for parent_atom in parent_atoms:
4803 if len(pruned_list) >= max_parents:
4805 pruned_list.add(parent_atom)
4806 omitted_parents = len(parent_atoms) - len(pruned_list)
4807 parent_atoms = pruned_list
4808 msg.append(" pulled in by\n")
4809 for parent_atom in parent_atoms:
4810 parent, atom = parent_atom
4811 msg.append(2*indent)
4812 if isinstance(parent,
4813 (PackageArg, AtomArg)):
4814 # For PackageArg and AtomArg types, it's
4815 # redundant to display the atom attribute.
4816 msg.append(str(parent))
4818 # Display the specific atom from SetArg or
4820 msg.append("%s required by %s" % (atom, parent))
4823 msg.append(2*indent)
4824 msg.append("(and %d more)\n" % omitted_parents)
4826 msg.append(" (no parents)\n")
4828 explanation = self._slot_conflict_explanation(slot_nodes)
4831 msg.append(indent + "Explanation:\n\n")
4832 for line in textwrap.wrap(explanation, explanation_columns):
4833 msg.append(2*indent + line + "\n")
4836 sys.stderr.write("".join(msg))
4839 explanations_for_all = explanations == len(self._slot_collision_info)
4841 if explanations_for_all or "--quiet" in self.myopts:
4845 msg.append("It may be possible to solve this problem ")
4846 msg.append("by using package.mask to prevent one of ")
4847 msg.append("those packages from being selected. ")
4848 msg.append("However, it is also possible that conflicting ")
4849 msg.append("dependencies exist such that they are impossible to ")
4850 msg.append("satisfy simultaneously. If such a conflict exists in ")
4851 msg.append("the dependencies of two different packages, then those ")
4852 msg.append("packages can not be installed simultaneously.")
4854 from formatter import AbstractFormatter, DumbWriter
4855 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4857 f.add_flowing_data(x)
4861 msg.append("For more information, see MASKED PACKAGES ")
4862 msg.append("section in the emerge man page or refer ")
4863 msg.append("to the Gentoo Handbook.")
4865 f.add_flowing_data(x)
def _slot_conflict_explanation(self, slot_nodes):
    """
    When a slot conflict occurs due to USE deps, there are a few
    different cases to consider:

    1) New USE are correctly set but --newuse wasn't requested so an
       installed package with incorrect USE happened to get pulled
       into graph before the new one.

    2) New USE are incorrectly set but an installed package has correct
       USE so it got pulled into the graph, and a new instance also got
       pulled in due to --newuse or an upgrade.

    3) Multiple USE deps exist that can't be satisfied simultaneously,
       and multiple package instances got pulled into the same slot to
       satisfy the conflicting deps.

    Currently, explanations and suggested courses of action are generated
    for cases 1 and 2. Case 3 is too complex to give a useful suggestion.

    Returns an explanation string, or (apparently) None when no
    suggestion applies.

    NOTE(review): this excerpt elides several statements (early
    returns, branch headers and some assignments) — the original line
    numbering jumps; confirm control flow against the full source.
    """
    if len(slot_nodes) != 2:
        # Suggestions are only implemented for
        # conflicts between two packages.

    all_conflict_atoms = self._slot_conflict_parent_atoms
    # One node should be matched by conflict atoms, the other not.
    matched_atoms = None
    unmatched_node = None
    for node in slot_nodes:
        parent_atoms = self._parent_atoms.get(node)
        if not parent_atoms:
            # Normally, there are always parent atoms. If there are
            # none then something unexpected is happening and there's
            # currently no suggestion for this case.

        conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
        for parent_atom in conflict_atoms:
            parent, atom = parent_atom
            # Suggestions are currently only implemented for cases
            # in which all conflict atoms have USE deps.

            if matched_node is not None:
                # If conflict atoms match multiple nodes
                # then there's no suggestion.

            matched_atoms = conflict_atoms

            if unmatched_node is not None:
                # Neither node is matched by conflict atoms, and
                # there is no suggestion for this case.

            unmatched_node = node

    if matched_node is None or unmatched_node is None:
        # This shouldn't happen.

    # Case 1: same version installed vs. new instance — --newuse (or an
    # explicit reinstall) is the suggested fix.
    if unmatched_node.installed and not matched_node.installed and \
        unmatched_node.cpv == matched_node.cpv:
        # If the conflicting packages are the same version then
        # --newuse should be all that's needed. If they are different
        # versions then there's some other problem.
        return "New USE are correctly set, but --newuse wasn't" + \
            " requested, so an installed package with incorrect USE " + \
            "happened to get pulled into the dependency graph. " + \
            "In order to solve " + \
            "this, either specify the --newuse option or explicitly " + \
            " reinstall '%s'." % matched_node.slot_atom

    # Case 2: the installed package satisfies the conflict atoms — the
    # user should adjust USE to satisfy the listed atoms.
    if matched_node.installed and not unmatched_node.installed:
        atoms = sorted(set(atom for parent, atom in matched_atoms))
        explanation = ("New USE for '%s' are incorrectly set. " + \
            "In order to solve this, adjust USE to satisfy '%s'") % \
            (matched_node.slot_atom, atoms[0])
            # Append the remaining conflict atoms to the message.
            for atom in atoms[1:-1]:
                explanation += ", '%s'" % (atom,)
            explanation += " and '%s'" % (atoms[-1],)
def _process_slot_conflicts(self):
    """
    Process slot conflict data to identify specific atoms which
    lead to conflict. These atoms only match a subset of the
    packages that have been pulled into a given slot.

    NOTE(review): a few statements (loop `continue`s and an `else:`)
    appear elided from this excerpt.
    """
    for (slot_atom, root), slot_nodes \
        in self._slot_collision_info.iteritems():

        # Union of every parent atom recorded for any package that was
        # pulled into this (slot_atom, root).
        all_parent_atoms = set()
        for pkg in slot_nodes:
            parent_atoms = self._parent_atoms.get(pkg)
            if not parent_atoms:
            all_parent_atoms.update(parent_atoms)

        # For each conflicting package, test every known parent atom
        # against it; atoms that do not match some package in the slot
        # are the ones that caused the conflict.
        for pkg in slot_nodes:
            parent_atoms = self._parent_atoms.get(pkg)
            if parent_atoms is None:
                parent_atoms = set()
                self._parent_atoms[pkg] = parent_atoms
            for parent_atom in all_parent_atoms:
                if parent_atom in parent_atoms:
                # Use package set for matching since it will match via
                # PROVIDE when necessary, while match_from_list does not.
                parent, atom = parent_atom
                atom_set = InternalPackageSet(
                    initial_atoms=(atom,))
                if atom_set.findAtomForPackage(pkg):
                    parent_atoms.add(parent_atom)
                    self._slot_conflict_parent_atoms.add(parent_atom)
def _reinstall_for_flags(self, forced_flags,
    orig_use, orig_iuse, cur_use, cur_iuse):
    """Return a set of flags that trigger reinstallation, or None if there
    are no such flags.

    NOTE(review): the final return statements appear elided from this
    excerpt.
    """
    if "--newuse" in self.myopts:
        # --newuse: flags added to or removed from IUSE (minus forced
        # flags), plus any change in effective enabled flags, count.
        flags = set(orig_iuse.symmetric_difference(
            cur_iuse).difference(forced_flags))
        flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
            cur_iuse.intersection(cur_use)))
    elif "changed-use" == self.myopts.get("--reinstall"):
        # --reinstall=changed-use: only changes in the enabled subset of
        # IUSE count, not IUSE membership changes alone.
        flags = orig_iuse.intersection(orig_use).symmetric_difference(
            cur_iuse.intersection(cur_use))
def _create_graph(self, allow_unsatisfied=False):
    # Drain the dependency stack: Package entries are expanded into
    # their dependencies via _add_pkg_deps(); other entries are plain
    # Dependency objects resolved via _add_dep().
    # NOTE(review): the loop header and return statements are elided
    # from this excerpt.
    dep_stack = self._dep_stack
        self.spinner.update()
        dep = dep_stack.pop()
        if isinstance(dep, Package):
            if not self._add_pkg_deps(dep,
                allow_unsatisfied=allow_unsatisfied):
        if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
def _add_dep(self, dep, allow_unsatisfied=False):
    # Resolve a single Dependency: register blocker atoms, select a
    # matching package, and (unless the dep is deliberately ignored)
    # add it to the graph via _add_pkg().
    # NOTE(review): several statements (the blocker branch header,
    # try/return lines) are elided from this excerpt.
    debug = "--debug" in self.myopts
    buildpkgonly = "--buildpkgonly" in self.myopts
    nodeps = "--nodeps" in self.myopts
    empty = "empty" in self.myparams
    deep = "deep" in self.myparams
    # Only treat --update specially near the top of the graph.
    update = "--update" in self.myopts and dep.depth <= 1
        if not buildpkgonly and \
            dep.parent not in self._slot_collision_nodes:
            if dep.parent.onlydeps:
                # It's safe to ignore blockers if the
                # parent is an --onlydeps node.
            # The blocker applies to the root where
            # the parent is or will be installed.
            blocker = Blocker(atom=dep.atom,
                eapi=dep.parent.metadata["EAPI"],
                root=dep.parent.root)
            self._blocker_parents.add(blocker, dep.parent)
    dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
        onlydeps=dep.onlydeps)
        if dep.priority.optional:
            # This could be an unecessary build-time dep
            # pulled in by --with-bdeps=y.
        if allow_unsatisfied:
            self._unsatisfied_deps.append(dep)
        self._unsatisfied_deps_for_display.append(
            ((dep.root, dep.atom), {"myparent":dep.parent}))
    # In some cases, dep_check will return deps that shouldn't
    # be proccessed any further, so they are identified and
    # discarded here. Try to discard as few as possible since
    # discarded dependencies reduce the amount of information
    # available for optimization of merge order.
    if dep.priority.satisfied and \
        not dep_pkg.installed and \
        not (existing_node or empty or deep or update):
        if dep.root == self.target_root:
                # First command-line argument atom matching dep_pkg,
                # if any (consumed as an iterator).
                myarg = self._iter_atoms_for_pkg(dep_pkg).next()
            except StopIteration:
            except portage.exception.InvalidDependString:
                if not dep_pkg.installed:
                    # This shouldn't happen since the package
                    # should have been masked.
            self._ignored_deps.append(dep)
    if not self._add_pkg(dep_pkg, dep):
def _add_pkg(self, pkg, dep):
    # Add a resolved package node to the dependency graph: detect and
    # record slot collisions, register the package with the fake dbapi
    # and old-style virtuals, and queue it for dependency recursion.
    # NOTE(review): a number of statements (branch headers, returns,
    # try:/lock calls) are elided from this excerpt.
        myparent = dep.parent
        priority = dep.priority
    if priority is None:
        priority = DepPriority()
    """
    Fills the digraph with nodes comprised of packages to merge.
    mybigkey is the package spec of the package to merge.
    myparent is the package depending on mybigkey ( or None )
    addme = Should we add this package to the digraph or are we just looking at it's deps?
    Think --onlydeps, we need to ignore packages in that case.
    #IUSE-aware emerge -> USE DEP aware depgraph
    #"no downgrade" emerge
    """
    # Ensure that the dependencies of the same package
    # are never processed more than once.
    previously_added = pkg in self.digraph

    # select the correct /var database that we'll be checking against
    vardbapi = self.trees[pkg.root]["vartree"].dbapi
    pkgsettings = self.pkgsettings[pkg.root]

        # Command-line (arg, atom) pairs that match this package.
        arg_atoms = list(self._iter_atoms_for_pkg(pkg))
    except portage.exception.InvalidDependString, e:
        if not pkg.installed:
            show_invalid_depstring_notice(
                pkg, pkg.metadata["PROVIDE"], str(e))

    if not pkg.onlydeps:
        if not pkg.installed and \
            "empty" not in self.myparams and \
            vardbapi.match(pkg.slot_atom):
            # Increase the priority of dependencies on packages that
            # are being rebuilt. This optimizes merge order so that
            # dependencies are rebuilt/updated as soon as possible,
            # which is needed especially when emerge is called by
            # revdep-rebuild since dependencies may be affected by ABI
            # breakage that has rendered them useless. Don't adjust
            # priority here when in "empty" mode since all packages
            # are being merged in that case.
            priority.rebuild = True

        existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
        slot_collision = False
            existing_node_matches = pkg.cpv == existing_node.cpv
            if existing_node_matches and \
                pkg != existing_node and \
                dep.atom is not None:
                # Use package set for matching since it will match via
                # PROVIDE when necessary, while match_from_list does not.
                atom_set = InternalPackageSet(initial_atoms=[dep.atom])
                if not atom_set.findAtomForPackage(existing_node):
                    existing_node_matches = False
            if existing_node_matches:
                # The existing node can be reused.
                    for parent_atom in arg_atoms:
                        parent, atom = parent_atom
                        self.digraph.add(existing_node, parent,
                        self._add_parent_atom(existing_node, parent_atom)
                # If a direct circular dependency is not an unsatisfied
                # buildtime dependency then drop it here since otherwise
                # it can skew the merge order calculation in an unwanted
                if existing_node != myparent or \
                    (priority.buildtime and not priority.satisfied):
                    self.digraph.addnode(existing_node, myparent,
                    if dep.atom is not None and dep.parent is not None:
                        self._add_parent_atom(existing_node,
                            (dep.parent, dep.atom))
                # A slot collision has occurred. Sometimes this coincides
                # with unresolvable blockers, so the slot collision will be
                # shown later if there are no unresolvable blockers.
                self._add_slot_conflict(pkg)
                slot_collision = True

            # Now add this node to the graph so that self.display()
            # can show use flags and --tree portage.output. This node is
            # only being partially added to the graph. It must not be
            # allowed to interfere with the other nodes that have been
            # added. Do not overwrite data for existing nodes in
            # self.mydbapi since that data will be used for blocker
            # Even though the graph is now invalid, continue to process
            # dependencies so that things like --fetchonly can still
            # function despite collisions.
        elif not previously_added:
            # First time we see this slot: claim it and register the
            # package with the per-root fake dbapi.
            self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
            self.mydbapi[pkg.root].cpv_inject(pkg)
            self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()

        if not pkg.installed:
            # Allow this package to satisfy old-style virtuals in case it
            # doesn't already. Any pre-existing providers will be preferred
                pkgsettings.setinst(pkg.cpv, pkg.metadata)
                # For consistency, also update the global virtuals.
                settings = self.roots[pkg.root].settings
                settings.setinst(pkg.cpv, pkg.metadata)
            except portage.exception.InvalidDependString, e:
                show_invalid_depstring_notice(
                    pkg, pkg.metadata["PROVIDE"], str(e))

        # Remember packages that were pulled in by a set/argument.
        self._set_nodes.add(pkg)

    # Do this even when addme is False (--onlydeps) so that the
    # parent/child relationship is always known in case
    # self._show_slot_collision_notice() needs to be called later.
    self.digraph.add(pkg, myparent, priority=priority)
    if dep.atom is not None and dep.parent is not None:
        self._add_parent_atom(pkg, (dep.parent, dep.atom))

        for parent_atom in arg_atoms:
            parent, atom = parent_atom
            self.digraph.add(pkg, parent, priority=priority)
            self._add_parent_atom(pkg, parent_atom)

    """ This section determines whether we go deeper into dependencies or not.
    We want to go deeper on a few occasions:
    Installing package A, we need to make sure package A's deps are met.
    emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
    If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
    """
    dep_stack = self._dep_stack
    if "recurse" not in self.myparams:
    elif pkg.installed and \
        "deep" not in self.myparams:
        # Without --deep, deps of installed packages are only recorded
        # for later, not recursed into now.
        dep_stack = self._ignored_deps

    self.spinner.update()

    if not previously_added:
        dep_stack.append(pkg)
def _add_parent_atom(self, pkg, parent_atom):
    # Record that parent_atom (a (parent, atom) pair) pulled pkg into
    # the graph, lazily creating the per-package set on first use.
    self._parent_atoms.setdefault(pkg, set()).add(parent_atom)
def _add_slot_conflict(self, pkg):
    # Register pkg as a slot-conflict participant: remember the node
    # itself and group it, together with the package currently occupying
    # the same slot, under a (slot_atom, root) key.
    # NOTE(review): the creation of the empty set appears elided from
    # this excerpt.
    self._slot_collision_nodes.add(pkg)
    slot_key = (pkg.slot_atom, pkg.root)
    slot_nodes = self._slot_collision_info.get(slot_key)
    if slot_nodes is None:
        # Seed the group with the package already holding this slot.
        slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
        self._slot_collision_info[slot_key] = slot_nodes
def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
    # Expand pkg's DEPEND/RDEPEND/PDEPEND strings into Dependency
    # objects (with appropriate roots and priorities) and feed each one
    # to _add_dep().
    # NOTE(review): several statements (loop/try headers, returns and
    # some assignments) are elided from this excerpt.

    mytype = pkg.type_name
    metadata = pkg.metadata
    myuse = pkg.use.enabled
    depth = pkg.depth + 1
    removal_action = "remove" in self.myparams

    depkeys = ["DEPEND","RDEPEND","PDEPEND"]
        edepend[k] = metadata[k]

    if not pkg.built and \
        "--buildpkgonly" in self.myopts and \
        "deep" not in self.myparams and \
        "empty" not in self.myparams:
        # --buildpkgonly without deep/empty: runtime deps are not
        # needed just to build the binary package.
        edepend["RDEPEND"] = ""
        edepend["PDEPEND"] = ""
    bdeps_optional = False

    if pkg.built and not removal_action:
        if self.myopts.get("--with-bdeps", "n") == "y":
            # Pull in build time deps as requested, but marked them as
            # "optional" since they are not strictly required. This allows
            # more freedom in the merge order calculation for solving
            # circular dependencies. Don't convert to PDEPEND since that
            # could make --with-bdeps=y less effective if it is used to
            # adjust merge order to prevent built_with_use() calls from
            bdeps_optional = True
            # built packages do not have build time dependencies.
            edepend["DEPEND"] = ""

    if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
        edepend["DEPEND"] = ""

    root_deps = self.myopts.get("--root-deps")
    if root_deps is not None:
        if root_deps is True:
        elif root_deps == "rdeps":
            # --root-deps=rdeps: ignore build-time deps entirely.
            edepend["DEPEND"] = ""

    # (dep_root, dep_string, priority) triples: DEPEND goes to the
    # build-deps root with buildtime priority, RDEPEND/PDEPEND to the
    # package's own root with runtime priorities.
        (bdeps_root, edepend["DEPEND"],
            self._priority(buildtime=(not bdeps_optional),
            optional=bdeps_optional)),
        (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
        (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))

    debug = "--debug" in self.myopts
    # Installed packages may have stale dep strings; don't be strict.
    strict = mytype != "installed"
        for dep_root, dep_string, dep_priority in deps:
                print "Parent: ", jbigkey
                print "Depstring:", dep_string
                print "Priority:", dep_priority
            vardb = self.roots[dep_root].trees["vartree"].dbapi
                # dep_check the string against current USE/settings.
                selected_atoms = self._select_atoms(dep_root,
                    dep_string, myuse=myuse, parent=pkg, strict=strict,
                    priority=dep_priority)
            except portage.exception.InvalidDependString, e:
                show_invalid_depstring_notice(jbigkey, dep_string, str(e))
                print "Candidates:", selected_atoms

            for atom in selected_atoms:
                    atom = portage.dep.Atom(atom)

                    mypriority = dep_priority.copy()
                    # Already satisfied by an installed package?
                    if not atom.blocker and vardb.match(atom):
                        mypriority.satisfied = True

                    if not self._add_dep(Dependency(atom=atom,
                        blocker=atom.blocker, depth=depth, parent=pkg,
                        priority=mypriority, root=dep_root),
                        allow_unsatisfied=allow_unsatisfied):

                except portage.exception.InvalidAtom, e:
                    show_invalid_depstring_notice(
                        pkg, dep_string, str(e))
                    if not pkg.installed:

                print "Exiting...", jbigkey
    except portage.exception.AmbiguousPackageName, e:
        portage.writemsg("\n\n!!! An atom in the dependencies " + \
            "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
            portage.writemsg(" %s\n" % cpv, noiselevel=-1)
        portage.writemsg("\n", noiselevel=-1)
        if mytype == "binary":
            portage.writemsg(
                "!!! This binary package cannot be installed: '%s'\n" % \
                mykey, noiselevel=-1)
        elif mytype == "ebuild":
            portdb = self.roots[myroot].trees["porttree"].dbapi
            myebuild, mylocation = portdb.findname2(mykey)
            portage.writemsg("!!! This ebuild cannot be installed: " + \
                "'%s'\n" % myebuild, noiselevel=-1)
        portage.writemsg("!!! Please notify the package maintainer " + \
            "that atoms must be fully-qualified.\n", noiselevel=-1)
def _priority(self, **kwargs):
    """
    Construct a dependency priority object appropriate for the current
    operation: UnmergeDepPriority during a "remove" run, DepPriority
    otherwise. Keyword arguments are forwarded to the constructor.
    """
    if "remove" in self.myparams:
        priority_constructor = UnmergeDepPriority
    else:
        # Fix: without this else branch, the assignment below
        # unconditionally clobbered the UnmergeDepPriority selection
        # made above, so "remove" runs got the wrong priority class.
        priority_constructor = DepPriority
    return priority_constructor(**kwargs)
def _dep_expand(self, root_config, atom_without_category):
    """
    Expand an atom that lacks a category into candidate atoms, one per
    category that contains a matching package name in any configured db.

    @param root_config: a root config instance
    @type root_config: RootConfig
    @param atom_without_category: an atom without a category component
    @type atom_without_category: String
    @returns: a list of atoms containing categories (possibly empty)

    NOTE(review): the container initializations and the return
    statement appear elided from this excerpt.
    """
    # Splice a dummy "null" category in so the package-name part can be
    # extracted with the normal cat/pkg helpers.
    null_cp = portage.dep_getkey(insert_category_into_atom(
        atom_without_category, "null"))
    cat, atom_pn = portage.catsplit(null_cp)

    dbs = self._filtered_trees[root_config.root]["dbs"]
    # Collect every category that offers a package named atom_pn.
    for db, pkg_type, built, installed, db_keys in dbs:
        for cat in db.categories:
            if db.cp_list("%s/%s" % (cat, atom_pn)):

    for cat in categories:
        deps.append(insert_category_into_atom(
            atom_without_category, cat))
def _have_new_virt(self, root, atom_cp):
    # Checks whether any configured db for this root carries a package
    # under atom_cp (presumably detecting a new-style virtual, per the
    # name — confirm against the full source; the return statements
    # are elided from this excerpt).
    for db, pkg_type, built, installed, db_keys in \
        self._filtered_trees[root]["dbs"]:
        if db.cp_list(atom_cp):
def _iter_atoms_for_pkg(self, pkg):
    # Filter the command-line argument atoms that apply to pkg; callers
    # consume the result as an iterator (see _add_dep).
    # NOTE(review): several statements (continue/break/yield lines) are
    # elided from this excerpt.
    # TODO: add multiple $ROOT support
    if pkg.root != self.target_root:
    atom_arg_map = self._atom_arg_map
    root_config = self.roots[pkg.root]
    for atom in self._set_atoms.iterAtomsForPackage(pkg):
        atom_cp = portage.dep_getkey(atom)
        # Skip atoms whose category/package has been superseded by a
        # new-style virtual for this root.
        if atom_cp != pkg.cp and \
            self._have_new_virt(pkg.root, atom_cp):
        visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
        visible_pkgs.reverse() # descending order
        # Look for a visible match in a different slot that is higher
        # than pkg; if one exists, this atom is not attributed to pkg.
        for visible_pkg in visible_pkgs:
            if visible_pkg.cp != atom_cp:
            if pkg >= visible_pkg:
                # This is descending order, and we're not
                # interested in any versions <= pkg given.
            if pkg.slot_atom != visible_pkg.slot_atom:
                higher_slot = visible_pkg
        if higher_slot is not None:
        for arg in atom_arg_map[(atom, pkg.root)]:
            if isinstance(arg, PackageArg) and \
def select_files(self, myfiles):
    """Given a list of .tbz2s, .ebuilds sets, and deps, create the
    appropriate depgraph and return a favorite list.

    NOTE(review): numerous statements (branch headers, continues,
    returns, try headers) are elided from this excerpt; comments below
    annotate the visible logic only.
    """
    debug = "--debug" in self.myopts
    root_config = self.roots[self.target_root]
    sets = root_config.sets
    getSetAtoms = root_config.setconfig.getSetAtoms
    myroot = self.target_root
    dbs = self._filtered_trees[myroot]["dbs"]
    vardb = self.trees[myroot]["vartree"].dbapi
    real_vardb = self._trees_orig[myroot]["vartree"].dbapi
    portdb = self.trees[myroot]["porttree"].dbapi
    bindb = self.trees[myroot]["bintree"].dbapi
    pkgsettings = self.pkgsettings[myroot]
    onlydeps = "--onlydeps" in self.myopts
        # Classify each argument by its form: .tbz2 binary package,
        # .ebuild file, absolute file path, set name, or plain atom.
        ext = os.path.splitext(x)[1]
            # --- binary package (.tbz2) argument ---
            if not os.path.exists(x):
                # Try to locate the tbz2 under PKGDIR.
                    os.path.join(pkgsettings["PKGDIR"], "All", x)):
                    x = os.path.join(pkgsettings["PKGDIR"], "All", x)
                elif os.path.exists(
                    os.path.join(pkgsettings["PKGDIR"], x)):
                    x = os.path.join(pkgsettings["PKGDIR"], x)
                    print "\n\n!!! Binary package '"+str(x)+"' does not exist."
                    print "!!! Please ensure the tbz2 exists as specified.\n"
                    return 0, myfavorites
            # Derive the cpv key from the package's embedded CATEGORY.
            mytbz2=portage.xpak.tbz2(x)
            mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
            if os.path.realpath(x) != \
                os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
                print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
                return 0, myfavorites
            db_keys = list(bindb._aux_cache_keys)
            metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
            pkg = Package(type_name="binary", root_config=root_config,
                cpv=mykey, built=True, metadata=metadata,
            self._pkg_cache[pkg] = pkg
            args.append(PackageArg(arg=x, package=pkg,
                root_config=root_config))
        elif ext==".ebuild":
            # --- raw .ebuild file argument ---
            ebuild_path = portage.util.normalize_path(os.path.abspath(x))
            pkgdir = os.path.dirname(ebuild_path)
            tree_root = os.path.dirname(os.path.dirname(pkgdir))
            cp = pkgdir[len(tree_root)+1:]
            e = portage.exception.PackageNotFound(
                ("%s is not in a valid portage tree " + \
                "hierarchy or does not exist") % x)
            if not portage.isvalidatom(cp):
            cat = portage.catsplit(cp)[0]
            # Strip the ".ebuild" suffix (7 chars) to form cat/pkg-ver.
            mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
            if not portage.isvalidatom("="+mykey):
            ebuild_path = portdb.findname(mykey)
                if ebuild_path != os.path.join(os.path.realpath(tree_root),
                    cp, os.path.basename(ebuild_path)):
                    print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
                    return 0, myfavorites
                if mykey not in portdb.xmatch(
                    "match-visible", portage.dep_getkey(mykey)):
                    # Warn (with a delay) rather than refuse when the
                    # requested ebuild is masked.
                    print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
                    print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
                    print colorize("BAD", "*** page for details.")
                    countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
                raise portage.exception.PackageNotFound(
                    "%s is not in a valid portage tree hierarchy or does not exist" % x)
            db_keys = list(portdb._aux_cache_keys)
            metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
            pkg = Package(type_name="ebuild", root_config=root_config,
                cpv=mykey, metadata=metadata, onlydeps=onlydeps)
            pkgsettings.setcpv(pkg)
            pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
            pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
            self._pkg_cache[pkg] = pkg
            args.append(PackageArg(arg=x, package=pkg,
                root_config=root_config))
        elif x.startswith(os.path.sep):
            # --- absolute file path argument: resolve via ownership ---
            if not x.startswith(myroot):
                portage.writemsg(("\n\n!!! '%s' does not start with" + \
                    " $ROOT.\n") % x, noiselevel=-1)
            # Queue these up since it's most efficient to handle
            # multiple files in a single iter_owners() call.
            lookup_owners.append(x)
            if x in ("system", "world"):
            if x.startswith(SETPREFIX):
                # --- package set argument ---
                s = x[len(SETPREFIX):]
                    raise portage.exception.PackageSetNotFound(s)
                # Recursively expand sets so that containment tests in
                # self._get_parent_sets() properly match atoms in nested
                # sets (like if world contains system).
                expanded_set = InternalPackageSet(
                    initial_atoms=getSetAtoms(s))
                self._sets[s] = expanded_set
                args.append(SetArg(arg=x, set=expanded_set,
                    root_config=root_config))
            if not is_valid_package_atom(x):
                portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
                portage.writemsg("!!! Please check ebuild(5) for full details.\n")
                portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
            # Don't expand categories or old-style virtuals here unless
            # necessary. Expansion of old-style virtuals here causes at
            # least the following problems:
            # 1) It's more difficult to determine which set(s) an atom
            # came from, if any.
            # 2) It takes away freedom from the resolver to choose other
            # possible expansions when necessary.
                args.append(AtomArg(arg=x, atom=x,
                    root_config=root_config))
            # Category-less atom: expand and try to disambiguate using
            # what is actually installed.
            expanded_atoms = self._dep_expand(root_config, x)
            installed_cp_set = set()
            for atom in expanded_atoms:
                atom_cp = portage.dep_getkey(atom)
                if vardb.cp_list(atom_cp):
                    installed_cp_set.add(atom_cp)

            if len(installed_cp_set) > 1:
                # Prefer the non-virtual candidate when exactly one
                # non-virtual category is installed.
                non_virtual_cps = set()
                for atom_cp in installed_cp_set:
                    if not atom_cp.startswith("virtual/"):
                        non_virtual_cps.add(atom_cp)
                if len(non_virtual_cps) == 1:
                    installed_cp_set = non_virtual_cps
            if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
                installed_cp = iter(installed_cp_set).next()
                expanded_atoms = [atom for atom in expanded_atoms \
                    if portage.dep_getkey(atom) == installed_cp]

            if len(expanded_atoms) > 1:
                # Still ambiguous: report and bail out.
                ambiguous_package_name(x, expanded_atoms, root_config,
                    self.spinner, self.myopts)
                return False, myfavorites
                atom = expanded_atoms[0]
                # No match at all: check p_virtuals before falling back
                # to a "null" category.
                null_atom = insert_category_into_atom(x, "null")
                null_cp = portage.dep_getkey(null_atom)
                cat, atom_pn = portage.catsplit(null_cp)
                virts_p = root_config.settings.get_virts_p().get(atom_pn)
                    # Allow the depgraph to choose which virtual.
                    atom = insert_category_into_atom(x, "virtual")
                    atom = insert_category_into_atom(x, "null")

            args.append(AtomArg(arg=x, atom=atom,
                root_config=root_config))

        # Resolve queued file paths to owning packages in one pass.
        search_for_multiple = False
        if len(lookup_owners) > 1:
            search_for_multiple = True

        for x in lookup_owners:
            if not search_for_multiple and os.path.isdir(x):
                search_for_multiple = True
            relative_paths.append(x[len(myroot):])

        for pkg, relative_path in \
            real_vardb._owners.iter_owners(relative_paths):
            owners.add(pkg.mycpv)
            if not search_for_multiple:

            portage.writemsg(("\n\n!!! '%s' is not claimed " + \
                "by any package.\n") % lookup_owners[0], noiselevel=-1)

            slot = vardb.aux_get(cpv, ["SLOT"])[0]
                # portage now masks packages with missing slot, but it's
                # possible that one was installed by an older version
                atom = portage.cpv_getkey(cpv)
                atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
            args.append(AtomArg(arg=atom, atom=atom,
                root_config=root_config))

    if "--update" in self.myopts:
        # In some cases, the greedy slots behavior can pull in a slot that
        # the user would want to uninstall due to it being blocked by a
        # newer version in a different slot. Therefore, it's necessary to
        # detect and discard any that should be uninstalled. Each time
        # that arguments are updated, package selections are repeated in
        # order to ensure consistency with the current arguments:
        # 1) Initialize args
        # 2) Select packages and generate initial greedy atoms
        # 3) Update args with greedy atoms
        # 4) Select packages and generate greedy atoms again, while
        # accounting for any blockers between selected packages
        # 5) Update args with revised greedy atoms
        self._set_args(args)
            greedy_args.append(arg)
            if not isinstance(arg, AtomArg):
            for atom in self._greedy_slots(arg.root_config, arg.atom):
                    AtomArg(arg=arg.arg, atom=atom,
                        root_config=arg.root_config))
        self._set_args(greedy_args)

        # Revise greedy atoms, accounting for any blockers
        # between selected packages.
        revised_greedy_args = []
            revised_greedy_args.append(arg)
            if not isinstance(arg, AtomArg):
            for atom in self._greedy_slots(arg.root_config, arg.atom,
                blocker_lookahead=True):
                revised_greedy_args.append(
                    AtomArg(arg=arg.arg, atom=atom,
                        root_config=arg.root_config))
        args = revised_greedy_args
        del revised_greedy_args

    self._set_args(args)

    # Favorites (recorded in the world file by the caller) come from
    # atom/package arguments and set names.
    myfavorites = set(myfavorites)
        if isinstance(arg, (AtomArg, PackageArg)):
            myfavorites.add(arg.atom)
        elif isinstance(arg, SetArg):
            myfavorites.add(arg.arg)
    myfavorites = list(myfavorites)

    pprovideddict = pkgsettings.pprovideddict
        portage.writemsg("\n", noiselevel=-1)
    # Order needs to be preserved since a feature of --nodeps
    # is to allow the user to force a specific merge order.
        # --- main resolution loop over argument atoms ---
        for atom in arg.set:
            self.spinner.update()
            dep = Dependency(atom=atom, onlydeps=onlydeps,
                root=myroot, parent=arg)
            atom_cp = portage.dep_getkey(atom)
                pprovided = pprovideddict.get(portage.dep_getkey(atom))
                if pprovided and portage.match_from_list(atom, pprovided):
                    # A provided package has been specified on the command line.
                    self._pprovided_args.append((arg, atom))
                if isinstance(arg, PackageArg):
                    if not self._add_pkg(arg.package, dep) or \
                        not self._create_graph():
                        sys.stderr.write(("\n\n!!! Problem resolving " + \
                            "dependencies for %s\n") % arg.arg)
                        return 0, myfavorites
                    portage.writemsg(" Arg: %s\n Atom: %s\n" % \
                        (arg, atom), noiselevel=-1)
                pkg, existing_node = self._select_package(
                    myroot, atom, onlydeps=onlydeps)
                    # No package: fatal unless the atom came from the
                    # system or world set.
                    if not (isinstance(arg, SetArg) and \
                        arg.name in ("system", "world")):
                        self._unsatisfied_deps_for_display.append(
                            ((myroot, atom), {}))
                        return 0, myfavorites
                    self._missing_args.append((arg, atom))
                if atom_cp != pkg.cp:
                    # For old-style virtuals, we need to repeat the
                    # package.provided check against the selected package.
                    expanded_atom = atom.replace(atom_cp, pkg.cp)
                    pprovided = pprovideddict.get(pkg.cp)
                        portage.match_from_list(expanded_atom, pprovided):
                        # A provided package has been
                        # specified on the command line.
                        self._pprovided_args.append((arg, atom))
                if pkg.installed and "selective" not in self.myparams:
                    self._unsatisfied_deps_for_display.append(
                        ((myroot, atom), {}))
                    # Previous behavior was to bail out in this case, but
                    # since the dep is satisfied by the installed package,
                    # it's more friendly to continue building the graph
                    # and just show a warning message. Therefore, only bail
                    # out here if the atom is not from either the system or
                    if not (isinstance(arg, SetArg) and \
                        arg.name in ("system", "world")):
                        return 0, myfavorites

                # Add the selected package to the graph as soon as possible
                # so that later dep_check() calls can use it as feedback
                # for making more consistent atom selections.
                if not self._add_pkg(pkg, dep):
                    if isinstance(arg, SetArg):
                        sys.stderr.write(("\n\n!!! Problem resolving " + \
                            "dependencies for %s from %s\n") % \
                        sys.stderr.write(("\n\n!!! Problem resolving " + \
                            "dependencies for %s\n") % atom)
                    return 0, myfavorites

            except portage.exception.MissingSignature, e:
                portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
                portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
                portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
                portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
                portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
                return 0, myfavorites
            except portage.exception.InvalidSignature, e:
                portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
                portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
                portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
                portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
                portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
                return 0, myfavorites
            except SystemExit, e:
                raise # Needed else can't exit
            except Exception, e:
                print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
                print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)

    # Now that the root packages have been added to the graph,
    # process the dependencies.
    if not self._create_graph():
        return 0, myfavorites

    if "--usepkgonly" in self.myopts:
        # Count graph nodes that would need a (missing) binary package.
        for xs in self.digraph.all_nodes():
            if not isinstance(xs, Package):
            if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
                    print "Missing binary for:",xs[2]

    except self._unknown_internal_error:
        return False, myfavorites

    # We're true here unless we are missing binaries.
    return (not missing,myfavorites)
5850 def _set_args(self, args):
5852 Create the "args" package set from atoms and packages given as
5853 arguments. This method can be called multiple times if necessary.
5854 The package selection cache is automatically invalidated, since
5855 arguments influence package selections.
5857 args_set = self._sets["args"]
5860 if not isinstance(arg, (AtomArg, PackageArg)):
5863 if atom in args_set:
5867 self._set_atoms.clear()
5868 self._set_atoms.update(chain(*self._sets.itervalues()))
5869 atom_arg_map = self._atom_arg_map
5870 atom_arg_map.clear()
5872 for atom in arg.set:
5873 atom_key = (atom, arg.root_config.root)
5874 refs = atom_arg_map.get(atom_key)
5877 atom_arg_map[atom_key] = refs
5881 # Invalidate the package selection cache, since
5882 # arguments influence package selections.
5883 self._highest_pkg_cache.clear()
5884 for trees in self._filtered_trees.itervalues():
5885 trees["porttree"].dbapi._clear_cache()
5887 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5889 Return a list of slot atoms corresponding to installed slots that
5890 differ from the slot of the highest visible match. When
5891 blocker_lookahead is True, slot atoms that would trigger a blocker
5892 conflict are automatically discarded, potentially allowing automatic
5893 uninstallation of older slots when appropriate.
5895 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5896 if highest_pkg is None:
5898 vardb = root_config.trees["vartree"].dbapi
5900 for cpv in vardb.match(atom):
5901 # don't mix new virtuals with old virtuals
5902 if portage.cpv_getkey(cpv) == highest_pkg.cp:
5903 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5905 slots.add(highest_pkg.metadata["SLOT"])
5909 slots.remove(highest_pkg.metadata["SLOT"])
5912 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5913 pkg, in_graph = self._select_package(root_config.root, slot_atom)
5914 if pkg is not None and \
5915 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5916 greedy_pkgs.append(pkg)
5919 if not blocker_lookahead:
5920 return [pkg.slot_atom for pkg in greedy_pkgs]
5923 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5924 for pkg in greedy_pkgs + [highest_pkg]:
5925 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5927 atoms = self._select_atoms(
5928 pkg.root, dep_str, pkg.use.enabled,
5929 parent=pkg, strict=True)
5930 except portage.exception.InvalidDependString:
5932 blocker_atoms = (x for x in atoms if x.blocker)
5933 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5935 if highest_pkg not in blockers:
5938 # filter packages with invalid deps
5939 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5941 # filter packages that conflict with highest_pkg
5942 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5943 (blockers[highest_pkg].findAtomForPackage(pkg) or \
5944 blockers[pkg].findAtomForPackage(highest_pkg))]
5949 # If two packages conflict, discard the lower version.
5950 discard_pkgs = set()
5951 greedy_pkgs.sort(reverse=True)
5952 for i in xrange(len(greedy_pkgs) - 1):
5953 pkg1 = greedy_pkgs[i]
5954 if pkg1 in discard_pkgs:
5956 for j in xrange(i + 1, len(greedy_pkgs)):
5957 pkg2 = greedy_pkgs[j]
5958 if pkg2 in discard_pkgs:
5960 if blockers[pkg1].findAtomForPackage(pkg2) or \
5961 blockers[pkg2].findAtomForPackage(pkg1):
5963 discard_pkgs.add(pkg2)
5965 return [pkg.slot_atom for pkg in greedy_pkgs \
5966 if pkg not in discard_pkgs]
5968 def _select_atoms_from_graph(self, *pargs, **kwargs):
5970 Prefer atoms matching packages that have already been
5971 added to the graph or those that are installed and have
5972 not been scheduled for replacement.
5974 kwargs["trees"] = self._graph_trees
5975 return self._select_atoms_highest_available(*pargs, **kwargs)
5977 def _select_atoms_highest_available(self, root, depstring,
5978 myuse=None, parent=None, strict=True, trees=None, priority=None):
5979 """This will raise InvalidDependString if necessary. If trees is
5980 None then self._filtered_trees is used."""
5981 pkgsettings = self.pkgsettings[root]
5983 trees = self._filtered_trees
5984 if not getattr(priority, "buildtime", False):
5985 # The parent should only be passed to dep_check() for buildtime
5986 # dependencies since that's the only case when it's appropriate
5987 # to trigger the circular dependency avoidance code which uses it.
5988 # It's important not to trigger the same circular dependency
5989 # avoidance code for runtime dependencies since it's not needed
5990 # and it can promote an incorrect package choice.
5994 if parent is not None:
5995 trees[root]["parent"] = parent
5997 portage.dep._dep_check_strict = False
5998 mycheck = portage.dep_check(depstring, None,
5999 pkgsettings, myuse=myuse,
6000 myroot=root, trees=trees)
6002 if parent is not None:
6003 trees[root].pop("parent")
6004 portage.dep._dep_check_strict = True
6006 raise portage.exception.InvalidDependString(mycheck[1])
6007 selected_atoms = mycheck[1]
6008 return selected_atoms
6010 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
6011 atom = portage.dep.Atom(atom)
6012 atom_set = InternalPackageSet(initial_atoms=(atom,))
6013 atom_without_use = atom
6015 atom_without_use = portage.dep.remove_slot(atom)
6017 atom_without_use += ":" + atom.slot
6018 atom_without_use = portage.dep.Atom(atom_without_use)
6019 xinfo = '"%s"' % atom
6022 # Discard null/ from failed cpv_expand category expansion.
6023 xinfo = xinfo.replace("null/", "")
6024 masked_packages = []
6026 masked_pkg_instances = set()
6027 missing_licenses = []
6028 have_eapi_mask = False
6029 pkgsettings = self.pkgsettings[root]
6030 implicit_iuse = pkgsettings._get_implicit_iuse()
6031 root_config = self.roots[root]
6032 portdb = self.roots[root].trees["porttree"].dbapi
6033 dbs = self._filtered_trees[root]["dbs"]
6034 for db, pkg_type, built, installed, db_keys in dbs:
6038 if hasattr(db, "xmatch"):
6039 cpv_list = db.xmatch("match-all", atom_without_use)
6041 cpv_list = db.match(atom_without_use)
6044 for cpv in cpv_list:
6045 metadata, mreasons = get_mask_info(root_config, cpv,
6046 pkgsettings, db, pkg_type, built, installed, db_keys)
6047 if metadata is not None:
6048 pkg = Package(built=built, cpv=cpv,
6049 installed=installed, metadata=metadata,
6050 root_config=root_config)
6051 if pkg.cp != atom.cp:
6052 # A cpv can be returned from dbapi.match() as an
6053 # old-style virtual match even in cases when the
6054 # package does not actually PROVIDE the virtual.
6055 # Filter out any such false matches here.
6056 if not atom_set.findAtomForPackage(pkg):
6059 masked_pkg_instances.add(pkg)
6061 missing_use.append(pkg)
6064 masked_packages.append(
6065 (root_config, pkgsettings, cpv, metadata, mreasons))
6067 missing_use_reasons = []
6068 missing_iuse_reasons = []
6069 for pkg in missing_use:
6070 use = pkg.use.enabled
6071 iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
6072 iuse_re = re.compile("^(%s)$" % "|".join(iuse))
6074 for x in atom.use.required:
6075 if iuse_re.match(x) is None:
6076 missing_iuse.append(x)
6079 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
6080 missing_iuse_reasons.append((pkg, mreasons))
6082 need_enable = sorted(atom.use.enabled.difference(use))
6083 need_disable = sorted(atom.use.disabled.intersection(use))
6084 if need_enable or need_disable:
6086 changes.extend(colorize("red", "+" + x) \
6087 for x in need_enable)
6088 changes.extend(colorize("blue", "-" + x) \
6089 for x in need_disable)
6090 mreasons.append("Change USE: %s" % " ".join(changes))
6091 missing_use_reasons.append((pkg, mreasons))
6093 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6094 in missing_use_reasons if pkg not in masked_pkg_instances]
6096 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6097 in missing_iuse_reasons if pkg not in masked_pkg_instances]
6099 show_missing_use = False
6100 if unmasked_use_reasons:
6101 # Only show the latest version.
6102 show_missing_use = unmasked_use_reasons[:1]
6103 elif unmasked_iuse_reasons:
6104 if missing_use_reasons:
6105 # All packages with required IUSE are masked,
6106 # so display a normal masking message.
6109 show_missing_use = unmasked_iuse_reasons
6111 if show_missing_use:
6112 print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6113 print "!!! One of the following packages is required to complete your request:"
6114 for pkg, mreasons in show_missing_use:
6115 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6117 elif masked_packages:
6119 colorize("BAD", "All ebuilds that could satisfy ") + \
6120 colorize("INFORM", xinfo) + \
6121 colorize("BAD", " have been masked.")
6122 print "!!! One of the following masked packages is required to complete your request:"
6123 have_eapi_mask = show_masked_packages(masked_packages)
6126 msg = ("The current version of portage supports " + \
6127 "EAPI '%s'. You must upgrade to a newer version" + \
6128 " of portage before EAPI masked packages can" + \
6129 " be installed.") % portage.const.EAPI
6130 from textwrap import wrap
6131 for line in wrap(msg, 75):
6136 print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6138 # Show parent nodes and the argument that pulled them in.
6139 traversed_nodes = set()
6142 while node is not None:
6143 traversed_nodes.add(node)
6144 msg.append('(dependency required by "%s" [%s])' % \
6145 (colorize('INFORM', str(node.cpv)), node.type_name))
6146 # When traversing to parents, prefer arguments over packages
6147 # since arguments are root nodes. Never traverse the same
6148 # package twice, in order to prevent an infinite loop.
6149 selected_parent = None
6150 for parent in self.digraph.parent_nodes(node):
6151 if isinstance(parent, DependencyArg):
6152 msg.append('(dependency required by "%s" [argument])' % \
6153 (colorize('INFORM', str(parent))))
6154 selected_parent = None
6156 if parent not in traversed_nodes:
6157 selected_parent = parent
6158 node = selected_parent
6164 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6165 cache_key = (root, atom, onlydeps)
6166 ret = self._highest_pkg_cache.get(cache_key)
6169 if pkg and not existing:
6170 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6171 if existing and existing == pkg:
6172 # Update the cache to reflect that the
6173 # package has been added to the graph.
6175 self._highest_pkg_cache[cache_key] = ret
6177 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6178 self._highest_pkg_cache[cache_key] = ret
6181 settings = pkg.root_config.settings
6182 if visible(settings, pkg) and not (pkg.installed and \
6183 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6184 pkg.root_config.visible_pkgs.cpv_inject(pkg)
6187 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6188 root_config = self.roots[root]
6189 pkgsettings = self.pkgsettings[root]
6190 dbs = self._filtered_trees[root]["dbs"]
6191 vardb = self.roots[root].trees["vartree"].dbapi
6192 portdb = self.roots[root].trees["porttree"].dbapi
6193 # List of acceptable packages, ordered by type preference.
6194 matched_packages = []
6195 highest_version = None
6196 if not isinstance(atom, portage.dep.Atom):
6197 atom = portage.dep.Atom(atom)
6199 atom_set = InternalPackageSet(initial_atoms=(atom,))
6200 existing_node = None
6202 usepkgonly = "--usepkgonly" in self.myopts
6203 empty = "empty" in self.myparams
6204 selective = "selective" in self.myparams
6206 noreplace = "--noreplace" in self.myopts
6207 # Behavior of the "selective" parameter depends on
6208 # whether or not a package matches an argument atom.
6209 # If an installed package provides an old-style
6210 # virtual that is no longer provided by an available
6211 # package, the installed package may match an argument
6212 # atom even though none of the available packages do.
6213 # Therefore, "selective" logic does not consider
6214 # whether or not an installed package matches an
6215 # argument atom. It only considers whether or not
6216 # available packages match argument atoms, which is
6217 # represented by the found_available_arg flag.
6218 found_available_arg = False
6219 for find_existing_node in True, False:
6222 for db, pkg_type, built, installed, db_keys in dbs:
6225 if installed and not find_existing_node:
6226 want_reinstall = reinstall or empty or \
6227 (found_available_arg and not selective)
6228 if want_reinstall and matched_packages:
6230 if hasattr(db, "xmatch"):
6231 cpv_list = db.xmatch("match-all", atom)
6233 cpv_list = db.match(atom)
6235 # USE=multislot can make an installed package appear as if
6236 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6237 # won't do any good as long as USE=multislot is enabled since
6238 # the newly built package still won't have the expected slot.
6239 # Therefore, assume that such SLOT dependencies are already
6240 # satisfied rather than forcing a rebuild.
6241 if installed and not cpv_list and atom.slot:
6242 for cpv in db.match(atom.cp):
6243 slot_available = False
6244 for other_db, other_type, other_built, \
6245 other_installed, other_keys in dbs:
6248 other_db.aux_get(cpv, ["SLOT"])[0]:
6249 slot_available = True
6253 if not slot_available:
6255 inst_pkg = self._pkg(cpv, "installed",
6256 root_config, installed=installed)
6257 # Remove the slot from the atom and verify that
6258 # the package matches the resulting atom.
6259 atom_without_slot = portage.dep.remove_slot(atom)
6261 atom_without_slot += str(atom.use)
6262 atom_without_slot = portage.dep.Atom(atom_without_slot)
6263 if portage.match_from_list(
6264 atom_without_slot, [inst_pkg]):
6265 cpv_list = [inst_pkg.cpv]
6270 pkg_status = "merge"
6271 if installed or onlydeps:
6272 pkg_status = "nomerge"
6275 for cpv in cpv_list:
6276 # Make --noreplace take precedence over --newuse.
6277 if not installed and noreplace and \
6278 cpv in vardb.match(atom):
6279 # If the installed version is masked, it may
6280 # be necessary to look at lower versions,
6281 # in case there is a visible downgrade.
6283 reinstall_for_flags = None
6284 cache_key = (pkg_type, root, cpv, pkg_status)
6285 calculated_use = True
6286 pkg = self._pkg_cache.get(cache_key)
6288 calculated_use = False
6290 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6293 pkg = Package(built=built, cpv=cpv,
6294 installed=installed, metadata=metadata,
6295 onlydeps=onlydeps, root_config=root_config,
6297 metadata = pkg.metadata
6299 metadata['CHOST'] = pkgsettings.get('CHOST', '')
6300 if not built and ("?" in metadata["LICENSE"] or \
6301 "?" in metadata["PROVIDE"]):
6302 # This is avoided whenever possible because
6303 # it's expensive. It only needs to be done here
6304 # if it has an effect on visibility.
6305 pkgsettings.setcpv(pkg)
6306 metadata["USE"] = pkgsettings["PORTAGE_USE"]
6307 calculated_use = True
6308 self._pkg_cache[pkg] = pkg
6310 if not installed or (built and matched_packages):
6311 # Only enforce visibility on installed packages
6312 # if there is at least one other visible package
6313 # available. By filtering installed masked packages
6314 # here, packages that have been masked since they
6315 # were installed can be automatically downgraded
6316 # to an unmasked version.
6318 if not visible(pkgsettings, pkg):
6320 except portage.exception.InvalidDependString:
6324 # Enable upgrade or downgrade to a version
6325 # with visible KEYWORDS when the installed
6326 # version is masked by KEYWORDS, but never
6327 # reinstall the same exact version only due
6328 # to a KEYWORDS mask.
6329 if built and matched_packages:
6331 different_version = None
6332 for avail_pkg in matched_packages:
6333 if not portage.dep.cpvequal(
6334 pkg.cpv, avail_pkg.cpv):
6335 different_version = avail_pkg
6337 if different_version is not None:
6340 pkgsettings._getMissingKeywords(
6341 pkg.cpv, pkg.metadata):
6344 # If the ebuild no longer exists or it's
6345 # keywords have been dropped, reject built
6346 # instances (installed or binary).
6347 # If --usepkgonly is enabled, assume that
6348 # the ebuild status should be ignored.
6352 pkg.cpv, "ebuild", root_config)
6353 except portage.exception.PackageNotFound:
6356 if not visible(pkgsettings, pkg_eb):
6359 if not pkg.built and not calculated_use:
6360 # This is avoided whenever possible because
6362 pkgsettings.setcpv(pkg)
6363 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6365 if pkg.cp != atom.cp:
6366 # A cpv can be returned from dbapi.match() as an
6367 # old-style virtual match even in cases when the
6368 # package does not actually PROVIDE the virtual.
6369 # Filter out any such false matches here.
6370 if not atom_set.findAtomForPackage(pkg):
6374 if root == self.target_root:
6376 # Ebuild USE must have been calculated prior
6377 # to this point, in case atoms have USE deps.
6378 myarg = self._iter_atoms_for_pkg(pkg).next()
6379 except StopIteration:
6381 except portage.exception.InvalidDependString:
6383 # masked by corruption
6385 if not installed and myarg:
6386 found_available_arg = True
6388 if atom.use and not pkg.built:
6389 use = pkg.use.enabled
6390 if atom.use.enabled.difference(use):
6392 if atom.use.disabled.intersection(use):
6394 if pkg.cp == atom_cp:
6395 if highest_version is None:
6396 highest_version = pkg
6397 elif pkg > highest_version:
6398 highest_version = pkg
6399 # At this point, we've found the highest visible
6400 # match from the current repo. Any lower versions
6401 # from this repo are ignored, so this so the loop
6402 # will always end with a break statement below
6404 if find_existing_node:
6405 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6408 if portage.dep.match_from_list(atom, [e_pkg]):
6409 if highest_version and \
6410 e_pkg.cp == atom_cp and \
6411 e_pkg < highest_version and \
6412 e_pkg.slot_atom != highest_version.slot_atom:
6413 # There is a higher version available in a
6414 # different slot, so this existing node is
6418 matched_packages.append(e_pkg)
6419 existing_node = e_pkg
6421 # Compare built package to current config and
6422 # reject the built package if necessary.
6423 if built and not installed and \
6424 ("--newuse" in self.myopts or \
6425 "--reinstall" in self.myopts):
6426 iuses = pkg.iuse.all
6427 old_use = pkg.use.enabled
6429 pkgsettings.setcpv(myeb)
6431 pkgsettings.setcpv(pkg)
6432 now_use = pkgsettings["PORTAGE_USE"].split()
6433 forced_flags = set()
6434 forced_flags.update(pkgsettings.useforce)
6435 forced_flags.update(pkgsettings.usemask)
6437 if myeb and not usepkgonly:
6438 cur_iuse = myeb.iuse.all
6439 if self._reinstall_for_flags(forced_flags,
6443 # Compare current config to installed package
6444 # and do not reinstall if possible.
6445 if not installed and \
6446 ("--newuse" in self.myopts or \
6447 "--reinstall" in self.myopts) and \
6448 cpv in vardb.match(atom):
6449 pkgsettings.setcpv(pkg)
6450 forced_flags = set()
6451 forced_flags.update(pkgsettings.useforce)
6452 forced_flags.update(pkgsettings.usemask)
6453 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6454 old_iuse = set(filter_iuse_defaults(
6455 vardb.aux_get(cpv, ["IUSE"])[0].split()))
6456 cur_use = pkg.use.enabled
6457 cur_iuse = pkg.iuse.all
6458 reinstall_for_flags = \
6459 self._reinstall_for_flags(
6460 forced_flags, old_use, old_iuse,
6462 if reinstall_for_flags:
6466 matched_packages.append(pkg)
6467 if reinstall_for_flags:
6468 self._reinstall_nodes[pkg] = \
6472 if not matched_packages:
6475 if "--debug" in self.myopts:
6476 for pkg in matched_packages:
6477 portage.writemsg("%s %s\n" % \
6478 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6480 # Filter out any old-style virtual matches if they are
6481 # mixed with new-style virtual matches.
6482 cp = portage.dep_getkey(atom)
6483 if len(matched_packages) > 1 and \
6484 "virtual" == portage.catsplit(cp)[0]:
6485 for pkg in matched_packages:
6488 # Got a new-style virtual, so filter
6489 # out any old-style virtuals.
6490 matched_packages = [pkg for pkg in matched_packages \
6494 if len(matched_packages) > 1:
6495 bestmatch = portage.best(
6496 [pkg.cpv for pkg in matched_packages])
6497 matched_packages = [pkg for pkg in matched_packages \
6498 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6500 # ordered by type preference ("ebuild" type is the last resort)
6501 return matched_packages[-1], existing_node
6503 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6505 Select packages that have already been added to the graph or
6506 those that are installed and have not been scheduled for
6509 graph_db = self._graph_trees[root]["porttree"].dbapi
6510 matches = graph_db.match_pkgs(atom)
6513 pkg = matches[-1] # highest match
6514 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6515 return pkg, in_graph
6517 def _complete_graph(self):
6519 Add any deep dependencies of required sets (args, system, world) that
6520 have not been pulled into the graph yet. This ensures that the graph
6521 is consistent such that initially satisfied deep dependencies are not
6522 broken in the new graph. Initially unsatisfied dependencies are
6523 irrelevant since we only want to avoid breaking dependencies that are
6526 Since this method can consume enough time to disturb users, it is
6527 currently only enabled by the --complete-graph option.
6529 if "--buildpkgonly" in self.myopts or \
6530 "recurse" not in self.myparams:
6533 if "complete" not in self.myparams:
6534 # Skip this to avoid consuming enough time to disturb users.
6537 # Put the depgraph into a mode that causes it to only
6538 # select packages that have already been added to the
6539 # graph or those that are installed and have not been
6540 # scheduled for replacement. Also, toggle the "deep"
6541 # parameter so that all dependencies are traversed and
6543 self._select_atoms = self._select_atoms_from_graph
6544 self._select_package = self._select_pkg_from_graph
6545 already_deep = "deep" in self.myparams
6546 if not already_deep:
6547 self.myparams.add("deep")
6549 for root in self.roots:
6550 required_set_names = self._required_set_names.copy()
6551 if root == self.target_root and \
6552 (already_deep or "empty" in self.myparams):
6553 required_set_names.difference_update(self._sets)
6554 if not required_set_names and not self._ignored_deps:
6556 root_config = self.roots[root]
6557 setconfig = root_config.setconfig
6559 # Reuse existing SetArg instances when available.
6560 for arg in self.digraph.root_nodes():
6561 if not isinstance(arg, SetArg):
6563 if arg.root_config != root_config:
6565 if arg.name in required_set_names:
6567 required_set_names.remove(arg.name)
6568 # Create new SetArg instances only when necessary.
6569 for s in required_set_names:
6570 expanded_set = InternalPackageSet(
6571 initial_atoms=setconfig.getSetAtoms(s))
6572 atom = SETPREFIX + s
6573 args.append(SetArg(arg=atom, set=expanded_set,
6574 root_config=root_config))
6575 vardb = root_config.trees["vartree"].dbapi
6577 for atom in arg.set:
6578 self._dep_stack.append(
6579 Dependency(atom=atom, root=root, parent=arg))
6580 if self._ignored_deps:
6581 self._dep_stack.extend(self._ignored_deps)
6582 self._ignored_deps = []
6583 if not self._create_graph(allow_unsatisfied=True):
6585 # Check the unsatisfied deps to see if any initially satisfied deps
6586 # will become unsatisfied due to an upgrade. Initially unsatisfied
6587 # deps are irrelevant since we only want to avoid breaking deps
6588 # that are initially satisfied.
6589 while self._unsatisfied_deps:
6590 dep = self._unsatisfied_deps.pop()
6591 matches = vardb.match_pkgs(dep.atom)
6593 self._initially_unsatisfied_deps.append(dep)
6595 # An scheduled installation broke a deep dependency.
6596 # Add the installed package to the graph so that it
6597 # will be appropriately reported as a slot collision
6598 # (possibly solvable via backtracking).
6599 pkg = matches[-1] # highest match
6600 if not self._add_pkg(pkg, dep):
6602 if not self._create_graph(allow_unsatisfied=True):
6606 def _pkg(self, cpv, type_name, root_config, installed=False):
6608 Get a package instance from the cache, or create a new
6609 one if necessary. Raises KeyError from aux_get if it
6610 failures for some reason (package does not exist or is
6615 operation = "nomerge"
6616 pkg = self._pkg_cache.get(
6617 (type_name, root_config.root, cpv, operation))
6619 tree_type = self.pkg_tree_map[type_name]
6620 db = root_config.trees[tree_type].dbapi
6621 db_keys = list(self._trees_orig[root_config.root][
6622 tree_type].dbapi._aux_cache_keys)
6624 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6626 raise portage.exception.PackageNotFound(cpv)
6627 pkg = Package(cpv=cpv, metadata=metadata,
6628 root_config=root_config, installed=installed)
6629 if type_name == "ebuild":
6630 settings = self.pkgsettings[root_config.root]
6631 settings.setcpv(pkg)
6632 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6633 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6634 self._pkg_cache[pkg] = pkg
6637 def validate_blockers(self):
6638 """Remove any blockers from the digraph that do not match any of the
6639 packages within the graph. If necessary, create hard deps to ensure
6640 correct merge order such that mutually blocking packages are never
6641 installed simultaneously."""
6643 if "--buildpkgonly" in self.myopts or \
6644 "--nodeps" in self.myopts:
6647 #if "deep" in self.myparams:
6649 # Pull in blockers from all installed packages that haven't already
6650 # been pulled into the depgraph. This is not enabled by default
6651 # due to the performance penalty that is incurred by all the
6652 # additional dep_check calls that are required.
6654 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6655 for myroot in self.trees:
6656 vardb = self.trees[myroot]["vartree"].dbapi
6657 portdb = self.trees[myroot]["porttree"].dbapi
6658 pkgsettings = self.pkgsettings[myroot]
6659 final_db = self.mydbapi[myroot]
6661 blocker_cache = BlockerCache(myroot, vardb)
6662 stale_cache = set(blocker_cache)
6665 stale_cache.discard(cpv)
6666 pkg_in_graph = self.digraph.contains(pkg)
6668 # Check for masked installed packages. Only warn about
6669 # packages that are in the graph in order to avoid warning
6670 # about those that will be automatically uninstalled during
6671 # the merge process or by --depclean.
6673 if pkg_in_graph and not visible(pkgsettings, pkg):
6674 self._masked_installed.add(pkg)
6676 blocker_atoms = None
6682 self._blocker_parents.child_nodes(pkg))
6687 self._irrelevant_blockers.child_nodes(pkg))
6690 if blockers is not None:
6691 blockers = set(str(blocker.atom) \
6692 for blocker in blockers)
6694 # If this node has any blockers, create a "nomerge"
6695 # node for it so that they can be enforced.
6696 self.spinner.update()
6697 blocker_data = blocker_cache.get(cpv)
6698 if blocker_data is not None and \
6699 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6702 # If blocker data from the graph is available, use
6703 # it to validate the cache and update the cache if
6705 if blocker_data is not None and \
6706 blockers is not None:
6707 if not blockers.symmetric_difference(
6708 blocker_data.atoms):
6712 if blocker_data is None and \
6713 blockers is not None:
6714 # Re-use the blockers from the graph.
6715 blocker_atoms = sorted(blockers)
6716 counter = long(pkg.metadata["COUNTER"])
6718 blocker_cache.BlockerData(counter, blocker_atoms)
6719 blocker_cache[pkg.cpv] = blocker_data
6723 blocker_atoms = blocker_data.atoms
6725 # Use aux_get() to trigger FakeVartree global
6726 # updates on *DEPEND when appropriate.
6727 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6728 # It is crucial to pass in final_db here in order to
6729 # optimize dep_check calls by eliminating atoms via
6730 # dep_wordreduce and dep_eval calls.
6732 portage.dep._dep_check_strict = False
6734 success, atoms = portage.dep_check(depstr,
6735 final_db, pkgsettings, myuse=pkg.use.enabled,
6736 trees=self._graph_trees, myroot=myroot)
6737 except Exception, e:
6738 if isinstance(e, SystemExit):
6740 # This is helpful, for example, if a ValueError
6741 # is thrown from cpv_expand due to multiple
6742 # matches (this can happen if an atom lacks a
6744 show_invalid_depstring_notice(
6745 pkg, depstr, str(e))
6749 portage.dep._dep_check_strict = True
6751 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6752 if replacement_pkg and \
6753 replacement_pkg[0].operation == "merge":
6754 # This package is being replaced anyway, so
6755 # ignore invalid dependencies so as not to
6756 # annoy the user too much (otherwise they'd be
6757 # forced to manually unmerge it first).
6759 show_invalid_depstring_notice(pkg, depstr, atoms)
6761 blocker_atoms = [myatom for myatom in atoms \
6762 if myatom.startswith("!")]
6763 blocker_atoms.sort()
6764 counter = long(pkg.metadata["COUNTER"])
6765 blocker_cache[cpv] = \
6766 blocker_cache.BlockerData(counter, blocker_atoms)
6769 for atom in blocker_atoms:
6770 blocker = Blocker(atom=portage.dep.Atom(atom),
6771 eapi=pkg.metadata["EAPI"], root=myroot)
6772 self._blocker_parents.add(blocker, pkg)
6773 except portage.exception.InvalidAtom, e:
6774 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6775 show_invalid_depstring_notice(
6776 pkg, depstr, "Invalid Atom: %s" % (e,))
6778 for cpv in stale_cache:
6779 del blocker_cache[cpv]
6780 blocker_cache.flush()
6783 # Discard any "uninstall" tasks scheduled by previous calls
6784 # to this method, since those tasks may not make sense given
6785 # the current graph state.
6786 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6787 if previous_uninstall_tasks:
6788 self._blocker_uninstalls = digraph()
6789 self.digraph.difference_update(previous_uninstall_tasks)
6791 for blocker in self._blocker_parents.leaf_nodes():
6792 self.spinner.update()
6793 root_config = self.roots[blocker.root]
6794 virtuals = root_config.settings.getvirtuals()
6795 myroot = blocker.root
6796 initial_db = self.trees[myroot]["vartree"].dbapi
6797 final_db = self.mydbapi[myroot]
6799 provider_virtual = False
6800 if blocker.cp in virtuals and \
6801 not self._have_new_virt(blocker.root, blocker.cp):
6802 provider_virtual = True
6804 # Use this to check PROVIDE for each matched package
6806 atom_set = InternalPackageSet(
6807 initial_atoms=[blocker.atom])
6809 if provider_virtual:
6811 for provider_entry in virtuals[blocker.cp]:
6813 portage.dep_getkey(provider_entry)
6814 atoms.append(blocker.atom.replace(
6815 blocker.cp, provider_cp))
6817 atoms = [blocker.atom]
6819 blocked_initial = set()
6821 for pkg in initial_db.match_pkgs(atom):
6822 if atom_set.findAtomForPackage(pkg):
6823 blocked_initial.add(pkg)
6825 blocked_final = set()
6827 for pkg in final_db.match_pkgs(atom):
6828 if atom_set.findAtomForPackage(pkg):
6829 blocked_final.add(pkg)
6831 if not blocked_initial and not blocked_final:
6832 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6833 self._blocker_parents.remove(blocker)
6834 # Discard any parents that don't have any more blockers.
6835 for pkg in parent_pkgs:
6836 self._irrelevant_blockers.add(blocker, pkg)
6837 if not self._blocker_parents.child_nodes(pkg):
6838 self._blocker_parents.remove(pkg)
6840 for parent in self._blocker_parents.parent_nodes(blocker):
6841 unresolved_blocks = False
6842 depends_on_order = set()
6843 for pkg in blocked_initial:
6844 if pkg.slot_atom == parent.slot_atom:
6845 # TODO: Support blocks within slots in cases where it
6846 # might make sense. For example, a new version might
6847 # require that the old version be uninstalled at build
6850 if parent.installed:
6851 # Two currently installed packages conflict with
6852 # eachother. Ignore this case since the damage
6853 # is already done and this would be likely to
6854 # confuse users if displayed like a normal blocker.
6857 self._blocked_pkgs.add(pkg, blocker)
6859 if parent.operation == "merge":
6860 # Maybe the blocked package can be replaced or simply
6861 # unmerged to resolve this block.
6862 depends_on_order.add((pkg, parent))
6864 # None of the above blocker resolutions techniques apply,
6865 # so apparently this one is unresolvable.
6866 unresolved_blocks = True
6867 for pkg in blocked_final:
6868 if pkg.slot_atom == parent.slot_atom:
6869 # TODO: Support blocks within slots.
6871 if parent.operation == "nomerge" and \
6872 pkg.operation == "nomerge":
6873 # This blocker will be handled the next time that a
6874 # merge of either package is triggered.
6877 self._blocked_pkgs.add(pkg, blocker)
6879 # Maybe the blocking package can be
6880 # unmerged to resolve this block.
6881 if parent.operation == "merge" and pkg.installed:
6882 depends_on_order.add((pkg, parent))
6884 elif parent.operation == "nomerge":
6885 depends_on_order.add((parent, pkg))
6887 # None of the above blocker resolutions techniques apply,
6888 # so apparently this one is unresolvable.
6889 unresolved_blocks = True
6891 # Make sure we don't unmerge any package that have been pulled
6893 if not unresolved_blocks and depends_on_order:
6894 for inst_pkg, inst_task in depends_on_order:
6895 if self.digraph.contains(inst_pkg) and \
6896 self.digraph.parent_nodes(inst_pkg):
6897 unresolved_blocks = True
6900 if not unresolved_blocks and depends_on_order:
6901 for inst_pkg, inst_task in depends_on_order:
6902 uninst_task = Package(built=inst_pkg.built,
6903 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6904 metadata=inst_pkg.metadata,
6905 operation="uninstall",
6906 root_config=inst_pkg.root_config,
6907 type_name=inst_pkg.type_name)
6908 self._pkg_cache[uninst_task] = uninst_task
6909 # Enforce correct merge order with a hard dep.
6910 self.digraph.addnode(uninst_task, inst_task,
6911 priority=BlockerDepPriority.instance)
6912 # Count references to this blocker so that it can be
6913 # invalidated after nodes referencing it have been
6915 self._blocker_uninstalls.addnode(uninst_task, blocker)
6916 if not unresolved_blocks and not depends_on_order:
6917 self._irrelevant_blockers.add(blocker, parent)
6918 self._blocker_parents.remove_edge(blocker, parent)
6919 if not self._blocker_parents.parent_nodes(blocker):
6920 self._blocker_parents.remove(blocker)
6921 if not self._blocker_parents.child_nodes(parent):
6922 self._blocker_parents.remove(parent)
6923 if unresolved_blocks:
6924 self._unsolvable_blockers.add(blocker, parent)
# Return whether unresolved blocker conflicts may be tolerated for this run.
# NOTE(review): this excerpt omits some original lines (the accumulator
# initialization and the return); comments below describe only visible code.
6928 def _accept_blocker_conflicts(self):
# Modes that only fetch/build binaries or skip dependency resolution
# never touch the live filesystem together, so file collisions between
# blocking packages cannot occur and the conflict is acceptable.
6930 for x in ("--buildpkgonly", "--fetchonly",
6931 "--fetch-all-uri", "--nodeps"):
6932 if x in self.myopts:
6937 def _merge_order_bias(self, mygraph):
6939 For optimal leaf node selection, promote deep system runtime deps and
6940 order nodes from highest to lowest overall reference count.
# Precompute each node's in-degree once so the comparator below is O(1).
6944 for node in mygraph.order:
6945 node_info[node] = len(mygraph.parent_nodes(node))
6946 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
# Comparator used with cmp_sort_key below; negative means node1 sorts first.
# NOTE(review): the return statements for the uninstall/system-dep branches
# are missing from this excerpt.
6948 def cmp_merge_preference(node1, node2):
6950 if node1.operation == 'uninstall':
6951 if node2.operation == 'uninstall':
6955 if node2.operation == 'uninstall':
6956 if node1.operation == 'uninstall':
6960 node1_sys = node1 in deep_system_deps
6961 node2_sys = node2 in deep_system_deps
6962 if node1_sys != node2_sys:
# Fallback: sort by descending reference count (higher in-degree first).
6967 return node_info[node2] - node_info[node1]
# Sort the graph's traversal order in place using the bias above.
6969 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
# Return the serialized merge list, computing and caching it on first use.
# NOTE(review): the parameter name `reversed` shadows the builtin; it cannot
# be renamed without breaking keyword callers. Lines handling the retry
# exception, the reversal, and the return are missing from this excerpt.
6971 def altlist(self, reversed=False):
# Loop because _serialize_tasks may raise _serialize_tasks_retry after
# adjusting state (e.g. adding "complete" to myparams), requiring a redo.
6973 while self._serialized_tasks_cache is None:
6974 self._resolve_conflicts()
6976 self._serialized_tasks_cache, self._scheduler_graph = \
6977 self._serialize_tasks()
6978 except self._serialize_tasks_retry:
# Return a copy so callers cannot mutate the cache.
6981 retlist = self._serialized_tasks_cache[:]
6986 def schedulerGraph(self):
6988 The scheduler graph is identical to the normal one except that
6989 uninstall edges are reversed in specific cases that require
6990 conflicting packages to be temporarily installed simultaneously.
6991 This is intended for use by the Scheduler in it's parallelization
6992 logic. It ensures that temporary simultaneous installation of
6993 conflicting packages is avoided when appropriate (especially for
6994 !!atom blockers), but allowed in specific cases that require it.
6996 Note that this method calls break_refs() which alters the state of
6997 internal Package instances such that this depgraph instance should
6998 not be used to perform any more calculations.
# Lazily build the graph; the line that triggers serialization is missing
# from this excerpt (presumably a call into the altlist machinery).
7000 if self._scheduler_graph is None:
# Sever back-references so returned nodes don't pin this depgraph in memory.
7002 self.break_refs(self._scheduler_graph.order)
7003 return self._scheduler_graph
7005 def break_refs(self, nodes):
7007 Take a mergelist like that returned from self.altlist() and
7008 break any references that lead back to the depgraph. This is
7009 useful if you want to hold references to packages without
7010 also holding the depgraph on the heap.
# NOTE(review): the loop header over `nodes` is missing from this excerpt;
# the hasattr guard below skips non-Package entries (e.g. Blocker objects
# without a root_config attribute).
7013 if hasattr(node, "root_config"):
7014 # The FakeVartree references the _package_cache which
7015 # references the depgraph. So that Package instances don't
7016 # hold the depgraph and FakeVartree on the heap, replace
7017 # the RootConfig that references the FakeVartree with the
7018 # original RootConfig instance which references the actual
# Swap in the pristine RootConfig captured before FakeVartree wrapping.
7020 node.root_config = \
7021 self._trees_orig[node.root_config.root]["root_config"]
def _resolve_conflicts(self):
	"""Complete the graph and validate blockers, then handle slot conflicts.

	Each prerequisite check runs in order; a failure aborts immediately
	via _unknown_internal_error before the next check is attempted.
	"""
	for succeeded in (self._complete_graph, self.validate_blockers):
		if not succeeded():
			raise self._unknown_internal_error()

	# Only bother with slot-conflict processing when conflicts were recorded.
	if self._slot_collision_info:
		self._process_slot_conflicts()
# Compute a merge order (retlist) plus a scheduler graph from self.digraph.
# Core algorithm: repeatedly pick leaf nodes from a working copy of the
# dependency graph, progressively loosening the edge-priority criteria
# (prefer_asap / drop_satisfied) when no leaf can be found, and scheduling
# Uninstall tasks only when required to break blocker conflicts.
# NOTE(review): this excerpt omits many original lines (else branches,
# continue/break statements, variable initializations such as asap_nodes,
# retlist, prefer_asap, max parent-deps bookkeeping); comments below are
# limited to what the visible code demonstrates.
7033 def _serialize_tasks(self):
7035 if "--debug" in self.myopts:
7036 writemsg("\ndigraph:\n\n", noiselevel=-1)
7037 self.digraph.debug_print()
7038 writemsg("\n", noiselevel=-1)
# Work on copies: mygraph is consumed by leaf-node removal below, while
# scheduler_graph keeps (mostly) full edge information for the Scheduler.
7040 scheduler_graph = self.digraph.copy()
7041 mygraph=self.digraph.copy()
7042 # Prune "nomerge" root nodes if nothing depends on them, since
7043 # otherwise they slow down merge order calculation. Don't remove
7044 # non-root nodes since they help optimize merge order in some cases
7045 # such as revdep-rebuild.
7046 removed_nodes = set()
7048 for node in mygraph.root_nodes():
7049 if not isinstance(node, Package) or \
7050 node.installed or node.onlydeps:
7051 removed_nodes.add(node)
7053 self.spinner.update()
7054 mygraph.difference_update(removed_nodes)
# Pruning roots may expose new prunable roots; loop until a fixed point.
7055 if not removed_nodes:
7057 removed_nodes.clear()
7058 self._merge_order_bias(mygraph)
7059 def cmp_circular_bias(n1, n2):
7061 RDEPEND is stronger than PDEPEND and this function
7062 measures such a strength bias within a circular
7063 dependency relationship.
7065 n1_n2_medium = n2 in mygraph.child_nodes(n1,
7066 ignore_priority=priority_range.ignore_medium_soft)
7067 n2_n1_medium = n1 in mygraph.child_nodes(n2,
7068 ignore_priority=priority_range.ignore_medium_soft)
7069 if n1_n2_medium == n2_n1_medium:
7074 myblocker_uninstalls = self._blocker_uninstalls.copy()
7076 # Contains uninstall tasks that have been scheduled to
7077 # occur after overlapping blockers have been installed.
7078 scheduled_uninstalls = set()
7079 # Contains any Uninstall tasks that have been ignored
7080 # in order to avoid the circular deps code path. These
7081 # correspond to blocker conflicts that could not be
7083 ignored_uninstall_tasks = set()
7084 have_uninstall_task = False
7085 complete = "complete" in self.myparams
7088 def get_nodes(**kwargs):
7090 Returns leaf nodes excluding Uninstall instances
7091 since those should be executed as late as possible.
7093 return [node for node in mygraph.leaf_nodes(**kwargs) \
7094 if isinstance(node, Package) and \
7095 (node.operation != "uninstall" or \
7096 node in scheduled_uninstalls)]
7098 # sys-apps/portage needs special treatment if ROOT="/"
7099 running_root = self._running_root.root
7100 from portage.const import PORTAGE_PACKAGE_ATOM
7101 runtime_deps = InternalPackageSet(
7102 initial_atoms=[PORTAGE_PACKAGE_ATOM])
7103 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
7104 PORTAGE_PACKAGE_ATOM)
7105 replacement_portage = self.mydbapi[running_root].match_pkgs(
7106 PORTAGE_PACKAGE_ATOM)
# match_pkgs returns a list; reduce to a single package or None.
7109 running_portage = running_portage[0]
7111 running_portage = None
7113 if replacement_portage:
7114 replacement_portage = replacement_portage[0]
7116 replacement_portage = None
7118 if replacement_portage == running_portage:
7119 replacement_portage = None
7121 if replacement_portage is not None:
7122 # update from running_portage to replacement_portage asap
7123 asap_nodes.append(replacement_portage)
7125 if running_portage is not None:
7127 portage_rdepend = self._select_atoms_highest_available(
7128 running_root, running_portage.metadata["RDEPEND"],
7129 myuse=running_portage.use.enabled,
7130 parent=running_portage, strict=False)
7131 except portage.exception.InvalidDependString, e:
7132 portage.writemsg("!!! Invalid RDEPEND in " + \
7133 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7134 (running_root, running_portage.cpv, e), noiselevel=-1)
7136 portage_rdepend = []
# Collect portage's positive runtime deps; blockers ("!...") are excluded.
7137 runtime_deps.update(atom for atom in portage_rdepend \
7138 if not atom.startswith("!"))
7140 def gather_deps(ignore_priority, mergeable_nodes,
7141 selected_nodes, node):
7143 Recursively gather a group of nodes that RDEPEND on
7144 eachother. This ensures that they are merged as a group
7145 and get their RDEPENDs satisfied as soon as possible.
7147 if node in selected_nodes:
7149 if node not in mergeable_nodes:
7151 if node == replacement_portage and \
7152 mygraph.child_nodes(node,
7153 ignore_priority=priority_range.ignore_medium_soft):
7154 # Make sure that portage always has all of it's
7155 # RDEPENDs installed first.
7157 selected_nodes.add(node)
7158 for child in mygraph.child_nodes(node,
7159 ignore_priority=ignore_priority):
7160 if not gather_deps(ignore_priority,
7161 mergeable_nodes, selected_nodes, child):
# Priority filters that additionally treat blocker-uninstall edges as hard.
7165 def ignore_uninst_or_med(priority):
7166 if priority is BlockerDepPriority.instance:
7168 return priority_range.ignore_medium(priority)
7170 def ignore_uninst_or_med_soft(priority):
7171 if priority is BlockerDepPriority.instance:
7173 return priority_range.ignore_medium_soft(priority)
7175 tree_mode = "--tree" in self.myopts
7176 # Tracks whether or not the current iteration should prefer asap_nodes
7177 # if available. This is set to False when the previous iteration
7178 # failed to select any nodes. It is reset whenever nodes are
7179 # successfully selected.
7182 # Controls whether or not the current iteration should drop edges that
7183 # are "satisfied" by installed packages, in order to solve circular
7184 # dependencies. The deep runtime dependencies of installed packages are
7185 # not checked in this case (bug #199856), so it must be avoided
7186 # whenever possible.
7187 drop_satisfied = False
7189 # State of variables for successive iterations that loosen the
7190 # criteria for node selection.
7192 # iteration prefer_asap drop_satisfied
7197 # If no nodes are selected on the last iteration, it is due to
7198 # unresolved blockers or circular dependencies.
# Main selection loop: pick leaves until the working graph is drained.
7200 while not mygraph.empty():
7201 self.spinner.update()
7202 selected_nodes = None
7203 ignore_priority = None
7204 if drop_satisfied or (prefer_asap and asap_nodes):
7205 priority_range = DepPrioritySatisfiedRange
7207 priority_range = DepPriorityNormalRange
7208 if prefer_asap and asap_nodes:
7209 # ASAP nodes are merged before their soft deps. Go ahead and
7210 # select root nodes here if necessary, since it's typical for
7211 # the parent to have been removed from the graph already.
7212 asap_nodes = [node for node in asap_nodes \
7213 if mygraph.contains(node)]
7214 for node in asap_nodes:
7215 if not mygraph.child_nodes(node,
7216 ignore_priority=priority_range.ignore_soft):
7217 selected_nodes = [node]
7218 asap_nodes.remove(node)
7220 if not selected_nodes and \
7221 not (prefer_asap and asap_nodes):
# Try each priority level, weakest ignore set first.
7222 for i in xrange(priority_range.NONE,
7223 priority_range.MEDIUM_SOFT + 1):
7224 ignore_priority = priority_range.ignore_priority[i]
7225 nodes = get_nodes(ignore_priority=ignore_priority)
7227 # If there is a mix of uninstall nodes with other
7228 # types, save the uninstall nodes for later since
7229 # sometimes a merge node will render an uninstall
7230 # node unnecessary (due to occupying the same slot),
7231 # and we want to avoid executing a separate uninstall
7232 # task in that case.
7234 good_uninstalls = []
7235 with_some_uninstalls_excluded = []
7237 if node.operation == "uninstall":
7238 slot_node = self.mydbapi[node.root
7239 ].match_pkgs(node.slot_atom)
7241 slot_node[0].operation == "merge":
7243 good_uninstalls.append(node)
7244 with_some_uninstalls_excluded.append(node)
7246 nodes = good_uninstalls
7247 elif with_some_uninstalls_excluded:
7248 nodes = with_some_uninstalls_excluded
7252 if ignore_priority is None and not tree_mode:
7253 # Greedily pop all of these nodes since no
7254 # relationship has been ignored. This optimization
7255 # destroys --tree output, so it's disabled in tree
7257 selected_nodes = nodes
7259 # For optimal merge order:
7260 # * Only pop one node.
7261 # * Removing a root node (node without a parent)
7262 # will not produce a leaf node, so avoid it.
7263 # * It's normal for a selected uninstall to be a
7264 # root node, so don't check them for parents.
7266 if node.operation == "uninstall" or \
7267 mygraph.parent_nodes(node):
7268 selected_nodes = [node]
7274 if not selected_nodes:
# No plain leaves; try to pull out a mutually-RDEPENDing group.
7275 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7277 mergeable_nodes = set(nodes)
7278 if prefer_asap and asap_nodes:
7280 for i in xrange(priority_range.SOFT,
7281 priority_range.MEDIUM_SOFT + 1):
7282 ignore_priority = priority_range.ignore_priority[i]
7284 if not mygraph.parent_nodes(node):
7286 selected_nodes = set()
7287 if gather_deps(ignore_priority,
7288 mergeable_nodes, selected_nodes, node):
7291 selected_nodes = None
7295 if prefer_asap and asap_nodes and not selected_nodes:
7296 # We failed to find any asap nodes to merge, so ignore
7297 # them for the next iteration.
7301 if selected_nodes and ignore_priority is not None:
7302 # Try to merge ignored medium_soft deps as soon as possible
7303 # if they're not satisfied by installed packages.
7304 for node in selected_nodes:
7305 children = set(mygraph.child_nodes(node))
7306 soft = children.difference(
7307 mygraph.child_nodes(node,
7308 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7309 medium_soft = children.difference(
7310 mygraph.child_nodes(node,
7312 DepPrioritySatisfiedRange.ignore_medium_soft))
7313 medium_soft.difference_update(soft)
7314 for child in medium_soft:
7315 if child in selected_nodes:
7317 if child in asap_nodes:
7319 asap_nodes.append(child)
7321 if selected_nodes and len(selected_nodes) > 1:
7322 if not isinstance(selected_nodes, list):
7323 selected_nodes = list(selected_nodes)
7324 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7326 if not selected_nodes and not myblocker_uninstalls.is_empty():
7327 # An Uninstall task needs to be executed in order to
7328 # avoid conflict if possible.
7331 priority_range = DepPrioritySatisfiedRange
7333 priority_range = DepPriorityNormalRange
7335 mergeable_nodes = get_nodes(
7336 ignore_priority=ignore_uninst_or_med)
7338 min_parent_deps = None
7340 for task in myblocker_uninstalls.leaf_nodes():
7341 # Do some sanity checks so that system or world packages
7342 # don't get uninstalled inappropriately here (only really
7343 # necessary when --complete-graph has not been enabled).
7345 if task in ignored_uninstall_tasks:
7348 if task in scheduled_uninstalls:
7349 # It's been scheduled but it hasn't
7350 # been executed yet due to dependence
7351 # on installation of blocking packages.
7354 root_config = self.roots[task.root]
7355 inst_pkg = self._pkg_cache[
7356 ("installed", task.root, task.cpv, "nomerge")]
7358 if self.digraph.contains(inst_pkg):
# Classify the blockers that pull this uninstall in: EAPI 0/1
# atoms can't express !! so overlap intent is heuristic there.
7361 forbid_overlap = False
7362 heuristic_overlap = False
7363 for blocker in myblocker_uninstalls.parent_nodes(task):
7364 if blocker.eapi in ("0", "1"):
7365 heuristic_overlap = True
7366 elif blocker.atom.blocker.overlap.forbid:
7367 forbid_overlap = True
7369 if forbid_overlap and running_root == task.root:
7372 if heuristic_overlap and running_root == task.root:
7373 # Never uninstall sys-apps/portage or it's essential
7374 # dependencies, except through replacement.
7376 runtime_dep_atoms = \
7377 list(runtime_deps.iterAtomsForPackage(task))
7378 except portage.exception.InvalidDependString, e:
7379 portage.writemsg("!!! Invalid PROVIDE in " + \
7380 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7381 (task.root, task.cpv, e), noiselevel=-1)
7385 # Don't uninstall a runtime dep if it appears
7386 # to be the only suitable one installed.
7388 vardb = root_config.trees["vartree"].dbapi
7389 for atom in runtime_dep_atoms:
7390 other_version = None
7391 for pkg in vardb.match_pkgs(atom):
7392 if pkg.cpv == task.cpv and \
7393 pkg.metadata["COUNTER"] == \
7394 task.metadata["COUNTER"]:
7398 if other_version is None:
7404 # For packages in the system set, don't take
7405 # any chances. If the conflict can't be resolved
7406 # by a normal replacement operation then abort.
7409 for atom in root_config.sets[
7410 "system"].iterAtomsForPackage(task):
7413 except portage.exception.InvalidDependString, e:
7414 portage.writemsg("!!! Invalid PROVIDE in " + \
7415 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7416 (task.root, task.cpv, e), noiselevel=-1)
7422 # Note that the world check isn't always
7423 # necessary since self._complete_graph() will
7424 # add all packages from the system and world sets to the
7425 # graph. This just allows unresolved conflicts to be
7426 # detected as early as possible, which makes it possible
7427 # to avoid calling self._complete_graph() when it is
7428 # unnecessary due to blockers triggering an abortion.
7430 # For packages in the world set, go ahead an uninstall
7431 # when necessary, as long as the atom will be satisfied
7432 # in the final state.
7433 graph_db = self.mydbapi[task.root]
7436 for atom in root_config.sets[
7437 "world"].iterAtomsForPackage(task):
7439 for pkg in graph_db.match_pkgs(atom):
7446 self._blocked_world_pkgs[inst_pkg] = atom
7448 except portage.exception.InvalidDependString, e:
7449 portage.writemsg("!!! Invalid PROVIDE in " + \
7450 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7451 (task.root, task.cpv, e), noiselevel=-1)
7457 # Check the deps of parent nodes to ensure that
7458 # the chosen task produces a leaf node. Maybe
7459 # this can be optimized some more to make the
7460 # best possible choice, but the current algorithm
7461 # is simple and should be near optimal for most
7463 mergeable_parent = False
7465 for parent in mygraph.parent_nodes(task):
7466 parent_deps.update(mygraph.child_nodes(parent,
7467 ignore_priority=priority_range.ignore_medium_soft))
7468 if parent in mergeable_nodes and \
7469 gather_deps(ignore_uninst_or_med_soft,
7470 mergeable_nodes, set(), parent):
7471 mergeable_parent = True
7473 if not mergeable_parent:
# Prefer the candidate whose parents have the fewest
# outstanding deps, so the uninstall frees a leaf soonest.
7476 parent_deps.remove(task)
7477 if min_parent_deps is None or \
7478 len(parent_deps) < min_parent_deps:
7479 min_parent_deps = len(parent_deps)
7482 if uninst_task is not None:
7483 # The uninstall is performed only after blocking
7484 # packages have been merged on top of it. File
7485 # collisions between blocking packages are detected
7486 # and removed from the list of files to be uninstalled.
7487 scheduled_uninstalls.add(uninst_task)
7488 parent_nodes = mygraph.parent_nodes(uninst_task)
7490 # Reverse the parent -> uninstall edges since we want
7491 # to do the uninstall after blocking packages have
7492 # been merged on top of it.
7493 mygraph.remove(uninst_task)
7494 for blocked_pkg in parent_nodes:
7495 mygraph.add(blocked_pkg, uninst_task,
7496 priority=BlockerDepPriority.instance)
7497 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7498 scheduler_graph.add(blocked_pkg, uninst_task,
7499 priority=BlockerDepPriority.instance)
7501 # Reset the state variables for leaf node selection and
7502 # continue trying to select leaf nodes.
7504 drop_satisfied = False
7507 if not selected_nodes:
7508 # Only select root nodes as a last resort. This case should
7509 # only trigger when the graph is nearly empty and the only
7510 # remaining nodes are isolated (no parents or children). Since
7511 # the nodes must be isolated, ignore_priority is not needed.
7512 selected_nodes = get_nodes()
7514 if not selected_nodes and not drop_satisfied:
7515 drop_satisfied = True
7518 if not selected_nodes and not myblocker_uninstalls.is_empty():
7519 # If possible, drop an uninstall task here in order to avoid
7520 # the circular deps code path. The corresponding blocker will
7521 # still be counted as an unresolved conflict.
7523 for node in myblocker_uninstalls.leaf_nodes():
7525 mygraph.remove(node)
7530 ignored_uninstall_tasks.add(node)
7533 if uninst_task is not None:
7534 # Reset the state variables for leaf node selection and
7535 # continue trying to select leaf nodes.
7537 drop_satisfied = False
7540 if not selected_nodes:
# All fallbacks exhausted: record the remaining graph for the
# circular-dependency display and abort.
7541 self._circular_deps_for_display = mygraph
7542 raise self._unknown_internal_error()
7544 # At this point, we've succeeded in selecting one or more nodes, so
7545 # reset state variables for leaf node selection.
7547 drop_satisfied = False
7549 mygraph.difference_update(selected_nodes)
7551 for node in selected_nodes:
7552 if isinstance(node, Package) and \
7553 node.operation == "nomerge":
7556 # Handle interactions between blockers
7557 # and uninstallation tasks.
7558 solved_blockers = set()
7560 if isinstance(node, Package) and \
7561 "uninstall" == node.operation:
7562 have_uninstall_task = True
7565 vardb = self.trees[node.root]["vartree"].dbapi
7566 previous_cpv = vardb.match(node.slot_atom)
7568 # The package will be replaced by this one, so remove
7569 # the corresponding Uninstall task if necessary.
7570 previous_cpv = previous_cpv[0]
7572 ("installed", node.root, previous_cpv, "uninstall")
7574 mygraph.remove(uninst_task)
7578 if uninst_task is not None and \
7579 uninst_task not in ignored_uninstall_tasks and \
7580 myblocker_uninstalls.contains(uninst_task):
7581 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7582 myblocker_uninstalls.remove(uninst_task)
7583 # Discard any blockers that this Uninstall solves.
7584 for blocker in blocker_nodes:
7585 if not myblocker_uninstalls.child_nodes(blocker):
7586 myblocker_uninstalls.remove(blocker)
7587 solved_blockers.add(blocker)
7589 retlist.append(node)
7591 if (isinstance(node, Package) and \
7592 "uninstall" == node.operation) or \
7593 (uninst_task is not None and \
7594 uninst_task in scheduled_uninstalls):
7595 # Include satisfied blockers in the merge list
7596 # since the user might be interested and also
7597 # it serves as an indicator that blocking packages
7598 # will be temporarily installed simultaneously.
7599 for blocker in solved_blockers:
7600 retlist.append(Blocker(atom=blocker.atom,
7601 root=blocker.root, eapi=blocker.eapi,
# Anything still rooted in the blocker-uninstall graph is unsolvable.
7604 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7605 for node in myblocker_uninstalls.root_nodes():
7606 unsolvable_blockers.add(node)
7608 for blocker in unsolvable_blockers:
7609 retlist.append(blocker)
7611 # If any Uninstall tasks need to be executed in order
7612 # to avoid a conflict, complete the graph with any
7613 # dependencies that may have been initially
7614 # neglected (to ensure that unsafe Uninstall tasks
7615 # are properly identified and blocked from execution).
7616 if have_uninstall_task and \
7618 not unsolvable_blockers:
# Retry the whole serialization with a completed graph.
7619 self.myparams.add("complete")
7620 raise self._serialize_tasks_retry("")
7622 if unsolvable_blockers and \
7623 not self._accept_blocker_conflicts():
# Cache results before raising so display code can still show them.
7624 self._unsatisfied_blockers_for_display = unsolvable_blockers
7625 self._serialized_tasks_cache = retlist[:]
7626 self._scheduler_graph = scheduler_graph
7627 raise self._unknown_internal_error()
7629 if self._slot_collision_info and \
7630 not self._accept_blocker_conflicts():
7631 self._serialized_tasks_cache = retlist[:]
7632 self._scheduler_graph = scheduler_graph
7633 raise self._unknown_internal_error()
7635 return retlist, scheduler_graph
# Report an unresolvable circular-dependency situation to the user,
# forcing --tree output so the cycle structure is visible.
7637 def _show_circular_deps(self, mygraph):
7638 # No leaf nodes are available, so we have a circular
7639 # dependency panic situation. Reduce the noise level to a
7640 # minimum via repeated elimination of root nodes since they
7641 # have no parents and thus can not be part of a cycle.
7643 root_nodes = mygraph.root_nodes(
7644 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7647 mygraph.difference_update(root_nodes)
7648 # Display the USE flags that are enabled on nodes that are part
7649 # of dependency cycles in case that helps the user decide to
7650 # disable some of them.
# Drain a scratch copy of the graph to derive a display order; when no
# leaves exist (pure cycle) fall back to the first node in order.
7652 tempgraph = mygraph.copy()
7653 while not tempgraph.empty():
7654 nodes = tempgraph.leaf_nodes()
7656 node = tempgraph.order[0]
7659 display_order.append(node)
7660 tempgraph.remove(node)
7661 display_order.reverse()
# Override user verbosity options: tree mode is required to see the cycle.
7662 self.myopts.pop("--quiet", None)
7663 self.myopts.pop("--verbose", None)
7664 self.myopts["--tree"] = True
7665 portage.writemsg("\n\n", noiselevel=-1)
7666 self.display(display_order)
7667 prefix = colorize("BAD", " * ")
7668 portage.writemsg("\n", noiselevel=-1)
7669 portage.writemsg(prefix + "Error: circular dependencies:\n",
7671 portage.writemsg("\n", noiselevel=-1)
7672 mygraph.debug_print()
7673 portage.writemsg("\n", noiselevel=-1)
7674 portage.writemsg(prefix + "Note that circular dependencies " + \
7675 "can often be avoided by temporarily\n", noiselevel=-1)
7676 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7677 "optional dependencies.\n", noiselevel=-1)
def _show_merge_list(self):
	"""Display the cached merge list unless it was already shown.

	The list is considered already shown when _displayed_list matches the
	serialized task cache in either forward or reverse order.
	"""
	tasks = self._serialized_tasks_cache
	if tasks is None:
		return
	shown = self._displayed_list
	if shown and (shown == tasks or shown == list(reversed(tasks))):
		# Identical list already displayed (possibly reversed for --tree).
		return
	display_list = tasks[:]
	if "--tree" in self.myopts:
		# Tree output reads bottom-up, so present the list reversed.
		display_list.reverse()
	self.display(display_list)
# Explain unsatisfied blockers: show the conflicting packages together with
# the parents (and atoms) that pulled each one into the graph.
# NOTE(review): several original lines are missing from this excerpt
# (initializations such as conflict_pkgs/pruned_pkgs/msg/indent/max_parents,
# and various break/continue/else lines); comments cover only visible code.
7690 def _show_unsatisfied_blockers(self, blockers):
7691 self._show_merge_list()
7692 msg = "Error: The above package list contains " + \
7693 "packages which cannot be installed " + \
7694 "at the same time on the same system."
7695 prefix = colorize("BAD", " * ")
7696 from textwrap import wrap
7697 portage.writemsg("\n", noiselevel=-1)
7698 for line in wrap(msg, 70):
7699 portage.writemsg(prefix + line + "\n", noiselevel=-1)
7701 # Display the conflicting packages along with the packages
7702 # that pulled them in. This is helpful for troubleshooting
7703 # cases in which blockers don't solve automatically and
7704 # the reasons are not apparent from the normal merge list
# Gather every package touching a blocker plus the parent atoms that
# pulled it in; packages only reachable via @world get a synthetic entry.
7708 for blocker in blockers:
7709 for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7710 self._blocker_parents.parent_nodes(blocker)):
7711 parent_atoms = self._parent_atoms.get(pkg)
7712 if not parent_atoms:
7713 atom = self._blocked_world_pkgs.get(pkg)
7714 if atom is not None:
7715 parent_atoms = set([("@world", atom)])
7717 conflict_pkgs[pkg] = parent_atoms
7720 # Reduce noise by pruning packages that are only
7721 # pulled in by other conflict packages.
7723 for pkg, parent_atoms in conflict_pkgs.iteritems():
7724 relevant_parent = False
7725 for parent, atom in parent_atoms:
7726 if parent not in conflict_pkgs:
7727 relevant_parent = True
7729 if not relevant_parent:
7730 pruned_pkgs.add(pkg)
7731 for pkg in pruned_pkgs:
7732 del conflict_pkgs[pkg]
7738 # Max number of parents shown, to avoid flooding the display.
7740 for pkg, parent_atoms in conflict_pkgs.iteritems():
7744 # Prefer packages that are not directly involved in a conflict.
# First pass picks non-conflict parents, second pass fills remaining
# slots with any parents, both capped at max_parents.
7745 for parent_atom in parent_atoms:
7746 if len(pruned_list) >= max_parents:
7748 parent, atom = parent_atom
7749 if parent not in conflict_pkgs:
7750 pruned_list.add(parent_atom)
7752 for parent_atom in parent_atoms:
7753 if len(pruned_list) >= max_parents:
7755 pruned_list.add(parent_atom)
7757 omitted_parents = len(parent_atoms) - len(pruned_list)
7758 msg.append(indent + "%s pulled in by\n" % pkg)
7760 for parent_atom in pruned_list:
7761 parent, atom = parent_atom
7762 msg.append(2*indent)
7763 if isinstance(parent,
7764 (PackageArg, AtomArg)):
7765 # For PackageArg and AtomArg types, it's
7766 # redundant to display the atom attribute.
7767 msg.append(str(parent))
7769 # Display the specific atom from SetArg or
7771 msg.append("%s required by %s" % (atom, parent))
7775 msg.append(2*indent)
7776 msg.append("(and %d more)\n" % omitted_parents)
7780 sys.stderr.write("".join(msg))
7783 if "--quiet" not in self.myopts:
7784 show_blocker_docs_link()
7786 def display(self, mylist, favorites=[], verbosity=None):
7788 # This is used to prevent display_problems() from
7789 # redundantly displaying this exact same merge list
7790 # again via _show_merge_list().
7791 self._displayed_list = mylist
7793 if verbosity is None:
7794 verbosity = ("--quiet" in self.myopts and 1 or \
7795 "--verbose" in self.myopts and 3 or 2)
7796 favorites_set = InternalPackageSet(favorites)
7797 oneshot = "--oneshot" in self.myopts or \
7798 "--onlydeps" in self.myopts
7799 columns = "--columns" in self.myopts
7804 counters = PackageCounters()
7806 if verbosity == 1 and "--verbose" not in self.myopts:
7807 def create_use_string(*args):
7810 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7812 is_new, reinst_flags,
7813 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7814 alphabetical=("--alphabetical" in self.myopts)):
7822 cur_iuse = set(cur_iuse)
7823 enabled_flags = cur_iuse.intersection(cur_use)
7824 removed_iuse = set(old_iuse).difference(cur_iuse)
7825 any_iuse = cur_iuse.union(old_iuse)
7826 any_iuse = list(any_iuse)
7828 for flag in any_iuse:
7831 reinst_flag = reinst_flags and flag in reinst_flags
7832 if flag in enabled_flags:
7834 if is_new or flag in old_use and \
7835 (all_flags or reinst_flag):
7836 flag_str = red(flag)
7837 elif flag not in old_iuse:
7838 flag_str = yellow(flag) + "%*"
7839 elif flag not in old_use:
7840 flag_str = green(flag) + "*"
7841 elif flag in removed_iuse:
7842 if all_flags or reinst_flag:
7843 flag_str = yellow("-" + flag) + "%"
7846 flag_str = "(" + flag_str + ")"
7847 removed.append(flag_str)
7850 if is_new or flag in old_iuse and \
7851 flag not in old_use and \
7852 (all_flags or reinst_flag):
7853 flag_str = blue("-" + flag)
7854 elif flag not in old_iuse:
7855 flag_str = yellow("-" + flag)
7856 if flag not in iuse_forced:
7858 elif flag in old_use:
7859 flag_str = green("-" + flag) + "*"
7861 if flag in iuse_forced:
7862 flag_str = "(" + flag_str + ")"
7864 enabled.append(flag_str)
7866 disabled.append(flag_str)
7869 ret = " ".join(enabled)
7871 ret = " ".join(enabled + disabled + removed)
7873 ret = '%s="%s" ' % (name, ret)
7876 repo_display = RepoDisplay(self.roots)
7880 mygraph = self.digraph.copy()
7882 # If there are any Uninstall instances, add the corresponding
7883 # blockers to the digraph (useful for --tree display).
7885 executed_uninstalls = set(node for node in mylist \
7886 if isinstance(node, Package) and node.operation == "unmerge")
7888 for uninstall in self._blocker_uninstalls.leaf_nodes():
7889 uninstall_parents = \
7890 self._blocker_uninstalls.parent_nodes(uninstall)
7891 if not uninstall_parents:
7894 # Remove the corresponding "nomerge" node and substitute
7895 # the Uninstall node.
7896 inst_pkg = self._pkg_cache[
7897 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7899 mygraph.remove(inst_pkg)
7904 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7906 inst_pkg_blockers = []
7908 # Break the Package -> Uninstall edges.
7909 mygraph.remove(uninstall)
7911 # Resolution of a package's blockers
7912 # depend on it's own uninstallation.
7913 for blocker in inst_pkg_blockers:
7914 mygraph.add(uninstall, blocker)
7916 # Expand Package -> Uninstall edges into
7917 # Package -> Blocker -> Uninstall edges.
7918 for blocker in uninstall_parents:
7919 mygraph.add(uninstall, blocker)
7920 for parent in self._blocker_parents.parent_nodes(blocker):
7921 if parent != inst_pkg:
7922 mygraph.add(blocker, parent)
7924 # If the uninstall task did not need to be executed because
7925 # of an upgrade, display Blocker -> Upgrade edges since the
7926 # corresponding Blocker -> Uninstall edges will not be shown.
7928 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7929 if upgrade_node is not None and \
7930 uninstall not in executed_uninstalls:
7931 for blocker in uninstall_parents:
7932 mygraph.add(upgrade_node, blocker)
7934 unsatisfied_blockers = []
7939 if isinstance(x, Blocker) and not x.satisfied:
7940 unsatisfied_blockers.append(x)
7943 if "--tree" in self.myopts:
7944 depth = len(tree_nodes)
7945 while depth and graph_key not in \
7946 mygraph.child_nodes(tree_nodes[depth-1]):
7949 tree_nodes = tree_nodes[:depth]
7950 tree_nodes.append(graph_key)
7951 display_list.append((x, depth, True))
7952 shown_edges.add((graph_key, tree_nodes[depth-1]))
7954 traversed_nodes = set() # prevent endless circles
7955 traversed_nodes.add(graph_key)
7956 def add_parents(current_node, ordered):
7958 # Do not traverse to parents if this node is an
7959 # an argument or a direct member of a set that has
7960 # been specified as an argument (system or world).
7961 if current_node not in self._set_nodes:
7962 parent_nodes = mygraph.parent_nodes(current_node)
7964 child_nodes = set(mygraph.child_nodes(current_node))
7965 selected_parent = None
7966 # First, try to avoid a direct cycle.
7967 for node in parent_nodes:
7968 if not isinstance(node, (Blocker, Package)):
7970 if node not in traversed_nodes and \
7971 node not in child_nodes:
7972 edge = (current_node, node)
7973 if edge in shown_edges:
7975 selected_parent = node
7977 if not selected_parent:
7978 # A direct cycle is unavoidable.
7979 for node in parent_nodes:
7980 if not isinstance(node, (Blocker, Package)):
7982 if node not in traversed_nodes:
7983 edge = (current_node, node)
7984 if edge in shown_edges:
7986 selected_parent = node
7989 shown_edges.add((current_node, selected_parent))
7990 traversed_nodes.add(selected_parent)
7991 add_parents(selected_parent, False)
7992 display_list.append((current_node,
7993 len(tree_nodes), ordered))
7994 tree_nodes.append(current_node)
7996 add_parents(graph_key, True)
7998 display_list.append((x, depth, True))
7999 mylist = display_list
8000 for x in unsatisfied_blockers:
8001 mylist.append((x, 0, True))
8003 last_merge_depth = 0
8004 for i in xrange(len(mylist)-1,-1,-1):
8005 graph_key, depth, ordered = mylist[i]
8006 if not ordered and depth == 0 and i > 0 \
8007 and graph_key == mylist[i-1][0] and \
8008 mylist[i-1][1] == 0:
8009 # An ordered node got a consecutive duplicate when the tree was
8013 if ordered and graph_key[-1] != "nomerge":
8014 last_merge_depth = depth
8016 if depth >= last_merge_depth or \
8017 i < len(mylist) - 1 and \
8018 depth >= mylist[i+1][1]:
8021 from portage import flatten
8022 from portage.dep import use_reduce, paren_reduce
8023 # files to fetch list - avoids counting a same file twice
8024 # in size display (verbose mode)
8027 # Use this set to detect when all the "repoadd" strings are "[0]"
8028 # and disable the entire repo display in this case.
8031 for mylist_index in xrange(len(mylist)):
8032 x, depth, ordered = mylist[mylist_index]
8036 portdb = self.trees[myroot]["porttree"].dbapi
8037 bindb = self.trees[myroot]["bintree"].dbapi
8038 vardb = self.trees[myroot]["vartree"].dbapi
8039 vartree = self.trees[myroot]["vartree"]
8040 pkgsettings = self.pkgsettings[myroot]
8043 indent = " " * depth
8045 if isinstance(x, Blocker):
8047 blocker_style = "PKG_BLOCKER_SATISFIED"
8048 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
8050 blocker_style = "PKG_BLOCKER"
8051 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
8053 counters.blocks += 1
8055 counters.blocks_satisfied += 1
8056 resolved = portage.key_expand(
8057 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
8058 if "--columns" in self.myopts and "--quiet" in self.myopts:
8059 addl += " " + colorize(blocker_style, resolved)
8061 addl = "[%s %s] %s%s" % \
8062 (colorize(blocker_style, "blocks"),
8063 addl, indent, colorize(blocker_style, resolved))
8064 block_parents = self._blocker_parents.parent_nodes(x)
8065 block_parents = set([pnode[2] for pnode in block_parents])
8066 block_parents = ", ".join(block_parents)
8068 addl += colorize(blocker_style,
8069 " (\"%s\" is blocking %s)") % \
8070 (str(x.atom).lstrip("!"), block_parents)
8072 addl += colorize(blocker_style,
8073 " (is blocking %s)") % block_parents
8074 if isinstance(x, Blocker) and x.satisfied:
8079 blockers.append(addl)
8082 pkg_merge = ordered and pkg_status == "merge"
8083 if not pkg_merge and pkg_status == "merge":
8084 pkg_status = "nomerge"
8085 built = pkg_type != "ebuild"
8086 installed = pkg_type == "installed"
8088 metadata = pkg.metadata
8090 repo_name = metadata["repository"]
8091 if pkg_type == "ebuild":
8092 ebuild_path = portdb.findname(pkg_key)
8093 if not ebuild_path: # shouldn't happen
8094 raise portage.exception.PackageNotFound(pkg_key)
8095 repo_path_real = os.path.dirname(os.path.dirname(
8096 os.path.dirname(ebuild_path)))
8098 repo_path_real = portdb.getRepositoryPath(repo_name)
8099 pkg_use = list(pkg.use.enabled)
8101 restrict = flatten(use_reduce(paren_reduce(
8102 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8103 except portage.exception.InvalidDependString, e:
8104 if not pkg.installed:
8105 show_invalid_depstring_notice(x,
8106 pkg.metadata["RESTRICT"], str(e))
8110 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8111 "fetch" in restrict:
8114 counters.restrict_fetch += 1
8115 if portdb.fetch_check(pkg_key, pkg_use):
8118 counters.restrict_fetch_satisfied += 1
8120 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
8121 #param is used for -u, where you still *do* want to see when something is being upgraded.
8124 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8125 if vardb.cpv_exists(pkg_key):
8126 addl=" "+yellow("R")+fetch+" "
8129 counters.reinst += 1
8130 elif pkg_status == "uninstall":
8131 counters.uninst += 1
8132 # filter out old-style virtual matches
8133 elif installed_versions and \
8134 portage.cpv_getkey(installed_versions[0]) == \
8135 portage.cpv_getkey(pkg_key):
8136 myinslotlist = vardb.match(pkg.slot_atom)
8137 # If this is the first install of a new-style virtual, we
8138 # need to filter out old-style virtual matches.
8139 if myinslotlist and \
8140 portage.cpv_getkey(myinslotlist[0]) != \
8141 portage.cpv_getkey(pkg_key):
8144 myoldbest = myinslotlist[:]
8146 if not portage.dep.cpvequal(pkg_key,
8147 portage.best([pkg_key] + myoldbest)):
8149 addl += turquoise("U")+blue("D")
8151 counters.downgrades += 1
8154 addl += turquoise("U") + " "
8156 counters.upgrades += 1
8158 # New slot, mark it new.
8159 addl = " " + green("NS") + fetch + " "
8160 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8162 counters.newslot += 1
8164 if "--changelog" in self.myopts:
8165 inst_matches = vardb.match(pkg.slot_atom)
8167 changelogs.extend(self.calc_changelog(
8168 portdb.findname(pkg_key),
8169 inst_matches[0], pkg_key))
8171 addl = " " + green("N") + " " + fetch + " "
8180 forced_flags = set()
8181 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8182 forced_flags.update(pkgsettings.useforce)
8183 forced_flags.update(pkgsettings.usemask)
8185 cur_use = [flag for flag in pkg.use.enabled \
8186 if flag in pkg.iuse.all]
8187 cur_iuse = sorted(pkg.iuse.all)
8189 if myoldbest and myinslotlist:
8190 previous_cpv = myoldbest[0]
8192 previous_cpv = pkg.cpv
8193 if vardb.cpv_exists(previous_cpv):
8194 old_iuse, old_use = vardb.aux_get(
8195 previous_cpv, ["IUSE", "USE"])
8196 old_iuse = list(set(
8197 filter_iuse_defaults(old_iuse.split())))
8199 old_use = old_use.split()
8206 old_use = [flag for flag in old_use if flag in old_iuse]
8208 use_expand = pkgsettings["USE_EXPAND"].lower().split()
8210 use_expand.reverse()
8211 use_expand_hidden = \
8212 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8214 def map_to_use_expand(myvals, forcedFlags=False,
8218 for exp in use_expand:
8221 for val in myvals[:]:
8222 if val.startswith(exp.lower()+"_"):
8223 if val in forced_flags:
8224 forced[exp].add(val[len(exp)+1:])
8225 ret[exp].append(val[len(exp)+1:])
8228 forced["USE"] = [val for val in myvals \
8229 if val in forced_flags]
8231 for exp in use_expand_hidden:
8237 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8238 # are the only thing that triggered reinstallation.
8239 reinst_flags_map = {}
8240 reinstall_for_flags = self._reinstall_nodes.get(pkg)
8241 reinst_expand_map = None
8242 if reinstall_for_flags:
8243 reinst_flags_map = map_to_use_expand(
8244 list(reinstall_for_flags), removeHidden=False)
8245 for k in list(reinst_flags_map):
8246 if not reinst_flags_map[k]:
8247 del reinst_flags_map[k]
8248 if not reinst_flags_map.get("USE"):
8249 reinst_expand_map = reinst_flags_map.copy()
8250 reinst_expand_map.pop("USE", None)
8251 if reinst_expand_map and \
8252 not set(reinst_expand_map).difference(
8254 use_expand_hidden = \
8255 set(use_expand_hidden).difference(
8258 cur_iuse_map, iuse_forced = \
8259 map_to_use_expand(cur_iuse, forcedFlags=True)
8260 cur_use_map = map_to_use_expand(cur_use)
8261 old_iuse_map = map_to_use_expand(old_iuse)
8262 old_use_map = map_to_use_expand(old_use)
8265 use_expand.insert(0, "USE")
8267 for key in use_expand:
8268 if key in use_expand_hidden:
8270 verboseadd += create_use_string(key.upper(),
8271 cur_iuse_map[key], iuse_forced[key],
8272 cur_use_map[key], old_iuse_map[key],
8273 old_use_map[key], is_new,
8274 reinst_flags_map.get(key))
8279 if pkg_type == "ebuild" and pkg_merge:
8281 myfilesdict = portdb.getfetchsizes(pkg_key,
8282 useflags=pkg_use, debug=self.edebug)
8283 except portage.exception.InvalidDependString, e:
8284 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8285 show_invalid_depstring_notice(x, src_uri, str(e))
8288 if myfilesdict is None:
8289 myfilesdict="[empty/missing/bad digest]"
8291 for myfetchfile in myfilesdict:
8292 if myfetchfile not in myfetchlist:
8293 mysize+=myfilesdict[myfetchfile]
8294 myfetchlist.append(myfetchfile)
8296 counters.totalsize += mysize
8297 verboseadd += format_size(mysize)
8300 # assign index for a previous version in the same slot
8301 has_previous = False
8302 repo_name_prev = None
8303 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8305 slot_matches = vardb.match(slot_atom)
8308 repo_name_prev = vardb.aux_get(slot_matches[0],
8311 # now use the data to generate output
8312 if pkg.installed or not has_previous:
8313 repoadd = repo_display.repoStr(repo_path_real)
8315 repo_path_prev = None
8317 repo_path_prev = portdb.getRepositoryPath(
8319 if repo_path_prev == repo_path_real:
8320 repoadd = repo_display.repoStr(repo_path_real)
8322 repoadd = "%s=>%s" % (
8323 repo_display.repoStr(repo_path_prev),
8324 repo_display.repoStr(repo_path_real))
8326 repoadd_set.add(repoadd)
8328 xs = [portage.cpv_getkey(pkg_key)] + \
8329 list(portage.catpkgsplit(pkg_key)[2:])
8336 if "COLUMNWIDTH" in self.settings:
8338 mywidth = int(self.settings["COLUMNWIDTH"])
8339 except ValueError, e:
8340 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8342 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8343 self.settings["COLUMNWIDTH"], noiselevel=-1)
8345 oldlp = mywidth - 30
8348 # Convert myoldbest from a list to a string.
8352 for pos, key in enumerate(myoldbest):
8353 key = portage.catpkgsplit(key)[2] + \
8354 "-" + portage.catpkgsplit(key)[3]
8355 if key[-3:] == "-r0":
8357 myoldbest[pos] = key
8358 myoldbest = blue("["+", ".join(myoldbest)+"]")
8361 root_config = self.roots[myroot]
8362 system_set = root_config.sets["system"]
8363 world_set = root_config.sets["world"]
8368 pkg_system = system_set.findAtomForPackage(pkg)
8369 pkg_world = world_set.findAtomForPackage(pkg)
8370 if not (oneshot or pkg_world) and \
8371 myroot == self.target_root and \
8372 favorites_set.findAtomForPackage(pkg):
8373 # Maybe it will be added to world now.
8374 if create_world_atom(pkg, favorites_set, root_config):
8376 except portage.exception.InvalidDependString:
8377 # This is reported elsewhere if relevant.
8380 def pkgprint(pkg_str):
8383 return colorize("PKG_MERGE_SYSTEM", pkg_str)
8385 return colorize("PKG_MERGE_WORLD", pkg_str)
8387 return colorize("PKG_MERGE", pkg_str)
8388 elif pkg_status == "uninstall":
8389 return colorize("PKG_UNINSTALL", pkg_str)
8392 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8394 return colorize("PKG_NOMERGE_WORLD", pkg_str)
8396 return colorize("PKG_NOMERGE", pkg_str)
8399 properties = flatten(use_reduce(paren_reduce(
8400 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8401 except portage.exception.InvalidDependString, e:
8402 if not pkg.installed:
8403 show_invalid_depstring_notice(pkg,
8404 pkg.metadata["PROPERTIES"], str(e))
8408 interactive = "interactive" in properties
8409 if interactive and pkg.operation == "merge":
8410 addl = colorize("WARN", "I") + addl[1:]
8412 counters.interactive += 1
8417 if "--columns" in self.myopts:
8418 if "--quiet" in self.myopts:
8419 myprint=addl+" "+indent+pkgprint(pkg_cp)
8420 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8421 myprint=myprint+myoldbest
8422 myprint=myprint+darkgreen("to "+x[1])
8426 myprint = "[%s] %s%s" % \
8427 (pkgprint(pkg_status.ljust(13)),
8428 indent, pkgprint(pkg.cp))
8430 myprint = "[%s %s] %s%s" % \
8431 (pkgprint(pkg.type_name), addl,
8432 indent, pkgprint(pkg.cp))
8433 if (newlp-nc_len(myprint)) > 0:
8434 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8435 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8436 if (oldlp-nc_len(myprint)) > 0:
8437 myprint=myprint+" "*(oldlp-nc_len(myprint))
8438 myprint=myprint+myoldbest
8439 myprint += darkgreen("to " + pkg.root)
8442 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8444 myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8445 myprint += indent + pkgprint(pkg_key) + " " + \
8446 myoldbest + darkgreen("to " + myroot)
8448 if "--columns" in self.myopts:
8449 if "--quiet" in self.myopts:
8450 myprint=addl+" "+indent+pkgprint(pkg_cp)
8451 myprint=myprint+" "+green(xs[1]+xs[2])+" "
8452 myprint=myprint+myoldbest
8456 myprint = "[%s] %s%s" % \
8457 (pkgprint(pkg_status.ljust(13)),
8458 indent, pkgprint(pkg.cp))
8460 myprint = "[%s %s] %s%s" % \
8461 (pkgprint(pkg.type_name), addl,
8462 indent, pkgprint(pkg.cp))
8463 if (newlp-nc_len(myprint)) > 0:
8464 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8465 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8466 if (oldlp-nc_len(myprint)) > 0:
8467 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8468 myprint += myoldbest
8471 myprint = "[%s] %s%s %s" % \
8472 (pkgprint(pkg_status.ljust(13)),
8473 indent, pkgprint(pkg.cpv),
8476 myprint = "[%s %s] %s%s %s" % \
8477 (pkgprint(pkg_type), addl, indent,
8478 pkgprint(pkg.cpv), myoldbest)
8480 if columns and pkg.operation == "uninstall":
8482 p.append((myprint, verboseadd, repoadd))
8484 if "--tree" not in self.myopts and \
8485 "--quiet" not in self.myopts and \
8486 not self._opts_no_restart.intersection(self.myopts) and \
8487 pkg.root == self._running_root.root and \
8488 portage.match_from_list(
8489 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8490 not vardb.cpv_exists(pkg.cpv) and \
8491 "--quiet" not in self.myopts:
8492 if mylist_index < len(mylist) - 1:
8493 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8494 p.append(colorize("WARN", " then resume the merge."))
8497 show_repos = repoadd_set and repoadd_set != set(["0"])
8500 if isinstance(x, basestring):
8501 out.write("%s\n" % (x,))
8504 myprint, verboseadd, repoadd = x
8507 myprint += " " + verboseadd
8509 if show_repos and repoadd:
8510 myprint += " " + teal("[%s]" % repoadd)
8512 out.write("%s\n" % (myprint,))
8521 sys.stdout.write(str(repo_display))
8523 if "--changelog" in self.myopts:
8525 for revision,text in changelogs:
8526 print bold('*'+revision)
8527 sys.stdout.write(text)
# Public entry point for reporting dependency-graph problems (slot
# collisions, circular deps, masked packages, ...) after the merge list.
# NOTE(review): this excerpt is an elided listing -- internal line numbers
# jump (8533, 8539, 8542-8543, 8552-8555, 8558-8562 missing), so the
# docstring delimiters and part of the body (likely a try/finally that
# saves and restores sys.stdout) are not visible here.
8532 def display_problems(self):
8534 Display problems with the dependency graph such as slot collisions.
8535 This is called internally by display() to show the problems _after_
8536 the merge list where it is most likely to be seen, but if display()
8537 is not going to be called then this method should be called explicitly
8538 to ensure that the user is notified of problems with the graph.
8540 All output goes to stderr, except for unsatisfied dependencies which
8541 go to stdout for parsing by programs such as autounmask.
8544 # Note that show_masked_packages() sends its output to
8545 # stdout, and some programs such as autounmask parse the
8546 # output in cases when emerge bails out. However, when
8547 # show_masked_packages() is called for installed packages
8548 # here, the message is a warning that is more appropriate
8549 # to send to stderr, so temporarily redirect stdout to
8550 # stderr. TODO: Fix output code so there's a cleaner way
8551 # to redirect everything to stderr.
# Route everything printed by _display_problems() to stderr; the code
# that restores the real stdout is in lines elided from this excerpt --
# confirm against the full source.
8556 sys.stdout = sys.stderr
8557 self._display_problems()
# Unsatisfied deps intentionally go to stdout so external tools such as
# autounmask can parse them (see docstring above).
8563 # This goes to stdout for parsing by programs like autounmask.
8564 for pargs, kwargs in self._unsatisfied_deps_for_display:
8565 self._show_unsatisfied_dep(*pargs, **kwargs)
# Emit the actual problem reports to stderr, in priority order:
# circular deps, unresolvable blockers (or, failing that, slot
# collisions), world-file problems, missing ebuilds, package.provided
# conflicts, and masked installed packages.
# NOTE(review): elided listing -- several internal lines are missing
# (e.g. 8571, 8577, 8579, 8582, 8592-8594, 8607, 8609, 8612, 8614-8615,
# 8619-8620, 8625, 8631-8632, 8634, 8638, 8647, 8655), including some
# else-branches, list initializations ('msg', 'arg_refs') and guards.
8567 def _display_problems(self):
8568 if self._circular_deps_for_display is not None:
8569 self._show_circular_deps(
8570 self._circular_deps_for_display)
8572 # The user is only notified of a slot conflict if
8573 # there are no unresolvable blocker conflicts.
8574 if self._unsatisfied_blockers_for_display is not None:
8575 self._show_unsatisfied_blockers(
8576 self._unsatisfied_blockers_for_display)
8578 self._show_slot_collision_notice()
8580 # TODO: Add generic support for "set problem" handlers so that
8581 # the below warnings aren't special cases for world only.
# Detect direct members of the world set that could not be satisfied.
8583 if self._missing_args:
8584 world_problems = False
8585 if "world" in self._sets:
8586 # Filter out indirect members of world (from nested sets)
8587 # since only direct members of world are desired here.
8588 world_set = self.roots[self.target_root].sets["world"]
8589 for arg, atom in self._missing_args:
8590 if arg.name == "world" and atom in world_set:
8591 world_problems = True
8595 sys.stderr.write("\n!!! Problems have been " + \
8596 "detected with your world file\n")
8597 sys.stderr.write("!!! Please run " + \
8598 green("emaint --check world")+"\n\n")
8600 if self._missing_args:
8601 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8602 " Ebuilds for the following packages are either all\n")
8603 sys.stderr.write(colorize("BAD", "!!!") + \
8604 " masked or don't exist:\n")
8605 sys.stderr.write(" ".join(str(atom) for arg, atom in \
8606 self._missing_args) + "\n")
# Requested packages listed in package.provided will not be merged;
# group them by (arg, atom) and report which sets pulled them in.
8608 if self._pprovided_args:
8610 for arg, atom in self._pprovided_args:
8611 if isinstance(arg, SetArg):
8613 arg_atom = (atom, atom)
8616 arg_atom = (arg.arg, atom)
8617 refs = arg_refs.setdefault(arg_atom, [])
8618 if parent not in refs:
8621 msg.append(bad("\nWARNING: "))
8622 if len(self._pprovided_args) > 1:
8623 msg.append("Requested packages will not be " + \
8624 "merged because they are listed in\n")
8626 msg.append("A requested package will not be " + \
8627 "merged because it is listed in\n")
8628 msg.append("package.provided:\n\n")
8629 problems_sets = set()
8630 for (arg, atom), refs in arg_refs.iteritems():
8633 problems_sets.update(refs)
8635 ref_string = ", ".join(["'%s'" % name for name in refs])
8636 ref_string = " pulled in by " + ref_string
8637 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8639 if "world" in problems_sets:
8640 msg.append("This problem can be solved in one of the following ways:\n\n")
8641 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
8642 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
8643 msg.append(" C) Remove offending entries from package.provided.\n\n")
8644 msg.append("The best course of action depends on the reason that an offending\n")
8645 msg.append("package.provided entry exists.\n\n")
8646 sys.stderr.write("".join(msg))
# Finally, warn about installed packages that are currently masked.
8648 masked_packages = []
8649 for pkg in self._masked_installed:
8650 root_config = pkg.root_config
8651 pkgsettings = self.pkgsettings[pkg.root]
8652 mreasons = get_masking_status(pkg, pkgsettings, root_config)
8653 masked_packages.append((root_config, pkgsettings,
8654 pkg.cpv, pkg.metadata, mreasons))
8656 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8657 " The following installed packages are masked:\n")
8658 show_masked_packages(masked_packages)
# Collect the ChangeLog entries between the installed version ('current')
# and the version about to be merged ('next'). Entries come from
# find_changelog_tags(), which yields (version, text) tuples.
# NOTE(review): elided listing -- missing internal lines include the
# early return for a missing ebuild (8664), the '-r0' strip for 'next'
# (8670), the try: around the ChangeLog read (8672), the generic except
# fallback (8676-8677), loop break/return statements (8685, 8690-8691)
# and the final returns (8693-8694).
8662 def calc_changelog(self,ebuildpath,current,next):
8663 if ebuildpath == None or not os.path.exists(ebuildpath):
# Normalize both versions to "PV[-PR]" form, dropping a "-r0" suffix.
8665 current = '-'.join(portage.catpkgsplit(current)[1:])
8666 if current.endswith('-r0'):
8667 current = current[:-3]
8668 next = '-'.join(portage.catpkgsplit(next)[1:])
8669 if next.endswith('-r0'):
8671 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8673 changelog = open(changelogpath).read()
8674 except SystemExit, e:
8675 raise # Needed else can't exit
8678 divisions = self.find_changelog_tags(changelog)
8679 #print 'XX from',current,'to',next
8680 #for div,text in divisions: print 'XX',div
8681 # skip entries for all revisions above the one we are about to emerge
8682 for i in range(len(divisions)):
8683 if divisions[i][0]==next:
8684 divisions = divisions[i:]
8686 # find out how many entries we are going to display
8687 for i in range(len(divisions)):
8688 if divisions[i][0]==current:
8689 divisions = divisions[:i]
8692 # couldn't find the current revision in the list. display nothing
# Split raw ChangeLog text into (version, text) chunks, newest first.
# Version headers match lines like "* pkg-1.2.3 ..." via the regex
# below; trailing ".ebuild" and "-r0" suffixes are stripped from the
# captured release name.
# NOTE(review): elided listing -- the initialization of 'divs' and
# 'release' (8697-8699), the loop construct (8701), the no-more-matches
# break (8704) and the final return of 'divs' (8713) are not visible.
8696 def find_changelog_tags(self,changelog):
8700 match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8702 if release is not None:
8703 divs.append((release,changelog))
8705 if release is not None:
8706 divs.append((release,changelog[:match.start()]))
# Consume the matched header and remember its release for the next chunk.
8707 changelog = changelog[match.end():]
8708 release = match.group(1)
8709 if release.endswith('.ebuild'):
8710 release = release[:-7]
8711 if release.endswith('-r0'):
8712 release = release[:-3]
# NOTE(review): elided listing -- missing internal lines include the
# early 'return' for pretend-style options (8720), the world-file lock
# acquisition (8726-8728), the try: wrappers around create_world_atom
# (8738-8740, 8742, 8744), set-membership guards (8751-8752, 8755-8758),
# 'all_added' initialization/sorting (8761-8762, 8765) and the matching
# unlock in a finally block (8767+). Verify against the full source.
8714 def saveNomergeFavorites(self):
8715 """Find atoms in favorites that are not in the mergelist and add them
8716 to the world file if necessary."""
# Options below imply nothing is really merged, so world must not change.
8717 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8718 "--oneshot", "--onlydeps", "--pretend"):
8719 if x in self.myopts:
8721 root_config = self.roots[self.target_root]
8722 world_set = root_config.sets["world"]
8724 world_locked = False
8725 if hasattr(world_set, "lock"):
8729 if hasattr(world_set, "load"):
8730 world_set.load() # maybe it's changed on disk
# Collect world-atom candidates from "nomerge" nodes that were pulled
# in as arguments.
8732 args_set = self._sets["args"]
8733 portdb = self.trees[self.target_root]["porttree"].dbapi
8734 added_favorites = set()
8735 for x in self._set_nodes:
8736 pkg_type, root, pkg_key, pkg_status = x
8737 if pkg_status != "nomerge":
8741 myfavkey = create_world_atom(x, args_set, root_config)
8743 if myfavkey in added_favorites:
8745 added_favorites.add(myfavkey)
8746 except portage.exception.InvalidDependString, e:
8747 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8748 (pkg_key, str(e)), noiselevel=-1)
8749 writemsg("!!! see '%s'\n\n" % os.path.join(
8750 root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
# Also record requested sets (SETPREFIX-qualified) that qualify as
# world candidates, then write everything to the world file.
8753 for k in self._sets:
8754 if k in ("args", "world") or not root_config.sets[k].world_candidate:
8759 all_added.append(SETPREFIX + k)
8760 all_added.extend(added_favorites)
8763 print ">>> Recording %s in \"world\" favorites file..." % \
8764 colorize("INFORM", str(a))
8766 world_set.update(all_added)
# NOTE(review): elided listing -- the signature continuation (8772-8773),
# docstring delimiters, several guards/raise statements (8779-8780,
# 8783-8784, 8786, 8788-8789, 8791, 8794, 8796, 8801, 8803, 8806-8809),
# the masked_tasks initialization, and parts of the graph-construction
# tail are not visible here. Verify details against the full source.
8771 def loadResumeCommand(self, resume_data, skip_masked=True,
8774 Add a resume command to the graph and validate it in the process. This
8775 will raise a PackageNotFound exception if a package is not available.
# Validate the shape of the resume data before trusting it.
8778 if not isinstance(resume_data, dict):
8781 mergelist = resume_data.get("mergelist")
8782 if not isinstance(mergelist, list):
8785 fakedb = self.mydbapi
8787 serialized_tasks = []
# Rebuild Package instances from the serialized
# (type, root, cpv, action) tuples in the mergelist.
8790 if not (isinstance(x, list) and len(x) == 4):
8792 pkg_type, myroot, pkg_key, action = x
8793 if pkg_type not in self.pkg_tree_map:
8795 if action != "merge":
8797 tree_type = self.pkg_tree_map[pkg_type]
8798 mydb = trees[myroot][tree_type].dbapi
8799 db_keys = list(self._trees_orig[myroot][
8800 tree_type].dbapi._aux_cache_keys)
8802 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8804 # It does not exist or it is corrupt.
8805 if action == "uninstall":
8808 # TODO: log these somewhere
8810 raise portage.exception.PackageNotFound(pkg_key)
8811 installed = action == "uninstall"
8812 built = pkg_type != "ebuild"
8813 root_config = self.roots[myroot]
8814 pkg = Package(built=built, cpv=pkg_key,
8815 installed=installed, metadata=metadata,
8816 operation=action, root_config=root_config,
# For ebuilds, recompute USE/CHOST from current settings rather than
# trusting the serialized metadata.
8818 if pkg_type == "ebuild":
8819 pkgsettings = self.pkgsettings[myroot]
8820 pkgsettings.setcpv(pkg)
8821 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8822 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8823 self._pkg_cache[pkg] = pkg
# A package that became masked since the list was saved is queued as a
# masked task (or shown as unsatisfied) instead of being merged blindly.
8825 root_config = self.roots[pkg.root]
8826 if "merge" == pkg.operation and \
8827 not visible(root_config.settings, pkg):
8829 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8831 self._unsatisfied_deps_for_display.append(
8832 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8834 fakedb[myroot].cpv_inject(pkg)
8835 serialized_tasks.append(pkg)
8836 self.spinner.update()
8838 if self._unsatisfied_deps_for_display:
8841 if not serialized_tasks or "--nodeps" in self.myopts:
8842 self._serialized_tasks_cache = serialized_tasks
8843 self._scheduler_graph = self.digraph
8845 self._select_package = self._select_pkg_from_graph
8846 self.myparams.add("selective")
8847 # Always traverse deep dependencies in order to account for
8848 # potentially unsatisfied dependencies of installed packages.
8849 # This is necessary for correct --keep-going or --resume operation
8850 # in case a package from a group of circularly dependent packages
8851 # fails. In this case, a package which has recently been installed
8852 # may have an unsatisfied circular dependency (pulled in by
8853 # PDEPEND, for example). So, even though a package is already
8854 # installed, it may not have all of its dependencies satisfied, so
8855 # it may not be usable. If such a package is in the subgraph of
8856 # deep dependencies of a scheduled build, that build needs to
8857 # be cancelled. In order for this type of situation to be
8858 # recognized, deep traversal of dependencies is required.
8859 self.myparams.add("deep")
8861 favorites = resume_data.get("favorites")
8862 args_set = self._sets["args"]
8863 if isinstance(favorites, list):
8864 args = self._load_favorites(favorites)
# Re-add the restored tasks to the dependency graph.
8868 for task in serialized_tasks:
8869 if isinstance(task, Package) and \
8870 task.operation == "merge":
8871 if not self._add_pkg(task, None):
8874 # Packages for argument atoms need to be explicitly
8875 # added via _add_pkg() so that they are included in the
8876 # digraph (needed at least for --tree display).
8878 for atom in arg.set:
8879 pkg, existing_node = self._select_package(
8880 arg.root_config.root, atom)
8881 if existing_node is None and \
8883 if not self._add_pkg(pkg, Dependency(atom=atom,
8884 root=pkg.root, parent=arg)):
8887 # Allow unsatisfied deps here to avoid showing a masking
8888 # message for an unsatisfied dep that isn't necessarily
8890 if not self._create_graph(allow_unsatisfied=True):
# Only deps whose parent is actually being merged (directly or via the
# subgraph of a scheduled install) invalidate the resume list.
8893 unsatisfied_deps = []
8894 for dep in self._unsatisfied_deps:
8895 if not isinstance(dep.parent, Package):
8897 if dep.parent.operation == "merge":
8898 unsatisfied_deps.append(dep)
8901 # For unsatisfied deps of installed packages, only account for
8902 # them if they are in the subgraph of dependencies of a package
8903 # which is scheduled to be installed.
8904 unsatisfied_install = False
8906 dep_stack = self.digraph.parent_nodes(dep.parent)
8908 node = dep_stack.pop()
8909 if not isinstance(node, Package):
8911 if node.operation == "merge":
8912 unsatisfied_install = True
8914 if node in traversed:
8917 dep_stack.extend(self.digraph.parent_nodes(node))
8919 if unsatisfied_install:
8920 unsatisfied_deps.append(dep)
8922 if masked_tasks or unsatisfied_deps:
8923 # This probably means that a required package
8924 # was dropped via --skipfirst. It makes the
8925 # resume list invalid, so convert it to a
8926 # UnsatisfiedResumeDep exception.
8927 raise self.UnsatisfiedResumeDep(self,
8928 masked_tasks + unsatisfied_deps)
8929 self._serialized_tasks_cache = None
8932 except self._unknown_internal_error:
# NOTE(review): elided listing -- docstring delimiters (8938, 8945),
# the 'args' list initialization and the loop over 'favorites'
# (8949-8950), several continue statements and set-existence guards
# (8952, 8954, 8957-8960, 8969, 8971, 8974) are not visible here.
8937 def _load_favorites(self, favorites):
8939 Use a list of favorites to resume state from a
8940 previous select_files() call. This creates similar
8941 DependencyArg instances to those that would have
8942 been created by the original select_files() call.
8943 This allows Package instances to be matched with
8944 DependencyArg instances during graph creation.
8946 root_config = self.roots[self.target_root]
8947 getSetAtoms = root_config.setconfig.getSetAtoms
8948 sets = root_config.sets
# Each favorite is either a SETPREFIX-qualified set name or a plain atom.
8951 if not isinstance(x, basestring):
8953 if x in ("system", "world"):
8955 if x.startswith(SETPREFIX):
8956 s = x[len(SETPREFIX):]
8961 # Recursively expand sets so that containment tests in
8962 # self._get_parent_sets() properly match atoms in nested
8963 # sets (like if world contains system).
8964 expanded_set = InternalPackageSet(
8965 initial_atoms=getSetAtoms(s))
8966 self._sets[s] = expanded_set
8967 args.append(SetArg(arg=x, set=expanded_set,
8968 root_config=root_config))
8970 if not portage.isvalidatom(x):
8972 args.append(AtomArg(arg=x, atom=x,
8973 root_config=root_config))
8975 self._set_args(args)
# NOTE(review): the docstring delimiters (internal lines 8979, 8983)
# are elided from this excerpt.
8978 class UnsatisfiedResumeDep(portage.exception.PortageException):
8980 A dependency of a resume list is not installed. This
8981 can occur when a required package is dropped from the
8982 merge list via --skipfirst.
8984 def __init__(self, depgraph, value):
8985 portage.exception.PortageException.__init__(self, value)
# Keep a reference to the depgraph so handlers of this exception can
# inspect the failed resolution state.
8986 self.depgraph = depgraph
# Base class for exceptions used internally by the depgraph; carries an
# optional value like other PortageExceptions, defaulting to "".
8988 class _internal_exception(portage.exception.PortageException):
8989 def __init__(self, value=""):
8990 portage.exception.PortageException.__init__(self, value)
# NOTE(review): the docstring's opening/closing delimiters and its final
# sentence (internal lines 8993, 8997-8999) are elided from this excerpt.
8992 class _unknown_internal_error(_internal_exception):
8994 Used by the depgraph internally to terminate graph creation.
8995 The specific reason for the failure should have been dumped
8996 to stderr, unfortunately, the exact reason for the failure
# NOTE(review): the docstring delimiters (internal lines 9001, 9006)
# are elided from this excerpt.
9000 class _serialize_tasks_retry(_internal_exception):
9002 This is raised by the _serialize_tasks() method when it needs to
9003 be called again for some reason. The only case that it's currently
9004 used for is when neglected dependencies need to be added to the
9005 graph in order to avoid making a potentially unsafe decision.
# NOTE(review): this class continues past the end of the excerpt; the
# docstring delimiters and its final clause (internal lines 9009,
# 9014-9015) are elided here.
9008 class _dep_check_composite_db(portage.dbapi):
9010 A dbapi-like interface that is optimized for use in dep_check() calls.
9011 This is built on top of the existing depgraph package selection logic.
9012 Some packages that have been added to the graph may be masked from this
9013 view in order to influence the atom preference selection that occurs
# Bind this composite view to one depgraph/root pair and set up the
# per-instance memoization caches used by match().
# NOTE(review): internal line 9019 is elided -- presumably
# 'self._root = root', since self._root is read by match()/_visible();
# confirm against the full source.
9016 def __init__(self, depgraph, root):
9017 portage.dbapi.__init__(self)
9018 self._depgraph = depgraph
# atom -> list of matching cpvs, filled lazily by match().
9020 self._match_cache = {}
# cpv -> Package for visible matches, used to resolve match results.
9021 self._cpv_pkg_map = {}
def _clear_cache(self):
	"""Drop every memoized lookup so subsequent match() calls recompute."""
	for memo in (self._match_cache, self._cpv_pkg_map):
		memo.clear()
# dbapi-style match(): resolve an atom via the depgraph's package
# selection, consider all slots for new-style virtuals, filter by
# _visible(), and memoize the sorted result in _match_cache.
# NOTE(review): elided listing -- missing internal lines include the
# cache-hit early return and 'orig_atom' capture (9029-9032), 'ret'/
# 'slots' initialization (9035-9037, 9040), continue statements (9055,
# 9057), loop constructs over the remaining slots (9060, 9062,
# 9066-9067, 9069), and the tail that returns 'ret' (9071-9072,
# 9075-9076). Verify against the full source.
9027 def match(self, atom):
9028 ret = self._match_cache.get(atom)
9033 atom = self._dep_expand(atom)
9034 pkg, existing = self._depgraph._select_package(self._root, atom)
9038 # Return the highest available from select_package() as well as
9039 # any matching slots in the graph db.
9041 slots.add(pkg.metadata["SLOT"])
9042 atom_cp = portage.dep_getkey(atom)
9043 if pkg.cp.startswith("virtual/"):
9044 # For new-style virtual lookahead that occurs inside
9045 # dep_check(), examine all slots. This is needed
9046 # so that newer slots will not unnecessarily be pulled in
9047 # when a satisfying lower slot is already installed. For
9048 # example, if virtual/jdk-1.4 is satisfied via kaffe then
9049 # there's no need to pull in a newer slot to satisfy a
9050 # virtual/jdk dependency.
9051 for db, pkg_type, built, installed, db_keys in \
9052 self._depgraph._filtered_trees[self._root]["dbs"]:
9053 for cpv in db.match(atom):
9054 if portage.cpv_getkey(cpv) != pkg.cp:
9056 slots.add(db.aux_get(cpv, ["SLOT"])[0])
9058 if self._visible(pkg):
9059 self._cpv_pkg_map[pkg.cpv] = pkg
# The initially selected package's slot has been handled; select a
# package for each remaining slot of the same cp.
9061 slots.remove(pkg.metadata["SLOT"])
9063 slot_atom = "%s:%s" % (atom_cp, slots.pop())
9064 pkg, existing = self._depgraph._select_package(
9065 self._root, slot_atom)
9068 if not self._visible(pkg):
9070 self._cpv_pkg_map[pkg.cpv] = pkg
# Memoize under the original (pre-expansion) atom.
9073 self._cpv_sort_ascending(ret)
9074 self._match_cache[orig_atom] = ret
9077 def _visible(self, pkg):
9078 if pkg.installed and "selective" not in self._depgraph.myparams:
9080 arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
9081 except (StopIteration, portage.exception.InvalidDependString):
9088 self._depgraph.pkgsettings[pkg.root], pkg):
9090 except portage.exception.InvalidDependString:
9092 in_graph = self._depgraph._slot_pkg_map[
9093 self._root].get(pkg.slot_atom)
9094 if in_graph is None:
9095 # Mask choices for packages which are not the highest visible
9096 # version within their slot (since they usually trigger slot
9098 highest_visible, in_graph = self._depgraph._select_package(
9099 self._root, pkg.slot_atom)
9100 if pkg != highest_visible:
9102 elif in_graph != pkg:
9103 # Mask choices for packages that would trigger a slot
9104 # conflict with a previously selected package.
9108 def _dep_expand(self, atom):
9110 This is only needed for old installed packages that may
9111 contain atoms that are not fully qualified with a specific
9112 category. Emulate the cpv_expand() function that's used by
9113 dbapi.match() in cases like this. If there are multiple
9114 matches, it's often due to a new-style virtual that has
9115 been added, so try to filter those out to avoid raising
9118 root_config = self._depgraph.roots[self._root]
9120 expanded_atoms = self._depgraph._dep_expand(root_config, atom)
9121 if len(expanded_atoms) > 1:
9122 non_virtual_atoms = []
9123 for x in expanded_atoms:
9124 if not portage.dep_getkey(x).startswith("virtual/"):
9125 non_virtual_atoms.append(x)
9126 if len(non_virtual_atoms) == 1:
9127 expanded_atoms = non_virtual_atoms
9128 if len(expanded_atoms) > 1:
9129 # compatible with portage.cpv_expand()
9130 raise portage.exception.AmbiguousPackageName(
9131 [portage.dep_getkey(x) for x in expanded_atoms])
9133 atom = expanded_atoms[0]
9135 null_atom = insert_category_into_atom(atom, "null")
9136 null_cp = portage.dep_getkey(null_atom)
9137 cat, atom_pn = portage.catsplit(null_cp)
9138 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9140 # Allow the resolver to choose which virtual.
9141 atom = insert_category_into_atom(atom, "virtual")
9143 atom = insert_category_into_atom(atom, "null")
9146 def aux_get(self, cpv, wants):
9147 metadata = self._cpv_pkg_map[cpv].metadata
9148 return [metadata.get(x, "") for x in wants]
# Maps repository paths (PORTDIR + overlays) to small display indices
# for the merge-list output, and renders the legend via __str__-style
# output assembly (method header elided in this sampled listing).
9150 class RepoDisplay(object):
9151 def __init__(self, roots):
# repo path -> display index, assigned lazily in repoStr().
9152 self._shown_repos = {}
9153 self._unknown_repo = False
9155 for root_config in roots.itervalues():
9156 portdir = root_config.settings.get("PORTDIR")
9158 repo_paths.add(portdir)
9159 overlays = root_config.settings.get("PORTDIR_OVERLAY")
9161 repo_paths.update(overlays.split())
9162 repo_paths = list(repo_paths)
9163 self._repo_paths = repo_paths
# realpath copies kept in parallel so repoStr() can match
# against canonicalized paths.
9164 self._repo_paths_real = [ os.path.realpath(repo_path) \
9165 for repo_path in repo_paths ]
9167 # pre-allocate index for PORTDIR so that it always has index 0.
9168 for root_config in roots.itervalues():
9169 portdb = root_config.trees["porttree"].dbapi
9170 portdir = portdb.porttree_root
9172 self.repoStr(portdir)
# Return the display token for a (real) repo path, assigning the
# next free index on first sight; unknown paths set a flag so the
# legend can explain the "[?]" marker.
9174 def repoStr(self, repo_path_real):
9177 real_index = self._repo_paths_real.index(repo_path_real)
9178 if real_index == -1:
9180 self._unknown_repo = True
9182 shown_repos = self._shown_repos
9183 repo_paths = self._repo_paths
9184 repo_path = repo_paths[real_index]
9185 index = shown_repos.get(repo_path)
9187 index = len(shown_repos)
9188 shown_repos[repo_path] = index
# Legend rendering (enclosing method's def line is elided here).
9194 shown_repos = self._shown_repos
9195 unknown_repo = self._unknown_repo
9196 if shown_repos or self._unknown_repo:
9197 output.append("Portage tree and overlays:\n")
# Invert the path->index map into an index-ordered list.
9198 show_repo_paths = list(shown_repos)
9199 for repo_path, repo_index in shown_repos.iteritems():
9200 show_repo_paths[repo_index] = repo_path
9202 for index, repo_path in enumerate(show_repo_paths):
9203 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9205 output.append(" "+teal("[?]") + \
9206 " indicates that the source repository could not be determined\n")
9207 return "".join(output)
# Tallies merge-list statistics (upgrades, downgrades, new, reinstalls,
# blocks, fetch restrictions, ...) and renders the familiar
# "Total: N packages (...)" summary line.  Several counter
# initializations and "s" pluralization lines are elided in this
# sampled listing.
9209 class PackageCounters(object):
9219 self.blocks_satisfied = 0
9221 self.restrict_fetch = 0
9222 self.restrict_fetch_satisfied = 0
9223 self.interactive = 0
# Summary rendering: total installs across all operation kinds.
9226 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9229 myoutput.append("Total: %s package" % total_installs)
9230 if total_installs != 1:
9231 myoutput.append("s")
9232 if total_installs != 0:
9233 myoutput.append(" (")
9234 if self.upgrades > 0:
9235 details.append("%s upgrade" % self.upgrades)
9236 if self.upgrades > 1:
9238 if self.downgrades > 0:
9239 details.append("%s downgrade" % self.downgrades)
9240 if self.downgrades > 1:
9243 details.append("%s new" % self.new)
9244 if self.newslot > 0:
9245 details.append("%s in new slot" % self.newslot)
9246 if self.newslot > 1:
9249 details.append("%s reinstall" % self.reinst)
9253 details.append("%s uninstall" % self.uninst)
9256 if self.interactive > 0:
# Interactive packages are highlighted since they need a console.
9257 details.append("%s %s" % (self.interactive,
9258 colorize("WARN", "interactive")))
9259 myoutput.append(", ".join(details))
9260 if total_installs != 0:
9261 myoutput.append(")")
9262 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
9263 if self.restrict_fetch:
9264 myoutput.append("\nFetch Restriction: %s package" % \
9265 self.restrict_fetch)
9266 if self.restrict_fetch > 1:
9267 myoutput.append("s")
9268 if self.restrict_fetch_satisfied < self.restrict_fetch:
9269 myoutput.append(bad(" (%s unsatisfied)") % \
9270 (self.restrict_fetch - self.restrict_fetch_satisfied))
9272 myoutput.append("\nConflict: %s block" % \
9275 myoutput.append("s")
9276 if self.blocks_satisfied < self.blocks:
9277 myoutput.append(bad(" (%s unsatisfied)") % \
9278 (self.blocks - self.blocks_satisfied))
9279 return "".join(myoutput)
# Value object for one USE flag in --verbose output, plus two
# cmp-style sort keys: combined (name only) and separated
# (enabled flags before disabled, then by name).
9281 class UseFlagDisplay(object):
9283 __slots__ = ('name', 'enabled', 'forced')
9285 def __init__(self, name, enabled, forced):
9287 self.enabled = enabled
9288 self.forced = forced
9301 def _cmp_combined(a, b):
9303 Sort by name, combining enabled and disabled flags.
# cmp()-style result computed from two bool comparisons.
9305 return (a.name > b.name) - (a.name < b.name)
9307 sort_combined = cmp_sort_key(_cmp_combined)
9310 def _cmp_separated(a, b):
9312 Sort by name, separating enabled flags from disabled flags.
# True/False arithmetic: nonzero when enabled status differs.
9314 enabled_diff = b.enabled - a.enabled
9317 return (a.name > b.name) - (a.name < b.name)
9319 sort_separated = cmp_sort_key(_cmp_separated)
# Emulates a select.poll() object on top of select.select(), for
# platforms (e.g. Darwin ptys) where poll() is broken or missing.
# Only POLLIN is actually reported.
9322 class PollSelectAdapter(PollConstants):
9325 Use select to emulate a poll object, for
9326 systems that don't support poll().
9330 self._registered = {}
# Cached 3-list of select() args, invalidated on (un)register.
9331 self._select_args = [[], [], []]
9333 def register(self, fd, *args):
9335 Only POLLIN is currently supported!
9339 "register expected at most 2 arguments, got " + \
9340 repr(1 + len(args)))
# Default mask mirrors poll(): IN | PRI | OUT.
9342 eventmask = PollConstants.POLLIN | \
9343 PollConstants.POLLPRI | PollConstants.POLLOUT
9347 self._registered[fd] = eventmask
9348 self._select_args = None
9350 def unregister(self, fd):
9351 self._select_args = None
9352 del self._registered[fd]
9354 def poll(self, *args):
9357 "poll expected at most 2 arguments, got " + \
9358 repr(1 + len(args)))
9364 select_args = self._select_args
9365 if select_args is None:
9366 select_args = [self._registered.keys(), [], []]
9368 if timeout is not None:
# Copy before appending so the cached arg list stays timeout-free.
9369 select_args = select_args[:]
9370 # Translate poll() timeout args to select() timeout args:
9372 # | units | value(s) for indefinite block
9373 # ---------|--------------|------------------------------
9374 # poll | milliseconds | omitted, negative, or None
9375 # ---------|--------------|------------------------------
9376 # select | seconds | omitted
9377 # ---------|--------------|------------------------------
9379 if timeout is not None and timeout < 0:
9381 if timeout is not None:
9382 select_args.append(timeout / 1000)
9384 select_events = select.select(*select_args)
# Only readability is translated back into poll events.
9386 for fd in select_events[0]:
9387 poll_events.append((fd, PollConstants.POLLIN))
# FIFO queue of asynchronous tasks with a concurrency cap (max_jobs;
# True means unlimited).  schedule() starts queued tasks until the cap
# is reached; completed tasks are pruned via their exit listeners.
9390 class SequentialTaskQueue(SlotObject):
9392 __slots__ = ("max_jobs", "running_tasks") + \
9393 ("_dirty", "_scheduling", "_task_queue")
9395 def __init__(self, **kwargs):
9396 SlotObject.__init__(self, **kwargs)
# deque gives O(1) popleft/appendleft for FIFO + addFront().
9397 self._task_queue = deque()
9398 self.running_tasks = set()
9399 if self.max_jobs is None:
9403 def add(self, task):
9404 self._task_queue.append(task)
# addFront: jump the queue (used for high-priority tasks).
9407 def addFront(self, task):
9408 self._task_queue.appendleft(task)
9419 if self._scheduling:
9420 # Ignore any recursive schedule() calls triggered via
9421 # self._task_exit().
9424 self._scheduling = True
9426 task_queue = self._task_queue
9427 running_tasks = self.running_tasks
9428 max_jobs = self.max_jobs
9429 state_changed = False
9431 while task_queue and \
9432 (max_jobs is True or len(running_tasks) < max_jobs):
9433 task = task_queue.popleft()
9434 cancelled = getattr(task, "cancelled", None)
9436 running_tasks.add(task)
# Exit listener prunes running_tasks when the task finishes.
9437 task.addExitListener(self._task_exit)
9439 state_changed = True
9442 self._scheduling = False
9444 return state_changed
9446 def _task_exit(self, task):
9448 Since we can always rely on exit listeners being called, the set of
9449 running tasks is always pruned automatically and there is never any need
9450 to actively prune it.
9452 self.running_tasks.remove(task)
9453 if self._task_queue:
# Teardown (method header elided): drop queued tasks and detach
# exit listeners from anything still running.
9457 self._task_queue.clear()
9458 running_tasks = self.running_tasks
9459 while running_tasks:
9460 task = running_tasks.pop()
9461 task.removeExitListener(self._task_exit)
# Truthiness / len reflect queued plus running tasks.
9465 def __nonzero__(self):
9466 return bool(self._task_queue or self.running_tasks)
9469 return len(self._task_queue) + len(self.running_tasks)
# Memoized probe result for can_poll_device(); None = not yet probed.
9471 _can_poll_device = None
9473 def can_poll_device():
9475 Test if it's possible to use poll() on a device such as a pty. This
9476 is known to fail on Darwin.
9478 @returns: True if poll() on a device succeeds, False otherwise.
9481 global _can_poll_device
9482 if _can_poll_device is not None:
9483 return _can_poll_device
9485 if not hasattr(select, "poll"):
9486 _can_poll_device = False
9487 return _can_poll_device
# Probe by polling /dev/null; the open failure branch (except body
# around line 9491) is elided in this sampled listing.
9490 dev_null = open('/dev/null', 'rb')
9492 _can_poll_device = False
9493 return _can_poll_device
9496 p.register(dev_null.fileno(), PollConstants.POLLIN)
9498 invalid_request = False
9499 for f, event in p.poll():
# POLLNVAL on a device means poll() is unusable here (Darwin).
9500 if event & PollConstants.POLLNVAL:
9501 invalid_request = True
9505 _can_poll_device = not invalid_request
9506 return _can_poll_device
# Factory: native select.poll() when usable, else the select-based
# PollSelectAdapter fallback.
9508 def create_poll_instance():
9510 Create an instance of select.poll, or an instance of
9511 PollSelectAdapter if there is no poll() implementation or
9512 it is broken somehow.
9514 if can_poll_device():
9515 return select.poll()
9516 return PollSelectAdapter()
# os.getloadavg is missing on some platforms; fall back to parsing
# /proc/loadavg (the replacement def line is elided in this listing).
9518 getloadavg = getattr(os, "getloadavg", None)
9519 if getloadavg is None:
9522 Uses /proc/loadavg to emulate os.getloadavg().
9523 Raises OSError if the load average was unobtainable.
9526 loadavg_str = open('/proc/loadavg').readline()
9528 # getloadavg() is only supposed to raise OSError, so convert
9529 raise OSError('unknown')
9530 loadavg_split = loadavg_str.split()
9531 if len(loadavg_split) < 3:
9532 raise OSError('unknown')
# Parse the first three whitespace-separated fields as floats;
# any ValueError is converted to OSError (line 9538).
9536 loadavg_floats.append(float(loadavg_split[i]))
9538 raise OSError('unknown')
9539 return tuple(loadavg_floats)
# Base class for the emerge schedulers: owns a poll object, a shared
# poll-event queue, and fd -> handler registrations, and provides the
# _schedule/_poll/_schedule_wait machinery subclasses build on.
# NOTE(review): try/finally and several branch bodies are elided in
# this sampled listing.
9541 class PollScheduler(object):
# Narrow interface handed to tasks: register/schedule/unregister.
9543 class _sched_iface_class(SlotObject):
9544 __slots__ = ("register", "schedule", "unregister")
9548 self._max_load = None
9550 self._poll_event_queue = []
9551 self._poll_event_handlers = {}
9552 self._poll_event_handler_ids = {}
9553 # Increment id for each new handler.
9554 self._event_handler_id = 0
9555 self._poll_obj = create_poll_instance()
9556 self._scheduling = False
9558 def _schedule(self):
9560 Calls _schedule_tasks() and automatically returns early from
9561 any recursive calls to this method that the _schedule_tasks()
9562 call might trigger. This makes _schedule() safe to call from
9563 inside exit listeners.
9565 if self._scheduling:
9567 self._scheduling = True
9569 return self._schedule_tasks()
9571 self._scheduling = False
# Subclass hook: number of currently running jobs.
9573 def _running_job_count(self):
9576 def _can_add_job(self):
9577 max_jobs = self._max_jobs
9578 max_load = self._max_load
# max_jobs is True means unlimited; otherwise enforce the cap.
9580 if self._max_jobs is not True and \
9581 self._running_job_count() >= self._max_jobs:
# Load-average limiting only applies once at least one job runs,
# and only when parallelism is actually allowed.
9584 if max_load is not None and \
9585 (max_jobs is True or max_jobs > 1) and \
9586 self._running_job_count() >= 1:
9588 avg1, avg5, avg15 = getloadavg()
9592 if avg1 >= max_load:
9597 def _poll(self, timeout=None):
9599 All poll() calls pass through here. The poll events
9600 are added directly to self._poll_event_queue.
9601 In order to avoid endless blocking, this raises
9602 StopIteration if timeout is None and there are
9603 no file descriptors to poll.
9605 if not self._poll_event_handlers:
9607 if timeout is None and \
9608 not self._poll_event_handlers:
9609 raise StopIteration(
9610 "timeout is None and there are no poll() event handlers")
9612 # The following error is known to occur with Linux kernel versions
9615 # select.error: (4, 'Interrupted system call')
9617 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9618 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9619 # without any events.
9622 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9624 except select.error, e:
9625 writemsg_level("\n!!! select error: %s\n" % (e,),
9626 level=logging.ERROR, noiselevel=-1)
9628 if timeout is not None:
9631 def _next_poll_event(self, timeout=None):
9633 Since the _schedule_wait() loop is called by event
9634 handlers from _poll_loop(), maintain a central event
9635 queue for both of them to share events from a single
9636 poll() call. In order to avoid endless blocking, this
9637 raises StopIteration if timeout is None and there are
9638 no file descriptors to poll.
9640 if not self._poll_event_queue:
9642 return self._poll_event_queue.pop()
9644 def _poll_loop(self):
9646 event_handlers = self._poll_event_handlers
9647 event_handled = False
9650 while event_handlers:
9651 f, event = self._next_poll_event()
9652 handler, reg_id = event_handlers[f]
9654 event_handled = True
9655 except StopIteration:
9656 event_handled = True
# If the loop exits without handling anything, something is
# spinning without making progress -- fail loudly.
9658 if not event_handled:
9659 raise AssertionError("tight loop")
9661 def _schedule_yield(self):
9663 Schedule for a short period of time chosen by the scheduler based
9664 on internal state. Synchronous tasks should call this periodically
9665 in order to allow the scheduler to service pending poll events. The
9666 scheduler will call poll() exactly once, without blocking, and any
9667 resulting poll events will be serviced.
9669 event_handlers = self._poll_event_handlers
9672 if not event_handlers:
9673 return bool(events_handled)
9675 if not self._poll_event_queue:
9679 while event_handlers and self._poll_event_queue:
9680 f, event = self._next_poll_event()
9681 handler, reg_id = event_handlers[f]
9684 except StopIteration:
9687 return bool(events_handled)
9689 def _register(self, f, eventmask, handler):
9692 @return: A unique registration id, for use in schedule() or
9695 if f in self._poll_event_handlers:
9696 raise AssertionError("fd %d is already registered" % f)
9697 self._event_handler_id += 1
9698 reg_id = self._event_handler_id
# Bidirectional bookkeeping: id -> fd and fd -> (handler, id).
9699 self._poll_event_handler_ids[reg_id] = f
9700 self._poll_event_handlers[f] = (handler, reg_id)
9701 self._poll_obj.register(f, eventmask)
9704 def _unregister(self, reg_id):
9705 f = self._poll_event_handler_ids[reg_id]
9706 self._poll_obj.unregister(f)
9707 del self._poll_event_handlers[f]
9708 del self._poll_event_handler_ids[reg_id]
9710 def _schedule_wait(self, wait_ids):
9712 Schedule until wait_id is not longer registered
9715 @param wait_id: a task id to wait for
9717 event_handlers = self._poll_event_handlers
9718 handler_ids = self._poll_event_handler_ids
9719 event_handled = False
# Accept a single id or an iterable of ids.
9721 if isinstance(wait_ids, int):
9722 wait_ids = frozenset([wait_ids])
9725 while wait_ids.intersection(handler_ids):
9726 f, event = self._next_poll_event()
9727 handler, reg_id = event_handlers[f]
9729 event_handled = True
9730 except StopIteration:
9731 event_handled = True
9733 return event_handled
# PollScheduler that drives a collection of SequentialTaskQueue
# instances: run() loops until no queue has work left, respecting
# max_jobs and max_load.
9735 class QueueScheduler(PollScheduler):
9738 Add instances of SequentialTaskQueue and then call run(). The
9739 run() method returns when no tasks remain.
9742 def __init__(self, max_jobs=None, max_load=None):
9743 PollScheduler.__init__(self)
9745 if max_jobs is None:
9748 self._max_jobs = max_jobs
9749 self._max_load = max_load
# Public scheduling interface handed to tasks.
9750 self.sched_iface = self._sched_iface_class(
9751 register=self._register,
9752 schedule=self._schedule_wait,
9753 unregister=self._unregister)
9756 self._schedule_listeners = []
# add/remove manage the set of queues being driven (add's def line
# is elided in this sampled listing).
9759 self._queues.append(q)
9761 def remove(self, q):
9762 self._queues.remove(q)
# run(): keep scheduling, then drain remaining running jobs.
9766 while self._schedule():
9769 while self._running_job_count():
9772 def _schedule_tasks(self):
9775 @returns: True if there may be remaining tasks to schedule,
# Start jobs until the cap is hit or no queue can start more.
9778 while self._can_add_job():
9779 n = self._max_jobs - self._running_job_count()
9783 if not self._start_next_job(n):
9786 for q in self._queues:
9791 def _running_job_count(self):
9793 for q in self._queues:
9794 job_count += len(q.running_tasks)
9795 self._jobs = job_count
9798 def _start_next_job(self, n=1):
9800 for q in self._queues:
9801 initial_job_count = len(q.running_tasks)
9803 final_job_count = len(q.running_tasks)
# Count how many jobs the queue's schedule() actually started.
9804 if final_job_count > initial_job_count:
9805 started_count += (final_job_count - initial_job_count)
9806 if started_count >= n:
9808 return started_count
# Convenience facade: one SequentialTaskQueue driven by a
# QueueScheduler; add() tasks, then run() until all complete.
9810 class TaskScheduler(object):
9813 A simple way to handle scheduling of AsynchronousTask instances. Simply
9814 add tasks and call run(). The run() method returns when no tasks remain.
9817 def __init__(self, max_jobs=None, max_load=None):
9818 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9819 self._scheduler = QueueScheduler(
9820 max_jobs=max_jobs, max_load=max_load)
# Re-export the scheduler's interface and run() directly.
9821 self.sched_iface = self._scheduler.sched_iface
9822 self.run = self._scheduler.run
9823 self._scheduler.add(self._queue)
9825 def add(self, task):
9826 self._queue.add(task)
# Renders the one-line "Jobs: X of Y complete ..." status display on a
# terminal, using curses terminfo codes when available and plain
# newline-terminated output otherwise.  Rate-limited to one repaint
# per _min_display_latency seconds.
# NOTE(review): several method headers and branch bodies are elided in
# this sampled listing.
9828 class JobStatusDisplay(object):
# Attribute writes to these names trigger _property_change().
9830 _bound_properties = ("curval", "failed", "running")
9831 _jobs_column_width = 48
9833 # Don't update the display unless at least this much
9834 # time has passed, in units of seconds.
9835 _min_display_latency = 2
9837 _default_term_codes = {
9843 _termcap_name_map = {
9844 'carriage_return' : 'cr',
9849 def __init__(self, out=sys.stdout, quiet=False):
# object.__setattr__ is used throughout because __setattr__ is
# overridden below to intercept bound-property changes.
9850 object.__setattr__(self, "out", out)
9851 object.__setattr__(self, "quiet", quiet)
9852 object.__setattr__(self, "maxval", 0)
9853 object.__setattr__(self, "merges", 0)
9854 object.__setattr__(self, "_changed", False)
9855 object.__setattr__(self, "_displayed", False)
9856 object.__setattr__(self, "_last_display_time", 0)
9857 object.__setattr__(self, "width", 80)
9860 isatty = hasattr(out, "isatty") and out.isatty()
9861 object.__setattr__(self, "_isatty", isatty)
# Fall back to hard-coded codes when not a tty or terminfo fails.
9862 if not isatty or not self._init_term():
9864 for k, capname in self._termcap_name_map.iteritems():
9865 term_codes[k] = self._default_term_codes[capname]
9866 object.__setattr__(self, "_term_codes", term_codes)
9867 encoding = sys.getdefaultencoding()
9868 for k, v in self._term_codes.items():
9869 if not isinstance(v, basestring):
9870 self._term_codes[k] = v.decode(encoding, 'replace')
9872 def _init_term(self):
9874 Initialize term control codes.
9876 @returns: True if term codes were successfully initialized,
9880 term_type = os.environ.get("TERM", "vt100")
9886 curses.setupterm(term_type, self.out.fileno())
9887 tigetstr = curses.tigetstr
9888 except curses.error:
9893 if tigetstr is None:
9897 for k, capname in self._termcap_name_map.iteritems():
9898 code = tigetstr(capname)
# Missing capability: substitute the hard-coded default.
9900 code = self._default_term_codes[capname]
9901 term_codes[k] = code
9902 object.__setattr__(self, "_term_codes", term_codes)
9905 def _format_msg(self, msg):
9906 return ">>> %s" % msg
# Erase the current status line: CR then clear-to-eol.
9910 self._term_codes['carriage_return'] + \
9911 self._term_codes['clr_eol'])
9913 self._displayed = False
9915 def _display(self, line):
9916 self.out.write(line)
9918 self._displayed = True
9920 def _update(self, msg):
9923 if not self._isatty:
# Non-tty: plain line output, no in-place repainting.
9924 out.write(self._format_msg(msg) + self._term_codes['newline'])
9926 self._displayed = True
9932 self._display(self._format_msg(msg))
9934 def displayMessage(self, msg):
9936 was_displayed = self._displayed
# Erase the status line first so the message prints cleanly,
# then mark the status as needing a repaint.
9938 if self._isatty and self._displayed:
9941 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9943 self._displayed = False
9946 self._changed = True
# Reset (method header elided): zero the bound counters and drop
# to a fresh line if something was displayed.
9952 for name in self._bound_properties:
9953 object.__setattr__(self, name, 0)
9956 self.out.write(self._term_codes['newline'])
9958 self._displayed = False
9960 def __setattr__(self, name, value):
9961 old_value = getattr(self, name)
# Skip no-op writes so _changed isn't set spuriously.
9962 if value == old_value:
9964 object.__setattr__(self, name, value)
9965 if name in self._bound_properties:
9966 self._property_change(name, old_value, value)
9968 def _property_change(self, name, old_value, new_value):
9969 self._changed = True
9972 def _load_avg_str(self):
9987 return ", ".join(("%%.%df" % digits ) % x for x in avg)
# Public display entry point (header elided): repaint only when
# state changed and the latency window has elapsed.
9991 Display status on stdout, but only if something has
9992 changed since the last call.
9998 current_time = time.time()
9999 time_delta = current_time - self._last_display_time
10000 if self._displayed and \
10002 if not self._isatty:
10004 if time_delta < self._min_display_latency:
10007 self._last_display_time = current_time
10008 self._changed = False
10009 self._display_status()
10011 def _display_status(self):
10012 # Don't use len(self._completed_tasks) here since that also
10013 # can include uninstall tasks.
10014 curval_str = str(self.curval)
10015 maxval_str = str(self.maxval)
10016 running_str = str(self.running)
10017 failed_str = str(self.failed)
10018 load_avg_str = self._load_avg_str()
# Render once into parallel color and plain buffers; the plain
# copy is used for width/padding math and xterm titles.
10020 color_output = StringIO()
10021 plain_output = StringIO()
10022 style_file = portage.output.ConsoleStyleFile(color_output)
10023 style_file.write_listener = plain_output
10024 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
10025 style_writer.style_listener = style_file.new_styles
10026 f = formatter.AbstractFormatter(style_writer)
10028 number_style = "INFORM"
10029 f.add_literal_data("Jobs: ")
10030 f.push_style(number_style)
10031 f.add_literal_data(curval_str)
10033 f.add_literal_data(" of ")
10034 f.push_style(number_style)
10035 f.add_literal_data(maxval_str)
10037 f.add_literal_data(" complete")
10040 f.add_literal_data(", ")
10041 f.push_style(number_style)
10042 f.add_literal_data(running_str)
10044 f.add_literal_data(" running")
10047 f.add_literal_data(", ")
10048 f.push_style(number_style)
10049 f.add_literal_data(failed_str)
10051 f.add_literal_data(" failed")
# Pad the jobs column so the load average aligns.
10053 padding = self._jobs_column_width - len(plain_output.getvalue())
10055 f.add_literal_data(padding * " ")
10057 f.add_literal_data("Load avg: ")
10058 f.add_literal_data(load_avg_str)
10060 # Truncate to fit width, to avoid making the terminal scroll if the
10061 # line overflows (happens when the load average is large).
10062 plain_output = plain_output.getvalue()
10063 if self._isatty and len(plain_output) > self.width:
10064 # Use plain_output here since it's easier to truncate
10065 # properly than the color output which contains console
10067 self._update(plain_output[:self.width])
10069 self._update(color_output.getvalue())
# Mirror the (whitespace-collapsed) status into the xterm title.
10071 xtermTitle(" ".join(plain_output.split()))
10073 class Scheduler(PollScheduler):
10075 _opts_ignore_blockers = \
10076 frozenset(["--buildpkgonly",
10077 "--fetchonly", "--fetch-all-uri",
10078 "--nodeps", "--pretend"])
10080 _opts_no_background = \
10081 frozenset(["--pretend",
10082 "--fetchonly", "--fetch-all-uri"])
10084 _opts_no_restart = frozenset(["--buildpkgonly",
10085 "--fetchonly", "--fetch-all-uri", "--pretend"])
10087 _bad_resume_opts = set(["--ask", "--changelog",
10088 "--resume", "--skipfirst"])
10090 _fetch_log = "/var/log/emerge-fetch.log"
10092 class _iface_class(SlotObject):
10093 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
10094 "dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
10095 "scheduleSetup", "scheduleUnpack", "scheduleYield",
10098 class _fetch_iface_class(SlotObject):
10099 __slots__ = ("log_file", "schedule")
10101 _task_queues_class = slot_dict_class(
10102 ("merge", "jobs", "fetch", "unpack"), prefix="")
10104 class _build_opts_class(SlotObject):
10105 __slots__ = ("buildpkg", "buildpkgonly",
10106 "fetch_all_uri", "fetchonly", "pretend")
10108 class _binpkg_opts_class(SlotObject):
10109 __slots__ = ("fetchonly", "getbinpkg", "pretend")
10111 class _pkg_count_class(SlotObject):
10112 __slots__ = ("curval", "maxval")
10114 class _emerge_log_class(SlotObject):
10115 __slots__ = ("xterm_titles",)
10117 def log(self, *pargs, **kwargs):
10118 if not self.xterm_titles:
10119 # Avoid interference with the scheduler's status display.
10120 kwargs.pop("short_msg", None)
10121 emergelog(self.xterm_titles, *pargs, **kwargs)
10123 class _failed_pkg(SlotObject):
10124 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
10126 class _ConfigPool(object):
10127 """Interface for a task to temporarily allocate a config
10128 instance from a pool. This allows a task to be constructed
10129 long before the config instance actually becomes needed, like
10130 when prefetchers are constructed for the whole merge list."""
10131 __slots__ = ("_root", "_allocate", "_deallocate")
10132 def __init__(self, root, allocate, deallocate):
10134 self._allocate = allocate
10135 self._deallocate = deallocate
10136 def allocate(self):
10137 return self._allocate(self._root)
10138 def deallocate(self, settings):
10139 self._deallocate(settings)
10141 class _unknown_internal_error(portage.exception.PortageException):
10143 Used internally to terminate scheduling. The specific reason for
10144 the failure should have been dumped to stderr.
10146 def __init__(self, value=""):
10147 portage.exception.PortageException.__init__(self, value)
10149 def __init__(self, settings, trees, mtimedb, myopts,
10150 spinner, mergelist, favorites, digraph):
10151 PollScheduler.__init__(self)
10152 self.settings = settings
10153 self.target_root = settings["ROOT"]
10155 self.myopts = myopts
10156 self._spinner = spinner
10157 self._mtimedb = mtimedb
10158 self._mergelist = mergelist
10159 self._favorites = favorites
10160 self._args_set = InternalPackageSet(favorites)
10161 self._build_opts = self._build_opts_class()
10162 for k in self._build_opts.__slots__:
10163 setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10164 self._binpkg_opts = self._binpkg_opts_class()
10165 for k in self._binpkg_opts.__slots__:
10166 setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10169 self._logger = self._emerge_log_class()
10170 self._task_queues = self._task_queues_class()
10171 for k in self._task_queues.allowed_keys:
10172 setattr(self._task_queues, k,
10173 SequentialTaskQueue())
10175 # Holds merges that will wait to be executed when no builds are
10176 # executing. This is useful for system packages since dependencies
10177 # on system packages are frequently unspecified.
10178 self._merge_wait_queue = []
10179 # Holds merges that have been transfered from the merge_wait_queue to
10180 # the actual merge queue. They are removed from this list upon
10181 # completion. Other packages can start building only when this list is
10183 self._merge_wait_scheduled = []
10185 # Holds system packages and their deep runtime dependencies. Before
10186 # being merged, these packages go to merge_wait_queue, to be merged
10187 # when no other packages are building.
10188 self._deep_system_deps = set()
10190 # Holds packages to merge which will satisfy currently unsatisfied
10191 # deep runtime dependencies of system packages. If this is not empty
10192 # then no parallel builds will be spawned until it is empty. This
10193 # minimizes the possibility that a build will fail due to the system
10194 # being in a fragile state. For example, see bug #259954.
10195 self._unsatisfied_system_deps = set()
10197 self._status_display = JobStatusDisplay()
10198 self._max_load = myopts.get("--load-average")
10199 max_jobs = myopts.get("--jobs")
10200 if max_jobs is None:
10202 self._set_max_jobs(max_jobs)
10204 # The root where the currently running
10205 # portage instance is installed.
10206 self._running_root = trees["/"]["root_config"]
10208 if settings.get("PORTAGE_DEBUG", "") == "1":
10210 self.pkgsettings = {}
10211 self._config_pool = {}
10212 self._blocker_db = {}
10214 self._config_pool[root] = []
10215 self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10217 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10218 schedule=self._schedule_fetch)
10219 self._sched_iface = self._iface_class(
10220 dblinkEbuildPhase=self._dblink_ebuild_phase,
10221 dblinkDisplayMerge=self._dblink_display_merge,
10222 dblinkElog=self._dblink_elog,
10223 dblinkEmergeLog=self._dblink_emerge_log,
10224 fetch=fetch_iface, register=self._register,
10225 schedule=self._schedule_wait,
10226 scheduleSetup=self._schedule_setup,
10227 scheduleUnpack=self._schedule_unpack,
10228 scheduleYield=self._schedule_yield,
10229 unregister=self._unregister)
10231 self._prefetchers = weakref.WeakValueDictionary()
10232 self._pkg_queue = []
10233 self._completed_tasks = set()
10235 self._failed_pkgs = []
10236 self._failed_pkgs_all = []
10237 self._failed_pkgs_die_msgs = []
10238 self._post_mod_echo_msgs = []
10239 self._parallel_fetch = False
10240 merge_count = len([x for x in mergelist \
10241 if isinstance(x, Package) and x.operation == "merge"])
10242 self._pkg_count = self._pkg_count_class(
10243 curval=0, maxval=merge_count)
10244 self._status_display.maxval = self._pkg_count.maxval
10246 # The load average takes some time to respond when new
10247 # jobs are added, so we need to limit the rate of adding
10249 self._job_delay_max = 10
10250 self._job_delay_factor = 1.0
10251 self._job_delay_exp = 1.5
10252 self._previous_job_start_time = None
10254 self._set_digraph(digraph)
10256 # This is used to memoize the _choose_pkg() result when
10257 # no packages can be chosen until one of the existing
10259 self._choose_pkg_return_early = False
10261 features = self.settings.features
10262 if "parallel-fetch" in features and \
10263 not ("--pretend" in self.myopts or \
10264 "--fetch-all-uri" in self.myopts or \
10265 "--fetchonly" in self.myopts):
10266 if "distlocks" not in features:
10267 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10268 portage.writemsg(red("!!!")+" parallel-fetching " + \
10269 "requires the distlocks feature enabled"+"\n",
10271 portage.writemsg(red("!!!")+" you have it disabled, " + \
10272 "thus parallel-fetching is being disabled"+"\n",
10274 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10275 elif len(mergelist) > 1:
10276 self._parallel_fetch = True
10278 if self._parallel_fetch:
10279 # clear out existing fetch log if it exists
10281 open(self._fetch_log, 'w')
10282 except EnvironmentError:
10285 self._running_portage = None
10286 portage_match = self._running_root.trees["vartree"].dbapi.match(
10287 portage.const.PORTAGE_PACKAGE_ATOM)
10289 cpv = portage_match.pop()
10290 self._running_portage = self._pkg(cpv, "installed",
10291 self._running_root, installed=True)
def _poll(self, timeout=None):
    """Delegate one poll iteration to the PollScheduler base class."""
    # NOTE(review): one line between the def and this call is elided in
    # this copy (upstream also invokes self._schedule() here) -- verify.
    PollScheduler._poll(self, timeout=timeout)
10297 def _set_max_jobs(self, max_jobs):
10298 self._max_jobs = max_jobs
10299 self._task_queues.jobs.max_jobs = max_jobs
def _background_mode(self):
    """
    Check if background mode is enabled and adjust states as necessary.

    @returns: True if background mode is enabled, False otherwise.
    """
    # Background mode requires parallelism (or --quiet) and none of the
    # options that force foreground output.
    background = (self._max_jobs is True or \
        self._max_jobs > 1 or "--quiet" in self.myopts) and \
        not bool(self._opts_no_background.intersection(self.myopts))

    interactive_tasks = self._get_interactive_tasks()
    if interactive_tasks:
        # NOTE(review): the initialization of 'msg' (presumably a list)
        # is elided in this copy -- verify against upstream.
        writemsg_level(">>> Sending package output to stdio due " + \
            "to interactive package(s):\n",
            level=logging.INFO, noiselevel=-1)
        for pkg in interactive_tasks:
            pkg_str = " " + colorize("INFORM", str(pkg.cpv))
            if pkg.root != "/":
                pkg_str += " for " + pkg.root
            msg.append(pkg_str)
        writemsg_level("".join("%s\n" % (l,) for l in msg),
            level=logging.INFO, noiselevel=-1)
        # Interactive packages need the terminal, so cap parallelism at 1.
        if self._max_jobs is True or self._max_jobs > 1:
            self._set_max_jobs(1)
            writemsg_level(">>> Setting --jobs=1 due " + \
                "to the above interactive package(s)\n",
                level=logging.INFO, noiselevel=-1)

    # Quiet the status display unless running in the foreground, or when
    # the user combined --quiet without --verbose.
    self._status_display.quiet = \
        not background or \
        ("--quiet" in self.myopts and \
        "--verbose" not in self.myopts)

    self._logger.xterm_titles = \
        "notitles" not in self.settings.features and \
        self._status_display.quiet
    # NOTE(review): the final 'return background' appears elided in this
    # copy -- verify.
def _get_interactive_tasks(self):
    """Collect merge tasks whose PROPERTIES contain "interactive"."""
    from portage import flatten
    from portage.dep import use_reduce, paren_reduce
    interactive_tasks = []
    for task in self._mergelist:
        # Only Package instances scheduled for merge can be interactive.
        # NOTE(review): the 'continue' body of this guard and a 'try:'
        # before the use_reduce call appear elided in this copy.
        if not (isinstance(task, Package) and \
            task.operation == "merge"):
        properties = flatten(use_reduce(paren_reduce(
            task.metadata["PROPERTIES"]), uselist=task.use.enabled))
        except portage.exception.InvalidDependString, e:
            # A broken PROPERTIES string is treated as a fatal internal
            # error here.
            show_invalid_depstring_notice(task,
                task.metadata["PROPERTIES"], str(e))
            raise self._unknown_internal_error()
        if "interactive" in properties:
            interactive_tasks.append(task)
    return interactive_tasks
10364 def _set_digraph(self, digraph):
10365 if "--nodeps" in self.myopts or \
10366 (self._max_jobs is not True and self._max_jobs < 2):
10368 self._digraph = None
10371 self._digraph = digraph
10372 self._find_system_deps()
10373 self._prune_digraph()
10374 self._prevent_builddir_collisions()
def _find_system_deps(self):
    """
    Locate system packages together with their deep runtime
    dependencies. The resulting set -- restricted to packages that are
    scheduled for merge -- is stored in self._deep_system_deps; before
    being merged such packages go to merge_wait_queue, to be merged
    when no other packages are building.
    """
    found = _find_deep_system_runtime_deps(self._digraph)
    deep_system_deps = self._deep_system_deps
    deep_system_deps.clear()
    deep_system_deps.update(
        pkg for pkg in found if pkg.operation == "merge")
def _prune_digraph(self):
    """
    Prune any root nodes that are irrelevant.
    """
    graph = self._digraph
    completed_tasks = self._completed_tasks
    removed_nodes = set()
    # NOTE(review): an enclosing 'while True:' loop line appears elided
    # in this copy; pruning repeats until no more roots can be removed.
    for node in graph.root_nodes():
        # Roots that are not packages, are installed no-ops, or whose
        # task has already completed contribute nothing to scheduling.
        if not isinstance(node, Package) or \
            (node.installed and node.operation == "nomerge") or \
            node in completed_tasks:
            removed_nodes.add(node)
    graph.difference_update(removed_nodes)
    # NOTE(review): the loop-exit 'break' body appears elided here.
    if not removed_nodes:
    removed_nodes.clear()
def _prevent_builddir_collisions(self):
    """
    When building stages, sometimes the same exact cpv needs to be merged
    to both $ROOTs. Add edges to the digraph in order to avoid collisions
    in the builddir. Currently, normal file locks would be inappropriate
    for this purpose since emerge holds all of it's build dir locks from
    the main process.
    """
    # NOTE(review): the 'cpv_map = {}' initialization and the 'continue'
    # bodies of the guards below appear elided in this copy -- verify.
    for pkg in self._mergelist:
        if not isinstance(pkg, Package):
            # a satisfied blocker
        if pkg.cpv not in cpv_map:
            cpv_map[pkg.cpv] = [pkg]
        # Serialize same-cpv merges: every later merge must wait on all
        # earlier merges of the identical cpv via a buildtime edge.
        for earlier_pkg in cpv_map[pkg.cpv]:
            self._digraph.add(earlier_pkg, pkg,
                priority=DepPriority(buildtime=True))
        cpv_map[pkg.cpv].append(pkg)
class _pkg_failure(portage.exception.PortageException):
    """
    An instance of this class is raised by unmerge() when
    an uninstallation fails.
    """
    # NOTE(review): a class-level default (status = None) and an
    # 'if pargs:' guard around the assignment below appear elided in
    # this copy -- verify.
    def __init__(self, *pargs):
        portage.exception.PortageException.__init__(self, pargs)
        # The first positional argument carries the unmerge exit status.
        self.status = pargs[0]
10444 def _schedule_fetch(self, fetcher):
10446 Schedule a fetcher on the fetch queue, in order to
10447 serialize access to the fetch log.
10449 self._task_queues.fetch.addFront(fetcher)
10451 def _schedule_setup(self, setup_phase):
10453 Schedule a setup phase on the merge queue, in order to
10454 serialize unsandboxed access to the live filesystem.
10456 self._task_queues.merge.addFront(setup_phase)
10459 def _schedule_unpack(self, unpack_phase):
10461 Schedule an unpack phase on the unpack queue, in order
10462 to serialize $DISTDIR access for live ebuilds.
10464 self._task_queues.unpack.add(unpack_phase)
10466 def _find_blockers(self, new_pkg):
10468 Returns a callable which should be called only when
10469 the vdb lock has been acquired.
10471 def get_blockers():
10472 return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10473 return get_blockers
def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
    """
    Find installed packages blocked by new_pkg and return them as a
    list of dblink instances suitable for unmerging.
    """
    # Blocker handling is skipped under ignore-blockers options; the
    # early-return body of this guard appears elided in this copy.
    if self._opts_ignore_blockers.intersection(self.myopts):
    # Call gc.collect() here to avoid heap overflow that
    # triggers 'Cannot allocate memory' errors (reported
    # with python-2.5).
    # NOTE(review): the gc.collect() call itself appears elided here.
    blocker_db = self._blocker_db[new_pkg.root]
    blocker_dblinks = []
    for blocking_pkg in blocker_db.findInstalledBlockers(
        new_pkg, acquire_lock=acquire_lock):
        # Same-slot and identical-cpv packages are handled by normal
        # replacement logic, not treated as blockers; the 'continue'
        # bodies of these guards are elided in this copy.
        if new_pkg.slot_atom == blocking_pkg.slot_atom:
        if new_pkg.cpv == blocking_pkg.cpv:
        blocker_dblinks.append(portage.dblink(
            blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
            self.pkgsettings[blocking_pkg.root], treetype="vartree",
            vartree=self.trees[blocking_pkg.root]["vartree"]))
    return blocker_dblinks
def _dblink_pkg(self, pkg_dblink):
    """Translate a dblink instance into the scheduler's Package object."""
    pkg_type = RootConfig.tree_pkg_map[pkg_dblink.treetype]
    return self._pkg(
        pkg_dblink.mycpv,
        pkg_type,
        self.trees[pkg_dblink.myroot]["root_config"],
        installed=(pkg_type == "installed"))
def _append_to_log_path(self, log_path, msg):
    """Append msg to the log file at log_path."""
    f = open(log_path, 'a')
    # NOTE(review): the write and close calls (likely inside
    # try/finally) are elided in this copy -- verify.
def _dblink_elog(self, pkg_dblink, phase, func, msgs):
    """
    elog callback for dblink: deliver elog messages either to the build
    log (background mode) or to the terminal.
    """
    log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
    background = self._background
    # NOTE(review): initialization of log_file/out, the 'for msg in
    # msgs:' loop and the surrounding try/finally appear elided in this
    # copy -- verify against upstream.
    if background and log_path is not None:
        log_file = open(log_path, 'a')
    func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
    if log_file is not None:
        # log_file.close() appears elided here.
10535 def _dblink_emerge_log(self, msg):
10536 self._logger.log(msg)
def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
    """
    Display (and/or log) a merge-time message from dblink, honoring
    background mode and PORTAGE_LOG_FILE.
    """
    log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
    background = self._background

    # Without a log file, write to the terminal unless we are in
    # background mode and the message is below warning level.
    if log_path is None:
        if not (background and level < logging.WARN):
            portage.util.writemsg_level(msg,
                level=level, noiselevel=noiselevel)
    # NOTE(review): the 'else:' branch header (and an inner
    # 'if not background:' guard) appear elided before the two lines
    # below -- verify against upstream.
        portage.util.writemsg_level(msg,
            level=level, noiselevel=noiselevel)
        self._append_to_log_path(log_path, msg)
def _dblink_ebuild_phase(self,
    pkg_dblink, pkg_dbapi, ebuild_path, phase):
    """
    Using this callback for merge phases allows the scheduler
    to run while these phases execute asynchronously, and allows
    the scheduler control output handling.
    """
    scheduler = self._sched_iface
    settings = pkg_dblink.settings
    pkg = self._dblink_pkg(pkg_dblink)
    background = self._background
    # NOTE(review): log_path is unused in the visible code -- it may be
    # consumed by elided lines; verify.
    log_path = settings.get("PORTAGE_LOG_FILE")

    # Run the phase to completion (start + wait) on the scheduler's
    # event loop, keeping output handling under scheduler control.
    ebuild_phase = EbuildPhase(background=background,
        pkg=pkg, phase=phase, scheduler=scheduler,
        settings=settings, tree=pkg_dblink.treetype)
    ebuild_phase.start()
    ebuild_phase.wait()

    return ebuild_phase.returncode
def _generate_digests(self):
    """
    Generate digests if necessary for --digests or FEATURES=digest.
    In order to avoid interference, this must done before parallel
    tasks are started.
    """
    # --fetchonly skips digest generation; the early-return body of this
    # guard appears elided in this copy.
    if '--fetchonly' in self.myopts:

    digest = '--digest' in self.myopts
    # Detect FEATURES=digest in any root's config; the branch body that
    # enables digest generation appears elided here.
    for pkgsettings in self.pkgsettings.itervalues():
        if 'digest' in pkgsettings.features:

    for x in self._mergelist:
        # Only ebuilds that will actually be merged need digests; the
        # 'continue' bodies of these guards are elided in this copy.
        if not isinstance(x, Package) or \
            x.type_name != 'ebuild' or \
            x.operation != 'merge':
        pkgsettings = self.pkgsettings[x.root]
        if '--digest' not in self.myopts and \
            'digest' not in pkgsettings.features:
        portdb = x.root_config.trees['porttree'].dbapi
        ebuild_path = portdb.findname(x.cpv)
        if not ebuild_path:
            # NOTE(review): the writemsg_level(...) call opener and the
            # error return surrounding these message arguments are
            # elided in this copy.
            "!!! Could not locate ebuild for '%s'.\n" \
            % x.cpv, level=logging.ERROR, noiselevel=-1)
        # Point digestgen at the ebuild's directory.
        pkgsettings['O'] = os.path.dirname(ebuild_path)
        if not portage.digestgen([], pkgsettings, myportdb=portdb):
            "!!! Unable to generate manifest for '%s'.\n" \
            % x.cpv, level=logging.ERROR, noiselevel=-1)
    # NOTE(review): the final success return (os.EX_OK) appears elided.
def _check_manifests(self):
    """Verify Manifests for all scheduled ebuilds up front."""
    # Verify all the manifests now so that the user is notified of failure
    # as soon as possible.
    # Checking only applies with FEATURES=strict and when not merely
    # fetching; the early-return body of this guard is elided here.
    if "strict" not in self.settings.features or \
        "--fetchonly" in self.myopts or \
        "--fetch-all-uri" in self.myopts:

    shown_verifying_msg = False
    # Build per-root configs with PORTAGE_QUIET so that digestcheck
    # does not flood the display.
    quiet_settings = {}
    for myroot, pkgsettings in self.pkgsettings.iteritems():
        quiet_config = portage.config(clone=pkgsettings)
        quiet_config["PORTAGE_QUIET"] = "1"
        quiet_config.backup_changes("PORTAGE_QUIET")
        quiet_settings[myroot] = quiet_config

    for x in self._mergelist:
        # Only ebuild packages have Manifests; the 'continue' body of
        # this guard is elided in this copy.
        if not isinstance(x, Package) or \
            x.type_name != "ebuild":

        # Show the status message only once, before the first check.
        if not shown_verifying_msg:
            shown_verifying_msg = True
            self._status_msg("Verifying ebuild manifests")

        root_config = x.root_config
        portdb = root_config.trees["porttree"].dbapi
        quiet_config = quiet_settings[root_config.root]
        quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
        # NOTE(review): the failure return and the final success return
        # appear elided in this copy -- verify.
        if not portage.digestcheck([], quiet_config, strict=True):
def _add_prefetchers(self):
    """Spawn background fetchers for all but the first merge-list entry."""
    # NOTE(review): the early-return body of this guard appears elided
    # in this copy (making the second test below redundant) -- verify.
    if not self._parallel_fetch:

    if self._parallel_fetch:
        self._status_msg("Starting parallel fetch")

        prefetchers = self._prefetchers
        getbinpkg = "--getbinpkg" in self.myopts

        # In order to avoid "waiting for lock" messages
        # at the beginning, which annoy users, never
        # spawn a prefetcher for the first package.
        for pkg in self._mergelist[1:]:
            prefetcher = self._create_prefetcher(pkg)
            if prefetcher is not None:
                self._task_queues.fetch.add(prefetcher)
                # Keep a weak reference so later phases can reuse it.
                prefetchers[pkg] = prefetcher
def _create_prefetcher(self, pkg):
    """
    @return: a prefetcher, or None if not applicable
    """
    # NOTE(review): the 'prefetcher = None' default and the final
    # 'return prefetcher' appear elided in this copy -- verify.
    if not isinstance(pkg, Package):

    elif pkg.type_name == "ebuild":
        # Fetch the ebuild's distfiles in the background, logging to
        # the shared fetch log.
        prefetcher = EbuildFetcher(background=True,
            config_pool=self._ConfigPool(pkg.root,
            self._allocate_config, self._deallocate_config),
            fetchonly=1, logfile=self._fetch_log,
            pkg=pkg, prefetch=True, scheduler=self._sched_iface)

    elif pkg.type_name == "binary" and \
        "--getbinpkg" in self.myopts and \
        pkg.root_config.trees["bintree"].isremote(pkg.cpv):
        # Pre-download a remote binary package.
        prefetcher = BinpkgPrefetcher(background=True,
            pkg=pkg, scheduler=self._sched_iface)
def _is_restart_scheduled(self):
    """
    Check if the merge list contains a replacement
    for the current running instance, that will result
    in restart after merge.

    @returns: True if a restart is scheduled, False otherwise.
    """
    # Restart is suppressed outright by certain options; the
    # early-return body of this guard is elided in this copy.
    if self._opts_no_restart.intersection(self.myopts):

    mergelist = self._mergelist
    # A portage upgrade anywhere but the last list position forces a
    # restart; the 'return True' / final 'return False' lines appear
    # elided in this copy -- verify.
    for i, pkg in enumerate(mergelist):
        if self._is_restart_necessary(pkg) and \
            i != len(mergelist) - 1:
def _is_restart_necessary(self, pkg):
    """
    @return: True if merging the given package
    requires restart, False otherwise.
    """
    # Figure out if we need a restart.
    # A restart is needed when pkg is a different version of portage
    # itself, merged to the running root; the True/False return lines
    # appear elided in this copy -- verify.
    if pkg.root == self._running_root.root and \
        portage.match_from_list(
        portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
        if self._running_portage:
            return pkg.cpv != self._running_portage.cpv
def _restart_if_necessary(self, pkg):
    """
    Use execv() to restart emerge. This happens
    if portage upgrades itself and there are
    remaining packages in the list.
    """
    # NOTE(review): the early-return bodies of the three guards below
    # are elided in this copy -- verify against upstream.
    if self._opts_no_restart.intersection(self.myopts):
    if not self._is_restart_necessary(pkg):
    if pkg == self._mergelist[-1]:

    self._main_loop_cleanup()

    logger = self._logger
    pkg_count = self._pkg_count
    mtimedb = self._mtimedb
    bad_resume_opts = self._bad_resume_opts

    logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
        (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
    logger.log(" *** RESTARTING " + \
        "emerge via exec() after change of " + \
        "portage version.")

    # Drop the just-merged portage from the resume list before exec().
    mtimedb["resume"]["mergelist"].remove(list(pkg))
    # NOTE(review): an mtimedb.commit() likely precedes run_exitfuncs()
    # here but is elided in this copy -- verify.
    portage.run_exitfuncs()
    mynewargv = [sys.argv[0], "--resume"]
    resume_opts = self.myopts.copy()
    # For automatic resume, we need to prevent
    # any of bad_resume_opts from leaking in
    # via EMERGE_DEFAULT_OPTS.
    resume_opts["--ignore-default-opts"] = True
    for myopt, myarg in resume_opts.iteritems():
        if myopt not in bad_resume_opts:
            # NOTE(review): the 'if myarg is True:' / 'else:' pair that
            # selects between these two append forms is elided here.
            mynewargv.append(myopt)
            mynewargv.append(myopt +"="+ str(myarg))
    # priority only needs to be adjusted on the first run
    os.environ["PORTAGE_NICENESS"] = "0"
    # Replace the current process with a resumed emerge.
    os.execv(mynewargv[0], mynewargv)
10786 if "--resume" in self.myopts:
10788 portage.writemsg_stdout(
10789 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10790 self._logger.log(" *** Resuming merge...")
10792 self._save_resume_list()
10795 self._background = self._background_mode()
10796 except self._unknown_internal_error:
10799 for root in self.trees:
10800 root_config = self.trees[root]["root_config"]
10802 # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10803 # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10804 # for ensuring sane $PWD (bug #239560) and storing elog messages.
10805 tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10806 if not tmpdir or not os.path.isdir(tmpdir):
10807 msg = "The directory specified in your " + \
10808 "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10809 "does not exist. Please create this " + \
10810 "directory or correct your PORTAGE_TMPDIR setting."
10811 msg = textwrap.wrap(msg, 70)
10812 out = portage.output.EOutput()
10817 if self._background:
10818 root_config.settings.unlock()
10819 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10820 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10821 root_config.settings.lock()
10823 self.pkgsettings[root] = portage.config(
10824 clone=root_config.settings)
10826 rval = self._generate_digests()
10827 if rval != os.EX_OK:
10830 rval = self._check_manifests()
10831 if rval != os.EX_OK:
10834 keep_going = "--keep-going" in self.myopts
10835 fetchonly = self._build_opts.fetchonly
10836 mtimedb = self._mtimedb
10837 failed_pkgs = self._failed_pkgs
10840 rval = self._merge()
10841 if rval == os.EX_OK or fetchonly or not keep_going:
10843 if "resume" not in mtimedb:
10845 mergelist = self._mtimedb["resume"].get("mergelist")
10849 if not failed_pkgs:
10852 for failed_pkg in failed_pkgs:
10853 mergelist.remove(list(failed_pkg.pkg))
10855 self._failed_pkgs_all.extend(failed_pkgs)
10861 if not self._calc_resume_list():
10864 clear_caches(self.trees)
10865 if not self._mergelist:
10868 self._save_resume_list()
10869 self._pkg_count.curval = 0
10870 self._pkg_count.maxval = len([x for x in self._mergelist \
10871 if isinstance(x, Package) and x.operation == "merge"])
10872 self._status_display.maxval = self._pkg_count.maxval
10874 self._logger.log(" *** Finished. Cleaning up...")
10877 self._failed_pkgs_all.extend(failed_pkgs)
10880 background = self._background
10881 failure_log_shown = False
10882 if background and len(self._failed_pkgs_all) == 1:
10883 # If only one package failed then just show it's
10884 # whole log for easy viewing.
10885 failed_pkg = self._failed_pkgs_all[-1]
10886 build_dir = failed_pkg.build_dir
10889 log_paths = [failed_pkg.build_log]
10891 log_path = self._locate_failure_log(failed_pkg)
10892 if log_path is not None:
10894 log_file = open(log_path)
10898 if log_file is not None:
10900 for line in log_file:
10901 writemsg_level(line, noiselevel=-1)
10904 failure_log_shown = True
10906 # Dump mod_echo output now since it tends to flood the terminal.
10907 # This allows us to avoid having more important output, generated
10908 # later, from being swept away by the mod_echo output.
10909 mod_echo_output = _flush_elog_mod_echo()
10911 if background and not failure_log_shown and \
10912 self._failed_pkgs_all and \
10913 self._failed_pkgs_die_msgs and \
10914 not mod_echo_output:
10916 printer = portage.output.EOutput()
10917 for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10919 if mysettings["ROOT"] != "/":
10920 root_msg = " merged to %s" % mysettings["ROOT"]
10922 printer.einfo("Error messages for package %s%s:" % \
10923 (colorize("INFORM", key), root_msg))
10925 for phase in portage.const.EBUILD_PHASES:
10926 if phase not in logentries:
10928 for msgtype, msgcontent in logentries[phase]:
10929 if isinstance(msgcontent, basestring):
10930 msgcontent = [msgcontent]
10931 for line in msgcontent:
10932 printer.eerror(line.strip("\n"))
10934 if self._post_mod_echo_msgs:
10935 for msg in self._post_mod_echo_msgs:
10938 if len(self._failed_pkgs_all) > 1 or \
10939 (self._failed_pkgs_all and "--keep-going" in self.myopts):
10940 if len(self._failed_pkgs_all) > 1:
10941 msg = "The following %d packages have " % \
10942 len(self._failed_pkgs_all) + \
10943 "failed to build or install:"
10945 msg = "The following package has " + \
10946 "failed to build or install:"
10947 prefix = bad(" * ")
10948 writemsg(prefix + "\n", noiselevel=-1)
10949 from textwrap import wrap
10950 for line in wrap(msg, 72):
10951 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10952 writemsg(prefix + "\n", noiselevel=-1)
10953 for failed_pkg in self._failed_pkgs_all:
10954 writemsg("%s\t%s\n" % (prefix,
10955 colorize("INFORM", str(failed_pkg.pkg))),
10957 writemsg(prefix + "\n", noiselevel=-1)
def _elog_listener(self, mysettings, key, logentries, fulltext):
    """
    elog listener: capture ERROR-level entries so they can be replayed
    in the post-merge failure summary.

    @param mysettings: config for the package that produced the entries
    @param key: the package's cpv key
    @param logentries: phase-indexed elog entries
    @param fulltext: full elog text (unused here)
    """
    errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
    # Only record packages that actually produced errors; without this
    # guard every elog event appended an empty error tuple.
    if errors:
        self._failed_pkgs_die_msgs.append(
            (mysettings, key, errors))
def _locate_failure_log(self, failed_pkg):
    """
    Return the path of the most relevant log file for a failed
    package, or None when nothing usable exists.
    """
    build_dir = failed_pkg.build_dir
    # NOTE(review): several lines are elided in this copy (candidate
    # list construction, existence checks, the os.stat error handling
    # and the return statements) -- verify against upstream.
    log_paths = [failed_pkg.build_log]

    for log_path in log_paths:
        log_size = os.stat(log_path).st_size
def _add_packages(self):
    """Seed the scheduling queue with the Package entries of the merge list."""
    pkg_queue = self._pkg_queue
    for pkg in self._mergelist:
        if isinstance(pkg, Package):
            pkg_queue.append(pkg)
        elif isinstance(pkg, Blocker):
            # A satisfied blocker carries no schedulable work; the
            # dangling elif previously had no body, which is a syntax
            # error.
            pass
def _system_merge_started(self, merge):
    """
    Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
    """
    graph = self._digraph
    # NOTE(review): a 'graph is None' early-return guard appears elided
    # before this point -- verify.
    pkg = merge.merge.pkg

    # Skip this if $ROOT != / since it shouldn't matter if there
    # are unsatisfied system runtime deps in this case.
    # (the 'return' body of this guard is elided in this copy)
    if pkg.root != '/':

    completed_tasks = self._completed_tasks
    unsatisfied = self._unsatisfied_system_deps

    def ignore_non_runtime_or_satisfied(priority):
        """
        Ignore non-runtime and satisfied runtime priorities.
        """
        # NOTE(review): the boolean return lines of this predicate are
        # elided in this copy -- verify.
        if isinstance(priority, DepPriority) and \
            not priority.satisfied and \
            (priority.runtime or priority.runtime_post):

    # When checking for unsatisfied runtime deps, only check
    # direct deps since indirect deps are checked when the
    # corresponding parent is merged.
    for child in graph.child_nodes(pkg,
        ignore_priority=ignore_non_runtime_or_satisfied):
        # Non-package children and uninstalls are irrelevant; the
        # 'continue' body of this guard is elided in this copy.
        if not isinstance(child, Package) or \
            child.operation == 'uninstall':
        # A direct runtime dep that has not yet merged is unsatisfied.
        if child.operation == 'merge' and \
            child not in completed_tasks:
            unsatisfied.add(child)
11039 def _merge_wait_exit_handler(self, task):
11040 self._merge_wait_scheduled.remove(task)
11041 self._merge_exit(task)
def _merge_exit(self, merge):
    """Exit listener for PackageMerge tasks: bookkeeping and display."""
    self._do_merge_exit(merge)
    self._deallocate_config(merge.merge.settings)
    # Only successful merges of packages that were not already installed
    # advance the progress counter.
    if merge.returncode == os.EX_OK and \
        not merge.merge.pkg.installed:
        self._status_display.curval += 1
    self._status_display.merges = len(self._task_queues.merge)
    # NOTE(review): a trailing self._schedule() call appears elided in
    # this copy -- verify.
def _do_merge_exit(self, merge):
    """Record the outcome of a finished merge and update resume state."""
    pkg = merge.merge.pkg
    if merge.returncode != os.EX_OK:
        settings = merge.merge.settings
        build_dir = settings.get("PORTAGE_BUILDDIR")
        build_log = settings.get("PORTAGE_LOG_FILE")

        # NOTE(review): a 'pkg=pkg,' argument line inside this call and
        # a 'return' ending the failure branch appear elided in this
        # copy -- verify.
        self._failed_pkgs.append(self._failed_pkg(
            build_dir=build_dir, build_log=build_log,
            returncode=merge.returncode))
        self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")

        self._status_display.failed = len(self._failed_pkgs)

    self._task_complete(pkg)
    pkg_to_replace = merge.merge.pkg_to_replace
    if pkg_to_replace is not None:
        # When a package is replaced, mark it's uninstall
        # task complete (if any).
        uninst_hash_key = \
            ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
        self._task_complete(uninst_hash_key)

    self._restart_if_necessary(pkg)

    # Call mtimedb.commit() after each merge so that
    # --resume still works after being interrupted
    # by reboot, sigkill or similar.
    # NOTE(review): the '--resume in mtimedb' guard and the
    # mtimedb.commit() call appear elided around these lines -- verify.
    mtimedb = self._mtimedb
    mtimedb["resume"]["mergelist"].remove(list(pkg))
    if not mtimedb["resume"]["mergelist"]:
        del mtimedb["resume"]
def _build_exit(self, build):
    """Exit listener for build jobs: queue the merge or record failure."""
    # NOTE(review): job-count decrements ('self._jobs -= 1') and the
    # 'else:' branch headers appear elided in this copy -- verify.
    if build.returncode == os.EX_OK:
        merge = PackageMerge(merge=build)
        if not build.build_opts.buildpkgonly and \
            build.pkg in self._deep_system_deps:
            # Since dependencies on system packages are frequently
            # unspecified, merge them only when no builds are executing.
            self._merge_wait_queue.append(merge)
            merge.addStartListener(self._system_merge_started)
        merge.addExitListener(self._merge_exit)
        self._task_queues.merge.add(merge)
        self._status_display.merges = len(self._task_queues.merge)
        # NOTE(review): the lines below belong to the failure branch of
        # an elided 'else:'; a 'pkg=build.pkg,' argument also appears
        # elided inside the _failed_pkg call -- verify.
        settings = build.settings
        build_dir = settings.get("PORTAGE_BUILDDIR")
        build_log = settings.get("PORTAGE_LOG_FILE")

        self._failed_pkgs.append(self._failed_pkg(
            build_dir=build_dir, build_log=build_log,
            returncode=build.returncode))
        self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")

        self._status_display.failed = len(self._failed_pkgs)
        self._deallocate_config(build.settings)

    self._status_display.running = self._jobs
    # NOTE(review): a trailing self._schedule() call appears elided.
11122 def _extract_exit(self, build):
11123 self._build_exit(build)
11125 def _task_complete(self, pkg):
11126 self._completed_tasks.add(pkg)
11127 self._unsatisfied_system_deps.discard(pkg)
11128 self._choose_pkg_return_early = False
11132 self._add_prefetchers()
11133 self._add_packages()
11134 pkg_queue = self._pkg_queue
11135 failed_pkgs = self._failed_pkgs
11136 portage.locks._quiet = self._background
11137 portage.elog._emerge_elog_listener = self._elog_listener
11143 self._main_loop_cleanup()
11144 portage.locks._quiet = False
11145 portage.elog._emerge_elog_listener = None
11147 rval = failed_pkgs[-1].returncode
def _main_loop_cleanup(self):
    """Reset all per-run scheduling state after the main loop exits."""
    # Pending work and completion tracking.
    del self._pkg_queue[:]
    self._completed_tasks.clear()
    # System-dependency bookkeeping.
    self._deep_system_deps.clear()
    self._unsatisfied_system_deps.clear()
    # Memoized choice, display state, dependency graph, queued fetches.
    self._choose_pkg_return_early = False
    self._status_display.reset()
    self._digraph = None
    self._task_queues.fetch.clear()
def _choose_pkg(self):
    """
    Choose a task that has all it's dependencies satisfied.
    """
    # Memoized early return while waiting for running jobs; the
    # 'return None' body of this guard is elided in this copy.
    if self._choose_pkg_return_early:

    if self._digraph is None:
        # Without a graph we can only pick in list order; and with jobs
        # already running we must wait unless --nodeps parallelism is on.
        if (self._jobs or self._task_queues.merge) and \
            not ("--nodeps" in self.myopts and \
            (self._max_jobs is True or self._max_jobs > 1)):
            self._choose_pkg_return_early = True
            # NOTE(review): a 'return None' appears elided here.
        return self._pkg_queue.pop(0)

    # Nothing running: the head of the queue is always safe.
    if not (self._jobs or self._task_queues.merge):
        return self._pkg_queue.pop(0)

    self._prune_digraph()

    # NOTE(review): 'chosen_pkg = None', the 'later.remove(pkg)'
    # bookkeeping, the selection/break inside the loop and the final
    # 'return chosen_pkg' are elided in this copy -- verify.
    later = set(self._pkg_queue)
    for pkg in self._pkg_queue:
        if not self._dependent_on_scheduled_merges(pkg, later):

    if chosen_pkg is not None:
        self._pkg_queue.remove(chosen_pkg)

    if chosen_pkg is None:
        # There's no point in searching for a package to
        # choose until at least one of the existing jobs
        # completes.
        self._choose_pkg_return_early = True
def _dependent_on_scheduled_merges(self, pkg, later):
    """
    Traverse the subgraph of the given packages deep dependencies
    to see if it contains any scheduled merges.

    @param pkg: a package to check dependencies for
    @param later: packages for which dependence should be ignored
        since they will be merged later than pkg anyway and therefore
        delaying the merge of pkg will not result in a more optimal
        merge order
    @returns: True if the package is dependent, False otherwise.
    """
    graph = self._digraph
    completed_tasks = self._completed_tasks

    # Depth-first traversal over pkg's dependency subgraph.
    # NOTE(review): the result flag initialization, the enclosing
    # 'while node_stack:' loop line, the 'continue' body, the tail of
    # the compound condition (the 'node in later' test with its
    # dependent-found handling) and the final return are elided in this
    # copy -- verify against upstream.
    traversed_nodes = set([pkg])
    direct_deps = graph.child_nodes(pkg)
    node_stack = direct_deps
    direct_deps = frozenset(direct_deps)
    node = node_stack.pop()
    if node in traversed_nodes:
    traversed_nodes.add(node)
    if not ((node.installed and node.operation == "nomerge") or \
        (node.operation == "uninstall" and \
        node not in direct_deps) or \
        node in completed_tasks or \
        node_stack.extend(graph.child_nodes(node))
11240 def _allocate_config(self, root):
11242 Allocate a unique config instance for a task in order
11243 to prevent interference between parallel tasks.
11245 if self._config_pool[root]:
11246 temp_settings = self._config_pool[root].pop()
11248 temp_settings = portage.config(clone=self.pkgsettings[root])
11249 # Since config.setcpv() isn't guaranteed to call config.reset() due to
11250 # performance reasons, call it here to make sure all settings from the
11251 # previous package get flushed out (such as PORTAGE_LOG_FILE).
11252 temp_settings.reload()
11253 temp_settings.reset()
11254 return temp_settings
11256 def _deallocate_config(self, settings):
11257 self._config_pool[settings["ROOT"]].append(settings)
def _main_loop(self):
    """Drive scheduling and event polling until all work is done."""
    # Only allow 1 job max if a restart is scheduled
    # due to portage update.
    if self._is_restart_scheduled() or \
        self._opts_no_background.intersection(self.myopts):
        self._set_max_jobs(1)

    merge_queue = self._task_queues.merge

    # NOTE(review): the loop bodies below (the poll calls, the drain
    # loop over remaining jobs/merges and its break condition) are
    # elided in this copy -- verify against upstream.
    while self._schedule():
        if self._poll_event_handlers:
    if not (self._jobs or merge_queue):
        if self._poll_event_handlers:
11280 def _keep_scheduling(self):
11281 return bool(self._pkg_queue and \
11282 not (self._failed_pkgs and not self._build_opts.fetchonly))
def _schedule_tasks(self):
    """One scheduling pass: flush waiting merges, start new jobs."""
    # When the number of jobs drops to zero, process all waiting merges.
    if not self._jobs and self._merge_wait_queue:
        for task in self._merge_wait_queue:
            task.addExitListener(self._merge_wait_exit_handler)
            self._task_queues.merge.add(task)
        self._status_display.merges = len(self._task_queues.merge)
        self._merge_wait_scheduled.extend(self._merge_wait_queue)
        del self._merge_wait_queue[:]

    self._schedule_tasks_imp()
    self._status_display.display()

    # NOTE(review): the per-queue schedule() call inside this loop and
    # the surrounding state-change bookkeeping appear elided in this
    # copy -- verify against upstream.
    for q in self._task_queues.values():

    # Cancel prefetchers if they're the only reason
    # the main poll loop is still running.
    if self._failed_pkgs and not self._build_opts.fetchonly and \
        not (self._jobs or self._task_queues.merge) and \
        self._task_queues.fetch:
        self._task_queues.fetch.clear()
        # Clearing fetchers may have freed capacity, so try again.
        self._schedule_tasks_imp()
        self._status_display.display()

    return self._keep_scheduling()
def _job_delay(self):
    """
    Rate-limit job starts while the load average is being enforced.

    @returns: True if job scheduling should be delayed, False otherwise.
    """
    # Delay grows with the number of running jobs (capped at
    # _job_delay_max) so the load average has time to respond.
    if self._jobs and self._max_load is not None:
        current_time = time.time()
        delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
        if delay > self._job_delay_max:
            delay = self._job_delay_max
        # NOTE(review): the 'return True' body of this guard and the
        # final 'return False' appear elided in this copy -- verify.
        if (current_time - self._previous_job_start_time) < delay:
def _schedule_tasks_imp(self):
    """
    Start as many new tasks as limits allow.

    @returns: True if state changed, False otherwise.
    """
    # NOTE(review): the 'state_change' initialization, the enclosing
    # scheduling loop, and the branch conditions that route a package
    # to the merge queue versus the job queue (installed vs. build)
    # are elided in this copy -- verify against upstream.
    if not self._keep_scheduling():
        return bool(state_change)

    # Bail out when a choice was memoized as impossible, exclusive
    # merges are pending, system deps are unsatisfied while jobs run,
    # or job/load limits forbid another start.
    if self._choose_pkg_return_early or \
        self._merge_wait_scheduled or \
        (self._jobs and self._unsatisfied_system_deps) or \
        not self._can_add_job() or \
        return bool(state_change)

    pkg = self._choose_pkg()
    # (a 'pkg is None' guard appears elided before this return)
    return bool(state_change)

    if not pkg.installed:
        self._pkg_count.curval += 1

    task = self._task(pkg)

    # Already-installed packages go straight to the merge queue.
    merge = PackageMerge(merge=task)
    merge.addExitListener(self._merge_exit)
    self._task_queues.merge.add(merge)

    # Binary extraction path.
    self._previous_job_start_time = time.time()
    self._status_display.running = self._jobs
    task.addExitListener(self._extract_exit)
    self._task_queues.jobs.add(task)

    # Regular build path.
    self._previous_job_start_time = time.time()
    self._status_display.running = self._jobs
    task.addExitListener(self._build_exit)
    self._task_queues.jobs.add(task)

    return bool(state_change)
def _task(self, pkg):
    """Build a MergeListItem task for pkg, resolving any replaced pkg."""
    pkg_to_replace = None
    if pkg.operation != "uninstall":
        vardb = pkg.root_config.trees["vartree"].dbapi
        previous_cpv = vardb.match(pkg.slot_atom)
        # NOTE(review): an 'if previous_cpv:' guard appears elided
        # before the pop below -- verify.
        previous_cpv = previous_cpv.pop()
        pkg_to_replace = self._pkg(previous_cpv,
            "installed", pkg.root_config, installed=True)

    task = MergeListItem(args_set=self._args_set,
        background=self._background, binpkg_opts=self._binpkg_opts,
        build_opts=self._build_opts,
        config_pool=self._ConfigPool(pkg.root,
        self._allocate_config, self._deallocate_config),
        emerge_opts=self.myopts,
        find_blockers=self._find_blockers(pkg), logger=self._logger,
        mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
        pkg_to_replace=pkg_to_replace,
        prefetcher=self._prefetchers.get(pkg),
        scheduler=self._sched_iface,
        settings=self._allocate_config(pkg.root),
        statusMessage=self._status_msg,
        world_atom=self._world_atom)
    # NOTE(review): the final 'return task' appears elided in this copy.
def _failed_pkg_msg(self, failed_pkg, action, preposition):
    """
    Announce a package failure on the status display, including the
    log file location when one can be found.
    """
    pkg = failed_pkg.pkg
    msg = "%s to %s %s" % \
        (bad("Failed"), action, colorize("INFORM", pkg.cpv))
    if pkg.root != "/":
        msg += " %s %s" % (preposition, pkg.root)

    log_path = self._locate_failure_log(failed_pkg)
    if log_path is not None:
        msg += ", Log file:"
    self._status_msg(msg)

    # Emit the path on its own line so it is easy to copy.
    if log_path is not None:
        self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
def _status_msg(self, msg):
    """
    Display a brief status message (no newlines) in the status display.
    This is called by tasks to provide feedback to the user. This
    delegates the responsibility of generating \r and \n control characters,
    to guarantee that lines are created or erased when necessary and
    appropriate.

    @param msg: a brief status message (no newlines allowed)
    """
    # In the foreground, start on a fresh line so the message is not
    # appended to partial output.
    if not self._background:
        writemsg_level("\n")
    self._status_display.displayMessage(msg)
def _save_resume_list(self):
	"""
	Do this before verifying the ebuild Manifests since it might
	be possible for the user to use --resume --skipfirst get past
	a non-essential package with a broken digest.
	"""
	mtimedb = self._mtimedb
	# Only "merge" operations are resumable; uninstall/nomerge entries
	# are excluded from the saved resume list.
	mtimedb["resume"]["mergelist"] = [list(x) \
		for x in self._mergelist \
		if isinstance(x, Package) and x.operation == "merge"]

	# Restored: persist the resume list to disk; without the commit the
	# computed list is lost on interruption.
	mtimedb.commit()
def _calc_resume_list(self):
	"""
	Use the current resume list to calculate a new one,
	dropping any packages with unsatisfied deps.

	@returns: True if successful, False otherwise.
	"""
	# NOTE(review): this listing appears to have interspersed lines
	# elided (guards, else:/try: headers and return statements); the
	# code text below is preserved verbatim -- restore the missing
	# lines from upstream before treating this as runnable.
	print colorize("GOOD", "*** Resuming merge...")

	if self._show_list():
		if "--tree" in self.myopts:
			portage.writemsg_stdout("\n" + \
				darkgreen("These are the packages that " + \
				"would be merged, in reverse order:\n\n"))
		# else: branch (in-order display) -- header appears elided
			portage.writemsg_stdout("\n" + \
				darkgreen("These are the packages that " + \
				"would be merged, in order:\n\n"))

	show_spinner = "--quiet" not in self.myopts and \
		"--nodeps" not in self.myopts
	# presumably guarded by "if show_spinner:" -- TODO confirm
	print "Calculating dependencies ",

	myparams = create_depgraph_params(self.myopts, None)
	# try: header appears elided
		success, mydepgraph, dropped_tasks = resume_depgraph(
			self.settings, self.trees, self._mtimedb, self.myopts,
			myparams, self._spinner)
	except depgraph.UnsatisfiedResumeDep, exc:
		# rename variable to avoid python-3.0 error:
		# SyntaxError: can not delete variable 'e' referenced in nested
		# scope -- an "e = exc" binding appears to be elided here
		mydepgraph = e.depgraph
		dropped_tasks = set()
	print "\b\b... done!"

	def unsatisfied_resume_dep_msg():
		mydepgraph.display_problems()
		out = portage.output.EOutput()
		out.eerror("One or more packages are either masked or " + \
			"have missing dependencies:")
		show_parents = set()
		for dep in e.value:
			if dep.parent in show_parents:
				# "continue" appears elided
			show_parents.add(dep.parent)
			if dep.atom is None:
				out.eerror(indent + "Masked package:")
				out.eerror(2 * indent + str(dep.parent))
			# else: branch header appears elided
				out.eerror(indent + str(dep.atom) + " pulled in by:")
				out.eerror(2 * indent + str(dep.parent))
		msg = "The resume list contains packages " + \
			"that are either masked or have " + \
			"unsatisfied dependencies. " + \
			"Please restart/continue " + \
			"the operation manually, or use --skipfirst " + \
			"to skip the first package in the list and " + \
			"any other packages that may be " + \
			"masked or have missing dependencies."
		for line in textwrap.wrap(msg, 72):
			# loop body (error output) appears elided
	self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)

	if success and self._show_list():
		mylist = mydepgraph.altlist()
		if "--tree" in self.myopts:
			mydepgraph.display(mylist, favorites=self._favorites)
		self._post_mod_echo_msgs.append(mydepgraph.display_problems)
	mydepgraph.display_problems()

	mylist = mydepgraph.altlist()
	mydepgraph.break_refs(mylist)
	mydepgraph.break_refs(dropped_tasks)
	self._mergelist = mylist
	self._set_digraph(mydepgraph.schedulerGraph())

	for task in dropped_tasks:
		if not (isinstance(task, Package) and task.operation == "merge"):
			# "continue" appears elided; a "pkg = task" binding is
			# also presumably elided below -- TODO confirm
		msg = "emerge --keep-going:" + \
		if pkg.root != "/":
			msg += " for %s" % (pkg.root,)
		msg += " dropped due to unsatisfied dependency."
		for line in textwrap.wrap(msg, msg_width):
			eerror(line, phase="other", key=pkg.cpv)
		settings = self.pkgsettings[pkg.root]
		# Ensure that log collection from $T is disabled inside
		# elog_process(), since any logs that might exist are
		settings.pop("T", None)
		portage.elog.elog_process(pkg.cpv, settings)
		self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
11576 def _show_list(self):
11577 myopts = self.myopts
11578 if "--quiet" not in myopts and \
11579 ("--ask" in myopts or "--tree" in myopts or \
11580 "--verbose" in myopts):
def _world_atom(self, pkg):
	"""
	Add the package to the world file, but only if
	it's supposed to be added. Otherwise, do nothing.
	"""
	# NOTE(review): listing appears truncated -- early-return bodies
	# and the world-set lock/unlock lines are elided; code text below
	# is preserved verbatim.
	if set(("--buildpkgonly", "--fetchonly",
		"--oneshot", "--onlydeps",
		"--pretend")).intersection(self.myopts):
		# early return appears elided
	if pkg.root != self.target_root:
		# early return appears elided
	args_set = self._args_set
	if not args_set.findAtomForPackage(pkg):
		# early return appears elided
	logger = self._logger
	pkg_count = self._pkg_count
	root_config = pkg.root_config
	world_set = root_config.sets["world"]
	world_locked = False
	if hasattr(world_set, "lock"):
		# the world_set.lock() call appears elided
		world_locked = True
	if hasattr(world_set, "load"):
		world_set.load() # maybe it's changed on disk
	atom = create_world_atom(pkg, args_set, root_config)
	# an "if atom:" guard presumably encloses the block below -- TODO confirm
	if hasattr(world_set, "add"):
		self._status_msg(('Recording %s in "world" ' + \
			'favorites file...') % atom)
		logger.log(" === (%s of %s) Updating world file (%s)" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv))
		world_set.add(atom)
	# else: branch header appears elided
		writemsg_level('\n!!! Unable to record %s in "world"\n' % \
			(atom,), level=logging.WARN, noiselevel=-1)
	# finally: unlock of the world set appears elided
def _pkg(self, cpv, type_name, root_config, installed=False):
	"""
	Get a package instance from the cache, or create a new
	one if necessary. Raises KeyError from aux_get if it
	fails for some reason (package does not exist or is
	corrupt).
	"""
	operation = "merge"
	# an "if installed:" guard appears to be elided before the
	# reassignment below -- TODO confirm
	operation = "nomerge"

	if self._digraph is not None:
		# Reuse existing instance when available.
		pkg = self._digraph.get(
			(type_name, root_config.root, cpv, operation))
		if pkg is not None:
			# "return pkg" appears elided

	tree_type = depgraph.pkg_tree_map[type_name]
	db = root_config.trees[tree_type].dbapi
	db_keys = list(self.trees[root_config.root][
		tree_type].dbapi._aux_cache_keys)
	metadata = izip(db_keys, db.aux_get(cpv, db_keys))
	pkg = Package(cpv=cpv, metadata=metadata,
		root_config=root_config, installed=installed)
	if type_name == "ebuild":
		# Ebuild USE/CHOST come from the per-root config, evaluated
		# for this specific cpv.
		settings = self.pkgsettings[root_config.root]
		settings.setcpv(pkg)
		pkg.metadata["USE"] = settings["PORTAGE_USE"]
		pkg.metadata['CHOST'] = settings.get('CHOST', '')
	# final "return pkg" appears elided
class MetadataRegen(PollScheduler):
	"""
	Schedules EbuildMetadataPhase jobs to regenerate the ebuild
	metadata cache, subject to the max_jobs/max_load limits, and
	cleanses stale cache entries afterwards.
	"""

	def __init__(self, portdb, cp_iter=None, consumer=None,
		max_jobs=None, max_load=None):
		PollScheduler.__init__(self)
		self._portdb = portdb
		self._global_cleanse = False
		if cp_iter is None:
			cp_iter = self._iter_every_cp()
			# We can globally cleanse stale cache only if we
			# iterate over every single cp.
			self._global_cleanse = True
		self._cp_iter = cp_iter
		self._consumer = consumer

		if max_jobs is None:
			# the default assignment appears elided -- TODO confirm
		self._max_jobs = max_jobs
		self._max_load = max_load
		self._sched_iface = self._sched_iface_class(
			register=self._register,
			schedule=self._schedule_wait,
			unregister=self._unregister)

		self._valid_pkgs = set()
		self._cp_set = set()
		self._process_iter = self._iter_metadata_processes()
		self.returncode = os.EX_OK
		self._error_count = 0

	def _iter_every_cp(self):
		# Yield every category/package name, lowest first, by popping
		# from a reverse-sorted list.
		every_cp = self._portdb.cp_all()
		every_cp.sort(reverse=True)
		# "while every_cp:" loop header appears elided
			yield every_cp.pop()

	def _iter_metadata_processes(self):
		# Yield an EbuildMetadataPhase for each cpv whose cached
		# metadata is stale; valid entries only notify the consumer.
		portdb = self._portdb
		valid_pkgs = self._valid_pkgs
		cp_set = self._cp_set
		consumer = self._consumer

		for cp in self._cp_iter:
			portage.writemsg_stdout("Processing %s\n" % cp)
			cpv_list = portdb.cp_list(cp)
			for cpv in cpv_list:
				valid_pkgs.add(cpv)
				ebuild_path, repo_path = portdb.findname2(cpv)
				metadata, st, emtime = portdb._pull_valid_cache(
					cpv, ebuild_path, repo_path)
				if metadata is not None:
					if consumer is not None:
						consumer(cpv, ebuild_path,
							repo_path, metadata)
					# "continue" appears elided

				yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
					ebuild_mtime=emtime,
					metadata_callback=portdb._metadata_callback,
					portdb=portdb, repo_path=repo_path,
					settings=portdb.doebuild_settings)

	# NOTE(review): the method header for the body below (presumably
	# "def run(self):") appears to be elided from this listing, along
	# with several try:/else: headers and continuation lines.
		portdb = self._portdb
		from portage.cache.cache_errors import CacheError
		# dead_nodes initialization appears elided

		while self._schedule():
			# wait-loop body appears elided

		if self._global_cleanse:
			for mytree in portdb.porttrees:
				# try: header appears elided
					dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
				except CacheError, e:
					portage.writemsg("Error listing cache entries for " + \
						"'%s': %s, continuing...\n" % (mytree, e),
						# trailing arguments appear elided
		# else: branch (cleanse only the visited cp's) -- header elided
			cp_set = self._cp_set
			cpv_getkey = portage.cpv_getkey
			for mytree in portdb.porttrees:
				# try: header appears elided
					dead_nodes[mytree] = set(cpv for cpv in \
						portdb.auxdb[mytree].iterkeys() \
						if cpv_getkey(cpv) in cp_set)
				except CacheError, e:
					portage.writemsg("Error listing cache entries for " + \
						"'%s': %s, continuing...\n" % (mytree, e),
						# trailing arguments appear elided

		# Any cpv that still has an ebuild in some tree is not dead.
		for y in self._valid_pkgs:
			for mytree in portdb.porttrees:
				if portdb.findname2(y, mytree=mytree)[0]:
					dead_nodes[mytree].discard(y)

		for mytree, nodes in dead_nodes.iteritems():
			auxdb = portdb.auxdb[mytree]
			# per-node deletion loop appears elided
				except (KeyError, CacheError):
					# handler body appears elided

	def _schedule_tasks(self):
		"""
		@returns: True if there may be remaining tasks to schedule,
			False otherwise.
		"""
		while self._can_add_job():
			# try: header appears elided
				metadata_process = self._process_iter.next()
			except StopIteration:
				# return appears elided

			metadata_process.scheduler = self._sched_iface
			metadata_process.addExitListener(self._metadata_exit)
			metadata_process.start()
		# final return appears elided

	def _metadata_exit(self, metadata_process):
		# a job-count decrement appears to be elided here
		if metadata_process.returncode != os.EX_OK:
			self.returncode = 1
			self._error_count += 1
			self._valid_pkgs.discard(metadata_process.cpv)
			portage.writemsg("Error processing %s, continuing...\n" % \
				(metadata_process.cpv,), noiselevel=-1)

			if self._consumer is not None:
				# On failure, still notify the consumer (in this case the metadata
				# argument is None).
				self._consumer(metadata_process.cpv,
					metadata_process.ebuild_path,
					metadata_process.repo_path,
					metadata_process.metadata)
		# a trailing self._schedule() call appears elided
class UninstallFailure(portage.exception.PortageException):
	"""
	An instance of this class is raised by unmerge() when
	an uninstallation fails.
	"""
	# Default exit status when none is supplied by the caller.
	status = 1

	def __init__(self, *pargs):
		"""
		@param pargs: optional positional arguments; when given, the
			first one is the failing unmerge's exit status.
		"""
		portage.exception.PortageException.__init__(self, pargs)
		# Restored guard: without it, constructing the exception with
		# no arguments would raise IndexError on pargs[0].
		if pargs:
			self.status = pargs[0]
def unmerge(root_config, myopts, unmerge_action,
	unmerge_files, ldpath_mtimes, autoclean=0,
	clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
	scheduler=None, writemsg_level=portage.util.writemsg_level):
	"""
	Select installed packages for the given action ("unmerge", "prune"
	or "clean"), display what is selected/protected/omitted, and then
	unmerge the selected packages.

	NOTE(review): this listing appears to have many interspersed lines
	elided (guards, else:/try: headers, returns); the code text below
	is preserved verbatim -- restore the gaps from upstream before
	treating it as runnable.
	"""
	quiet = "--quiet" in myopts
	settings = root_config.settings
	sets = root_config.sets
	vartree = root_config.trees["vartree"]
	candidate_catpkgs=[]
	xterm_titles = "notitles" not in settings.features
	out = portage.output.EOutput()
	db_keys = list(vartree.dbapi._aux_cache_keys)
	# nested helper (presumably "def _pkg(cpv):") -- its def line and
	# cache initialization appear elided; it memoizes installed
	# Package instances per cpv
		pkg = pkg_cache.get(cpv)
		# "if pkg is None:" guard appears elided
			pkg = Package(cpv=cpv, installed=True,
				metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
				root_config=root_config,
				type_name="installed")
			pkg_cache[cpv] = pkg

	vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
	# try: header appears elided
		# At least the parent needs to exist for the lock file.
		portage.util.ensure_dirs(vdb_path)
	except portage.exception.PortageException:
		# handler body appears elided

	if os.access(vdb_path, os.W_OK):
		vdb_lock = portage.locks.lockdir(vdb_path)
	realsyslist = sets["system"].getAtoms()
	# syslist initialization appears elided
	for x in realsyslist:
		mycp = portage.dep_getkey(x)
		if mycp in settings.getvirtuals():
			# providers-list initialization appears elided
			for provider in settings.getvirtuals()[mycp]:
				if vartree.dbapi.match(provider):
					providers.append(provider)
			if len(providers) == 1:
				syslist.extend(providers)
		# else: branch header appears elided
			syslist.append(mycp)

	mysettings = portage.config(clone=settings)

	if not unmerge_files:
		if unmerge_action == "unmerge":
			print bold("emerge unmerge") + " can only be used with specific package names"
			# early return appears elided

	localtree = vartree
	# process all arguments and add all
	# valid db entries to candidate_catpkgs
	if not unmerge_files:
		candidate_catpkgs.extend(vartree.dbapi.cp_all())
	# else: branch header appears elided
		#we've got command-line arguments
		if not unmerge_files:
			print "\nNo packages to unmerge have been provided.\n"
			# early return appears elided
		for x in unmerge_files:
			arg_parts = x.split('/')
			if x[0] not in [".","/"] and \
				arg_parts[-1][-7:] != ".ebuild":
				#possible cat/pkg or dep; treat as such
				candidate_catpkgs.append(x)
			elif unmerge_action in ["prune","clean"]:
				print "\n!!! Prune and clean do not accept individual" + \
					" ebuilds as arguments;\n skipping.\n"
			# else: branch header appears elided
				# it appears that the user is specifying an installed
				# ebuild and we're in "unmerge" mode, so it's ok.
				if not os.path.exists(x):
					print "\n!!! The path '"+x+"' doesn't exist.\n"
					# early return appears elided

				absx = os.path.abspath(x)
				sp_absx = absx.split("/")
				if sp_absx[-1][-7:] == ".ebuild":
					# truncation of the ".ebuild" suffix appears elided
					absx = "/".join(sp_absx)

				sp_absx_len = len(sp_absx)

				vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
				vdb_len = len(vdb_path)

				sp_vdb = vdb_path.split("/")
				sp_vdb_len = len(sp_vdb)

				if not os.path.exists(absx+"/CONTENTS"):
					print "!!! Not a valid db dir: "+str(absx)
					# early return appears elided

				if sp_absx_len <= sp_vdb_len:
					# The Path is shorter... so it can't be inside the vdb.
					print "\n!!!",x,"cannot be inside "+ \
						vdb_path+"; aborting.\n"
					# early return appears elided

				for idx in range(0,sp_vdb_len):
					if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
						print "\n!!!", x, "is not inside "+\
							vdb_path+"; aborting.\n"
						# early return appears elided

				print "="+"/".join(sp_absx[sp_vdb_len:])
				candidate_catpkgs.append(
					"="+"/".join(sp_absx[sp_vdb_len:]))

	if (not "--quiet" in myopts):
		# newline assignment appears elided
	if settings["ROOT"] != "/":
		writemsg_level(darkgreen(newline+ \
			">>> Using system located in ROOT tree %s\n" % \
			# continuation/closing arguments appear elided
	if (("--pretend" in myopts) or ("--ask" in myopts)) and \
		not ("--quiet" in myopts):
		writemsg_level(darkgreen(newline+\
			">>> These are the packages that would be unmerged:\n"))

	# Preservation of order is required for --depclean and --prune so
	# that dependencies are respected. Use all_selected to eliminate
	# duplicate packages since the same package may be selected by
	# multiple atoms.  (pkgmap initialization appears elided)
	all_selected = set()
	for x in candidate_catpkgs:
		# cycle through all our candidate deps and determine
		# what will and will not get unmerged
		# try: header appears elided
			mymatch = vartree.dbapi.match(x)
		except portage.exception.AmbiguousPackageName, errpkgs:
			print "\n\n!!! The short ebuild name \"" + \
				x + "\" is ambiguous. Please specify"
			print "!!! one of the following fully-qualified " + \
				"ebuild names instead:\n"
			for i in errpkgs[0]:
				print " " + green(i)
			# exit appears elided

		if not mymatch and x[0] not in "<>=~":
			mymatch = localtree.dep_match(x)
		# an "if not mymatch:" guard appears elided
			portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
				(x, unmerge_action), noiselevel=-1)
			# "continue" appears elided

		# the pkgmap.append( line appears elided
			{"protected": set(), "selected": set(), "omitted": set()})
		mykey = len(pkgmap) - 1
		if unmerge_action=="unmerge":
			# a "for y in mymatch:" loop header appears elided
				if y not in all_selected:
					pkgmap[mykey]["selected"].add(y)
					all_selected.add(y)
		elif unmerge_action == "prune":
			if len(mymatch) == 1:
				# "continue" appears elided
			best_version = mymatch[0]
			best_slot = vartree.getslot(best_version)
			best_counter = vartree.dbapi.cpv_counter(best_version)
			for mypkg in mymatch[1:]:
				myslot = vartree.getslot(mypkg)
				mycounter = vartree.dbapi.cpv_counter(mypkg)
				if (myslot == best_slot and mycounter > best_counter) or \
					mypkg == portage.best([mypkg, best_version]):
					if myslot == best_slot:
						if mycounter < best_counter:
							# On slot collision, keep the one with the
							# highest counter since it is the most
							# recently installed.
							# "continue" appears elided
					best_version = mypkg
					# "best_slot = myslot" appears elided
					best_counter = mycounter
			pkgmap[mykey]["protected"].add(best_version)
			pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
				if mypkg != best_version and mypkg not in all_selected)
			all_selected.update(pkgmap[mykey]["selected"])
		# else: branch header appears elided
			# unmerge_action == "clean"
			# slotmap initialization appears elided
			for mypkg in mymatch:
				if unmerge_action == "clean":
					myslot = localtree.getslot(mypkg)
				# else: branch header appears elided
					# since we're pruning, we don't care about slots
					# and put all the pkgs in together
				if myslot not in slotmap:
					slotmap[myslot] = {}
				slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
			# an enclosing guard around the loop below appears elided
			for mypkg in vartree.dbapi.cp_list(
				portage.dep_getkey(mymatch[0])):
				myslot = vartree.getslot(mypkg)
				if myslot not in slotmap:
					slotmap[myslot] = {}
				slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg

			for myslot in slotmap:
				counterkeys = slotmap[myslot].keys()
				if not counterkeys:
					# "continue" and a counterkeys.sort() appear elided
				pkgmap[mykey]["protected"].add(
					slotmap[myslot][counterkeys[-1]])
				del counterkeys[-1]

				for counter in counterkeys[:]:
					mypkg = slotmap[myslot][counter]
					if mypkg not in mymatch:
						counterkeys.remove(counter)
						pkgmap[mykey]["protected"].add(
							slotmap[myslot][counter])

				#be pretty and get them in order of merge:
				for ckey in counterkeys:
					mypkg = slotmap[myslot][ckey]
					if mypkg not in all_selected:
						pkgmap[mykey]["selected"].add(mypkg)
						all_selected.add(mypkg)
				# ok, now the last-merged package
				# is protected, and the rest are selected
	numselected = len(all_selected)
	if global_unmerge and not numselected:
		portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
		# early return appears elided

	if not numselected:
		portage.writemsg_stdout(
			"\n>>> No packages selected for removal by " + \
			unmerge_action + "\n")
	# a finally: block releasing the vdb lock appears elided
		vartree.dbapi.flush_cache()
		portage.locks.unlockdir(vdb_lock)

	from portage.sets.base import EditablePackageSet

	# generate a list of package sets that are directly or indirectly listed in "world",
	# as there is no persistent list of "installed" sets
	installed_sets = ["world"]
	# the fixed-point loop headers appear elided here
		pos = len(installed_sets)
		for s in installed_sets[pos - 1:]:
			# an "if s not in sets: continue" guard appears elided
			candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
			# an "if candidates:" guard appears elided
				installed_sets += candidates
	installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]

	# we don't want to unmerge packages that are still listed in user-editable package sets
	# listed in "world" as they would be remerged on the next update of "world" or the
	# relevant package sets.
	unknown_sets = set()
	for cp in xrange(len(pkgmap)):
		for cpv in pkgmap[cp]["selected"].copy():
			# a try:/except KeyError: around a _pkg(cpv) lookup
			# appears elided here
				# It could have been uninstalled
				# by a concurrent process.
				# "continue" appears elided

			if unmerge_action != "clean" and \
				root_config.root == "/" and \
				portage.match_from_list(
				portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
				msg = ("Not unmerging package %s since there is no valid " + \
					"reason for portage to unmerge itself.") % (pkg.cpv,)
				for line in textwrap.wrap(msg, 75):
					# error output appears elided
				# adjust pkgmap so the display output is correct
				pkgmap[cp]["selected"].remove(cpv)
				all_selected.remove(cpv)
				pkgmap[cp]["protected"].add(cpv)
				# "continue" appears elided

			for s in installed_sets:
				# skip sets that the user requested to unmerge, and skip world
				# unless we're unmerging a package set (as the package would be
				# removed from "world" later on)
				if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
					# "continue" appears elided

				# an "if s not in sets:" guard appears elided
				if s in unknown_sets:
					# "continue" appears elided
					unknown_sets.add(s)
					out = portage.output.EOutput()
					out.eerror(("Unknown set '@%s' in " + \
						"%svar/lib/portage/world_sets") % \
						(s, root_config.root))
					# "continue" appears elided

				# only check instances of EditablePackageSet as other classes are generally used for
				# special purposes and can be ignored here (and are usually generated dynamically, so the
				# user can't do much about them anyway)
				if isinstance(sets[s], EditablePackageSet):

					# This is derived from a snippet of code in the
					# depgraph._iter_atoms_for_pkg() method.
					for atom in sets[s].iterAtomsForPackage(pkg):
						inst_matches = vartree.dbapi.match(atom)
						inst_matches.reverse() # descending order
						# a higher_slot initialization appears elided
						for inst_cpv in inst_matches:
							# try: header appears elided
								inst_pkg = _pkg(inst_cpv)
							# except KeyError: appears elided
								# It could have been uninstalled
								# by a concurrent process.
								# "continue" appears elided

							if inst_pkg.cp != atom.cp:
								# "continue" appears elided
							if pkg >= inst_pkg:
								# This is descending order, and we're not
								# interested in any versions <= pkg given.
								# "break" appears elided
							if pkg.slot_atom != inst_pkg.slot_atom:
								higher_slot = inst_pkg
								# "break" appears elided
						if higher_slot is None:
							# recording of the parent set appears elided

			# a guard on the collected parents appears elided before
			# the display block below
			#print colorize("WARN", "Package %s is going to be unmerged," % cpv)
			#print colorize("WARN", "but still listed in the following package sets:")
			#print " %s\n" % ", ".join(parents)
			print colorize("WARN", "Not unmerging package %s as it is" % cpv)
			print colorize("WARN", "still referenced by the following package sets:")
			print " %s\n" % ", ".join(parents)
			# adjust pkgmap so the display output is correct
			pkgmap[cp]["selected"].remove(cpv)
			all_selected.remove(cpv)
			pkgmap[cp]["protected"].add(cpv)

	numselected = len(all_selected)
	if not numselected:
		# the writemsg_stdout( call line appears elided
			"\n>>> No packages selected for removal by " + \
			unmerge_action + "\n")
		# early return appears elided

	# Unmerge order only matters in some cases
	# ("if not ordered:" and the loop over pkgmap appear elided)
		selected = d["selected"]
		# an "if not selected: continue" guard appears elided
		cp = portage.cpv_getkey(iter(selected).next())
		cp_dict = unordered.get(cp)
		if cp_dict is None:
			# cp_dict initialization appears elided
			unordered[cp] = cp_dict
			# per-key set() initialization appears elided
		for k, v in d.iteritems():
			cp_dict[k].update(v)
	pkgmap = [unordered[cp] for cp in sorted(unordered)]

	for x in xrange(len(pkgmap)):
		selected = pkgmap[x]["selected"]
		# an "if not selected: continue" guard appears elided
		for mytype, mylist in pkgmap[x].iteritems():
			if mytype == "selected":
				# "continue" appears elided
			mylist.difference_update(all_selected)
		cp = portage.cpv_getkey(iter(selected).next())
		for y in localtree.dep_match(cp):
			if y not in pkgmap[x]["omitted"] and \
				y not in pkgmap[x]["selected"] and \
				y not in pkgmap[x]["protected"] and \
				y not in all_selected:
				pkgmap[x]["omitted"].add(y)
		if global_unmerge and not pkgmap[x]["selected"]:
			#avoid cluttering the preview printout with stuff that isn't getting unmerged
			# "continue" appears elided
		if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
			writemsg_level(colorize("BAD","\a\n\n!!! " + \
				"'%s' is part of your system profile.\n" % cp),
				level=logging.WARNING, noiselevel=-1)
			writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
				"be damaging to your system.\n\n"),
				level=logging.WARNING, noiselevel=-1)
			if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
				countdown(int(settings["EMERGE_WARNING_DELAY"]),
					colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
		# the branch headers choosing between the two display styles
		# below appear elided
			writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
			writemsg_level(bold(cp) + ": ", noiselevel=-1)
		for mytype in ["selected","protected","omitted"]:
			writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
			if pkgmap[x][mytype]:
				sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
				sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
				for pn, ver, rev in sorted_pkgs:
					# revision ("-r0") special-casing appears elided
					myversion = ver + "-" + rev
					if mytype == "selected":
						# writemsg_level( call lines appear elided
						colorize("UNMERGE_WARN", myversion + " "),
						colorize("GOOD", myversion + " "), noiselevel=-1)
			# else: branch header appears elided
				writemsg_level("none ", noiselevel=-1)
			writemsg_level("\n", noiselevel=-1)
	writemsg_level("\n", noiselevel=-1)

	writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
		" packages are slated for removal.\n")
	writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
		" and " + colorize("GOOD", "'omitted'") + \
		" packages will not be removed.\n\n")

	if "--pretend" in myopts:
		#we're done... return
		# early return appears elided
	if "--ask" in myopts:
		if userquery("Would you like to unmerge these packages?")=="No":
			# enter pretend mode for correct formatting of results
			myopts["--pretend"] = True
			# early return appears elided

	#the real unmerging begins, after a short delay....
	if clean_delay and not autoclean:
		countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")

	for x in xrange(len(pkgmap)):
		for y in pkgmap[x]["selected"]:
			writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
			emergelog(xterm_titles, "=== Unmerging... ("+y+")")
			mysplit = y.split("/")
			retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
				mysettings, unmerge_action not in ["clean","prune"],
				vartree=vartree, ldpath_mtimes=ldpath_mtimes,
				scheduler=scheduler)

			if retval != os.EX_OK:
				emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
				# an "if raise_on_error:" guard appears elided
				raise UninstallFailure(retval)
				# a non-raising exit path appears elided

			# else: success path -- branch header appears elided
				if clean_world and hasattr(sets["world"], "cleanPackage"):
					sets["world"].cleanPackage(vartree.dbapi, y)
				emergelog(xterm_titles, " >>> unmerge success: "+y)
	if clean_world and hasattr(sets["world"], "remove"):
		for s in root_config.setconfig.active:
			sets["world"].remove(SETPREFIX+s)
	# final return appears elided
def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
	# Regenerate the GNU info directory index for any info directory
	# whose mtime differs from the one recorded in prev_mtimes.
	# NOTE(review): listing appears truncated -- several initializer
	# lines and try:/else: headers are elided; code text below is
	# preserved verbatim.
	if os.path.exists("/usr/bin/install-info"):
		out = portage.output.EOutput()
		# the regen_infodirs accumulation loop header appears elided
			inforoot=normpath(root+z)
			if os.path.isdir(inforoot):
				infomtime = long(os.stat(inforoot).st_mtime)
				if inforoot not in prev_mtimes or \
					prev_mtimes[inforoot] != infomtime:
					regen_infodirs.append(inforoot)

		if not regen_infodirs:
			portage.writemsg_stdout("\n")
			out.einfo("GNU info directory index is up-to-date.")
		# else: branch header appears elided
			portage.writemsg_stdout("\n")
			out.einfo("Regenerating GNU info directory index...")

			dir_extensions = ("", ".gz", ".bz2")
			# icount/badcount/errmsg initializations appear elided
			for inforoot in regen_infodirs:
				if not os.path.isdir(inforoot) or \
					not os.access(inforoot, os.W_OK):
					# "continue" appears elided

				file_list = os.listdir(inforoot)
				dir_file = os.path.join(inforoot, "dir")
				moved_old_dir = False
				processed_count = 0
				for x in file_list:
					if x.startswith(".") or \
						os.path.isdir(os.path.join(inforoot, x)):
						# "continue" appears elided
					if x.startswith("dir"):
						for ext in dir_extensions:
							if x == "dir" + ext or \
								x == "dir" + ext + ".old":
								# skip bookkeeping appears elided

					if processed_count == 0:
						for ext in dir_extensions:
							# try: header appears elided
								os.rename(dir_file + ext, dir_file + ext + ".old")
								moved_old_dir = True
							except EnvironmentError, e:
								if e.errno != errno.ENOENT:
									# re-raise appears elided

					processed_count += 1
					myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
					existsstr="already exists, for file `"
					if re.search(existsstr,myso):
						# Already exists... Don't increment the count for this.
					elif myso[:44]=="install-info: warning: no info dir entry in ":
						# This info file doesn't contain a DIR-header: install-info produces this
						# (harmless) warning (the --quiet switch doesn't seem to work).
						# Don't increment the count for this.
					# else: branch header appears elided
						badcount=badcount+1
						errmsg += myso + "\n"
					# an icount increment appears elided

				if moved_old_dir and not os.path.exists(dir_file):
					# We didn't generate a new dir file, so put the old file
					# back where it was originally found.
					for ext in dir_extensions:
						# try: header appears elided
							os.rename(dir_file + ext + ".old", dir_file + ext)
						except EnvironmentError, e:
							if e.errno != errno.ENOENT:
								# re-raise appears elided

				# Clean dir.old cruft so that they don't prevent
				# unmerge of otherwise empty directories.
				for ext in dir_extensions:
					# try: header appears elided
						os.unlink(dir_file + ext + ".old")
					except EnvironmentError, e:
						if e.errno != errno.ENOENT:
							# re-raise appears elided

				#update mtime so we can potentially avoid regenerating.
				prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)

		# an "if badcount:" guard appears elided
			out.eerror("Processed %d info files; %d errors." % \
				(icount, badcount))
			writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
		# else: branch header appears elided
			out.einfo("Processed %d info files." % (icount,))
def display_news_notification(root_config, myopts):
	# Check each configured repository for unread news items and print
	# a reminder to run "eselect news"; the update flag passed to
	# checkUpdatedNewsItems is disabled under --pretend.
	target_root = root_config.root
	trees = root_config.trees
	settings = trees["vartree"].settings
	portdb = trees["porttree"].dbapi
	vardb = trees["vartree"].dbapi
	NEWS_PATH = os.path.join("metadata", "news")
	UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
	newsReaderDisplay = False
	update = "--pretend" not in myopts

	for repo in portdb.getRepositories():
		unreadItems = checkUpdatedNewsItems(
			portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
		# an "if unreadItems:" guard appears elided
			if not newsReaderDisplay:
				newsReaderDisplay = True
				# a blank-line print appears elided
			print colorize("WARN", " * IMPORTANT:"),
			print "%s news items need reading for repository '%s'." % (unreadItems, repo)

	if newsReaderDisplay:
		print colorize("WARN", " *"),
		print "Use " + colorize("GOOD", "eselect news") + " to read news items."
def display_preserved_libs(vardbapi):
	# Print a report of preserved libraries recorded in the plib
	# registry, together with the files/packages that still consume
	# them.  NOTE(review): listing appears truncated (MAX_DISPLAY and
	# several map initializations, plus try:/else: headers, are
	# elided); code text below is preserved verbatim.

	# Ensure the registry is consistent with existing files.
	vardbapi.plib_registry.pruneNonExisting()

	if vardbapi.plib_registry.hasEntries():
		print colorize("WARN", "!!!") + " existing preserved libs:"
		plibdata = vardbapi.plib_registry.getPreservedLibs()
		linkmap = vardbapi.linkmap
		# consumer_map initialization appears elided

		linkmap_broken = False

		# a try: around a linkmap rebuild appears elided
		except portage.exception.CommandNotFound, e:
			writemsg_level("!!! Command Not Found: %s\n" % (e,),
				level=logging.ERROR, noiselevel=-1)
			linkmap_broken = True
		# else: branch header appears elided
			search_for_owners = set()
			for cpv in plibdata:
				internal_plib_keys = set(linkmap._obj_key(f) \
					for f in plibdata[cpv])
				for f in plibdata[cpv]:
					if f in consumer_map:
						# "continue" appears elided
					# a consumers-list initialization appears elided
					for c in linkmap.findConsumers(f):
						# Filter out any consumers that are also preserved libs
						# belonging to the same package as the provider.
						if linkmap._obj_key(c) not in internal_plib_keys:
							consumers.append(c)
					# a consumers.sort() presumably precedes this -- TODO confirm
					consumer_map[f] = consumers
					search_for_owners.update(consumers[:MAX_DISPLAY+1])

			owners = vardbapi._owners.getFileOwnerMap(search_for_owners)

		for cpv in plibdata:
			print colorize("WARN", ">>>") + " package: %s" % cpv
			# a samefile_map initialization appears elided
			for f in plibdata[cpv]:
				obj_key = linkmap._obj_key(f)
				alt_paths = samefile_map.get(obj_key)
				if alt_paths is None:
					# an alt_paths set() initialization appears elided
					samefile_map[obj_key] = alt_paths
				# an alt_paths.add(f) appears elided

			for alt_paths in samefile_map.itervalues():
				alt_paths = sorted(alt_paths)
				for p in alt_paths:
					print colorize("WARN", " * ") + " - %s" % (p,)
				# consumer display below is presumably skipped when
				# linkmap_broken -- TODO confirm
				consumers = consumer_map.get(f, [])
				for c in consumers[:MAX_DISPLAY]:
					print colorize("WARN", " * ") + " used by %s (%s)" % \
						(c, ", ".join(x.mycpv for x in owners.get(c, [])))
				if len(consumers) == MAX_DISPLAY + 1:
					print colorize("WARN", " * ") + " used by %s (%s)" % \
						(consumers[MAX_DISPLAY], ", ".join(x.mycpv \
						for x in owners.get(consumers[MAX_DISPLAY], [])))
				elif len(consumers) > MAX_DISPLAY:
					print colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY)
		print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12539 def _flush_elog_mod_echo():
12541 Dump the mod_echo output now so that our other
12542 notifications are shown last.
12544 @returns: True if messages were shown, False otherwise.
12546 messages_shown = False
12548 from portage.elog import mod_echo
12549 except ImportError:
12550 pass # happens during downgrade to a version without the module
12552 messages_shown = bool(mod_echo._items)
12553 mod_echo.finalize()
12554 return messages_shown
def post_emerge(root_config, myopts, mtimedb, retval):
	"""
	Misc. things to run at the end of a merge session.

	Update Config Files
	Display preserved libs warnings

	@param root_config: The root configuration of the merge target
	@param trees: A dictionary mapping each ROOT to it's package databases
	@param myopts: emerge options
	@param mtimedb: The mtimeDB to store data needed across merge invocations
	@type mtimedb: MtimeDB class instance
	@param retval: Emerge's return value
	@returns:
	1.  Calls sys.exit(retval)
	"""
	# NOTE(review): listing appears truncated -- lock/unlock handling
	# and the final sys.exit lines are elided; code text preserved
	# verbatim.
	target_root = root_config.root
	trees = { target_root : root_config.trees }
	vardbapi = trees[target_root]["vartree"].dbapi
	settings = vardbapi.settings
	info_mtimes = mtimedb["info"]

	# Load the most current variables from ${ROOT}/etc/profile.env
	settings.regenerate()

	config_protect = settings.get("CONFIG_PROTECT","").split()
	infodirs = settings.get("INFOPATH","").split(":") + \
		settings.get("INFODIR","").split(":")

	if retval == os.EX_OK:
		exit_msg = " *** exiting successfully."
	# else: branch header appears elided
		exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
	emergelog("notitles" not in settings.features, exit_msg)

	_flush_elog_mod_echo()

	counter_hash = settings.get("PORTAGE_COUNTER_HASH")
	if "--pretend" in myopts or (counter_hash is not None and \
		counter_hash == vardbapi._counter_hash()):
		display_news_notification(root_config, myopts)
		# If vdb state has not changed then there's nothing else to do.
		# (an early sys.exit(retval) appears elided here)

	vdb_path = os.path.join(target_root, portage.VDB_PATH)
	portage.util.ensure_dirs(vdb_path)
	# a vdb_lock initialization appears elided
	if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
		vdb_lock = portage.locks.lockdir(vdb_path)
	# a try: guarded by "if vdb_lock:" appears elided
		if "noinfo" not in settings.features:
			chk_updated_info_files(target_root,
				infodirs, info_mtimes, retval)
		# a finally: releasing the lock appears elided
			portage.locks.unlockdir(vdb_lock)

	chk_updated_cfg_files(target_root, config_protect)

	display_news_notification(root_config, myopts)
	if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
		display_preserved_libs(vardbapi)
	# the documented final sys.exit(retval) appears elided
# NOTE(review): elided view — counter initialization, several branch
# lines (continue/else), and the per-directory count used at 12680 are
# not visible; lines kept verbatim.
12636 def chk_updated_cfg_files(target_root, config_protect):
12638 #number of directories with some protect files in them
# Scan each CONFIG_PROTECT entry (re-rooted under target_root) for
# pending ._cfg????_* update files.
12640 for x in config_protect:
12641 x = os.path.join(target_root, x.lstrip(os.path.sep))
12642 if not os.access(x, os.W_OK):
12643 # Avoid Permission denied errors generated
12647 mymode = os.lstat(x).st_mode
12650 if stat.S_ISLNK(mymode):
12651 # We want to treat it like a directory if it
12652 # is a symlink to an existing directory.
12654 real_mode = os.stat(x).st_mode
12655 if stat.S_ISDIR(real_mode):
# Directories get a recursive find (pruning hidden subdirs); plain
# files get a maxdepth-1 find in their parent directory.
12659 if stat.S_ISDIR(mymode):
12660 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12662 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12663 os.path.split(x.rstrip(os.path.sep))
# Exclude editor backups; -print0 gives NUL-separated output for the
# split('\0') below.
12664 mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12665 a = commands.getstatusoutput(mycommand)
12667 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12669 # Show the error message alone, sending stdout to /dev/null.
12670 os.system(mycommand + " 1>/dev/null")
12672 files = a[1].split('\0')
12673 # split always produces an empty string as the last element
12674 if files and not files[-1]:
12678 print "\n"+colorize("WARN", " * IMPORTANT:"),
12679 if stat.S_ISDIR(mymode):
12680 print "%d config files in '%s' need updating." % \
12683 print "config file '%s' needs updating." % x
# Final hint printed once (guard condition not visible in this view).
12686 print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12687 " section of the " + bold("emerge")
12688 print " "+yellow("*")+" man page to learn how to update config files."
# NOTE(review): elided view — the signature continues past 12690 (an
# `update` keyword parameter is referenced at 12713 but its default is
# not visible) and the docstring delimiters are missing; kept verbatim.
12690 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12693 Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
12694 Returns the number of unread (yet relevent) items.
12696 @param portdb: a portage tree database
12697 @type portdb: pordbapi
12698 @param vardb: an installed package database
12699 @type vardb: vardbapi
12702 @param UNREAD_PATH:
12708 1. The number of unread but relevant news items.
# Thin wrapper: all real work is delegated to portage.news.NewsManager.
12711 from portage.news import NewsManager
12712 manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12713 return manager.getUnreadItems( repo_id, update=update )
# Insert "category/" immediately before the first word character of an
# atom, preserving any leading operator prefix such as ">=" or "=".
# NOTE(review): elided view — the guard around the re.search match and
# the return statement(s) are not visible; presumably returns None when
# the atom contains no word character. Lines kept verbatim.
12715 def insert_category_into_atom(atom, category):
12716 alphanum = re.search(r'\w', atom)
12718 ret = atom[:alphanum.start()] + "%s/" % category + \
12719 atom[alphanum.start():]
# Return whether `x` is a valid package atom, tolerating a missing
# category: a placeholder "cat/" is inserted before the first word
# character so that bare "pkg" / "=pkg-1.0" style input can still be
# validated by portage.isvalidatom.
# NOTE(review): elided view — the condition guarding the insertion
# (presumably checking the search matched and "/" is absent) is missing.
12724 def is_valid_package_atom(x):
12726 alphanum = re.search(r'\w', x)
12728 x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12729 return portage.isvalidatom(x)
# Print a pointer to the Gentoo Handbook section on blocked packages.
# Output only; no return value.
12731 def show_blocker_docs_link():
12733 print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12734 print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12736 print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
# Print a pointer to the documentation on masked packages. Output only.
12739 def show_mask_docs():
12740 print "For more information, see the MASKED PACKAGES section in the emerge"
12741 print "man page or refer to the Gentoo Handbook."
# Sync the portage tree from SYNC (git, rsync, or cvs), then reload the
# emerge configuration, update metadata caches, and run post-sync hooks.
# NOTE(review): this function is shown heavily elided — many guard
# lines, `else:`/`try:` lines, early returns and sys.exit calls are not
# visible. All visible lines are kept verbatim; the section comments
# below describe only what the visible code establishes.
12743 def action_sync(settings, trees, mtimedb, myopts, myaction):
12744 xterm_titles = "notitles" not in settings.features
12745 emergelog(xterm_titles, " === sync")
12746 myportdir = settings.get("PORTDIR", None)
12747 out = portage.output.EOutput()
12749 sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
# Normalize PORTDIR (strip one trailing slash) and make sure the
# directory exists before stat'ing it.
12751 if myportdir[-1]=="/":
12752 myportdir=myportdir[:-1]
12754 st = os.stat(myportdir)
12758 print ">>>",myportdir,"not found, creating it."
12759 os.makedirs(myportdir,0755)
12760 st = os.stat(myportdir)
# With FEATURES=usersync and sufficient privileges, drop to the tree
# owner's uid/gid (and HOME) for the spawned sync commands when the
# tree is not owned by the current user/group.
12763 spawn_kwargs["env"] = settings.environ()
12764 if 'usersync' in settings.features and \
12765 portage.data.secpass >= 2 and \
12766 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12767 st.st_gid != os.getgid() and st.st_mode & 0070):
12769 homedir = pwd.getpwuid(st.st_uid).pw_dir
12773 # Drop privileges when syncing, in order to match
12774 # existing uid/gid settings.
12775 spawn_kwargs["uid"] = st.st_uid
12776 spawn_kwargs["gid"] = st.st_gid
12777 spawn_kwargs["groups"] = [st.st_gid]
12778 spawn_kwargs["env"]["HOME"] = homedir
12780 if not st.st_mode & 0020:
12781 umask = umask | 0020
12782 spawn_kwargs["umask"] = umask
12784 syncuri = settings.get("SYNC", "").strip()
12786 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12787 noiselevel=-1, level=logging.ERROR)
# Detect version-control checkouts inside PORTDIR; used below to pick
# git sync or to abort an rsync over a VCS checkout.
12790 vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12791 vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12794 dosyncuri = syncuri
12795 updatecache_flg = False
# --- "metadata" action: no network sync, just regenerate the cache.
12796 if myaction == "metadata":
12797 print "skipping sync"
12798 updatecache_flg = True
# --- git-based sync: PORTDIR is a git checkout; syncuri is ignored.
12799 elif ".git" in vcs_dirs:
12800 # Update existing git repository, and ignore the syncuri. We are
12801 # going to trust the user and assume that the user is in the branch
12802 # that he/she wants updated. We'll let the user manage branches with
12804 if portage.process.find_binary("git") is None:
12805 msg = ["Command not found: git",
12806 "Type \"emerge dev-util/git\" to enable git support."]
12808 writemsg_level("!!! %s\n" % l,
12809 level=logging.ERROR, noiselevel=-1)
12811 msg = ">>> Starting git pull in %s..." % myportdir
12812 emergelog(xterm_titles, msg )
12813 writemsg_level(msg + "\n")
12814 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12815 (portage._shell_quote(myportdir),), **spawn_kwargs)
12816 if exitcode != os.EX_OK:
12817 msg = "!!! git pull error in %s." % myportdir
12818 emergelog(xterm_titles, msg)
12819 writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12821 msg = ">>> Git pull in %s successful" % myportdir
12822 emergelog(xterm_titles, msg)
12823 writemsg_level(msg + "\n")
# git does not preserve mtimes, so re-synchronize timestamps with the
# metadata cache before trusting it.
12824 exitcode = git_sync_timestamps(settings, myportdir)
12825 if exitcode == os.EX_OK:
12826 updatecache_flg = True
# --- rsync-based sync.
12827 elif syncuri[:8]=="rsync://":
# Refuse to rsync over a VCS checkout: rsync --delete would clobber it.
12828 for vcs_dir in vcs_dirs:
12829 writemsg_level(("!!! %s appears to be under revision " + \
12830 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12831 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12833 if not os.path.exists("/usr/bin/rsync"):
12834 print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12835 print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
# Build the rsync option list: hardcoded defaults when
# PORTAGE_RSYNC_OPTS is unset, otherwise validate the user's options.
12840 if settings["PORTAGE_RSYNC_OPTS"] == "":
12841 portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12842 rsync_opts.extend([
12843 "--recursive", # Recurse directories
12844 "--links", # Consider symlinks
12845 "--safe-links", # Ignore links outside of tree
12846 "--perms", # Preserve permissions
12847 "--times", # Preserive mod times
12848 "--compress", # Compress the data transmitted
12849 "--force", # Force deletion on non-empty dirs
12850 "--whole-file", # Don't do block transfers, only entire files
12851 "--delete", # Delete files that aren't in the master tree
12852 "--stats", # Show final statistics about what was transfered
12853 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12854 "--exclude=/distfiles", # Exclude distfiles from consideration
12855 "--exclude=/local", # Exclude local from consideration
12856 "--exclude=/packages", # Exclude packages from consideration
12860 # The below validation is not needed when using the above hardcoded
12863 portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12865 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
# Force options the sync logic depends on, warning when the user's
# PORTAGE_RSYNC_OPTS omitted them.
12866 for opt in ("--recursive", "--times"):
12867 if opt not in rsync_opts:
12868 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12869 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12870 rsync_opts.append(opt)
12872 for exclude in ("distfiles", "local", "packages"):
12873 opt = "--exclude=/%s" % exclude
12874 if opt not in rsync_opts:
12875 portage.writemsg(yellow("WARNING:") + \
12876 " adding required option %s not included in " % opt + \
12877 "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12878 rsync_opts.append(opt)
# Extra mandatory options when syncing against official gentoo.org
# mirrors.
12880 if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12881 def rsync_opt_startswith(opt_prefix):
12882 for x in rsync_opts:
12883 if x.startswith(opt_prefix):
12887 if not rsync_opt_startswith("--timeout="):
12888 rsync_opts.append("--timeout=%d" % mytimeout)
12890 for opt in ("--compress", "--whole-file"):
12891 if opt not in rsync_opts:
12892 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12893 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12894 rsync_opts.append(opt)
# Map emerge verbosity flags onto rsync verbosity flags.
12896 if "--quiet" in myopts:
12897 rsync_opts.append("--quiet") # Shut up a lot
12899 rsync_opts.append("--verbose") # Print filelist
12901 if "--verbose" in myopts:
12902 rsync_opts.append("--progress") # Progress meter for each file
12904 if "--debug" in myopts:
12905 rsync_opts.append("--checksum") # Force checksum on all files
12907 # Real local timestamp file.
12908 servertimestampfile = os.path.join(
12909 myportdir, "metadata", "timestamp.chk")
# Parse the local tree timestamp (0 / fallback when missing or
# unparseable — the except branch body is elided here).
12911 content = portage.util.grabfile(servertimestampfile)
12915 mytimestamp = time.mktime(time.strptime(content[0],
12916 "%a, %d %b %Y %H:%M:%S +0000"))
12917 except (OverflowError, ValueError):
12922 rsync_initial_timeout = \
12923 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12925 rsync_initial_timeout = 15
12928 maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12929 except SystemExit, e:
12930 raise # Needed else can't exit
12932 maxretries=3 #default number of retries
# Split the rsync URI into user, host, and port parts for DNS
# round-robin handling below.
12935 user_name, hostname, port = re.split(
12936 "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12939 if user_name is None:
12941 updatecache_flg=True
12942 all_rsync_opts = set(rsync_opts)
12943 extra_rsync_opts = shlex.split(
12944 settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12945 all_rsync_opts.update(extra_rsync_opts)
# Address family selection: IPv4 by default, IPv6 only when requested
# via -6/--ipv6 and supported by the socket module.
12946 family = socket.AF_INET
12947 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12948 family = socket.AF_INET
12949 elif socket.has_ipv6 and \
12950 ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12951 family = socket.AF_INET6
# Sentinel exit codes used by the retry loop below.
12953 SERVER_OUT_OF_DATE = -1
12954 EXCEEDED_MAX_RETRIES = -2
# Resolve the mirror hostname to a shuffled list of addresses so
# retries rotate through mirrors (shuffle call itself is elided).
12960 for addrinfo in socket.getaddrinfo(
12961 hostname, None, family, socket.SOCK_STREAM):
12962 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12963 # IPv6 addresses need to be enclosed in square brackets
12964 ips.append("[%s]" % addrinfo[4][0])
12966 ips.append(addrinfo[4][0])
12967 from random import shuffle
12969 except SystemExit, e:
12970 raise # Needed else can't exit
12971 except Exception, e:
12972 print "Notice:",str(e)
12977 dosyncuri = syncuri.replace(
12978 "//" + user_name + hostname + port + "/",
12979 "//" + user_name + ips[0] + port + "/", 1)
12980 except SystemExit, e:
12981 raise # Needed else can't exit
12982 except Exception, e:
12983 print "Notice:",str(e)
12987 if "--ask" in myopts:
12988 if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12993 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12994 if "--quiet" not in myopts:
12995 print ">>> Starting rsync with "+dosyncuri+"..."
12997 emergelog(xterm_titles,
12998 ">>> Starting retry %d of %d with %s" % \
12999 (retries,maxretries,dosyncuri))
13000 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
13002 if mytimestamp != 0 and "--quiet" not in myopts:
13003 print ">>> Checking server timestamp ..."
13005 rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
13007 if "--debug" in myopts:
13010 exitcode = os.EX_OK
13011 servertimestamp = 0
13012 # Even if there's no timestamp available locally, fetch the
13013 # timestamp anyway as an initial probe to verify that the server is
13014 # responsive. This protects us from hanging indefinitely on a
13015 # connection attempt to an unresponsive server which rsync's
13016 # --timeout option does not prevent.
13018 # Temporary file for remote server timestamp comparison.
13019 from tempfile import mkstemp
13020 fd, tmpservertimestampfile = mkstemp()
13022 mycommand = rsynccommand[:]
13023 mycommand.append(dosyncuri.rstrip("/") + \
13024 "/metadata/timestamp.chk")
13025 mycommand.append(tmpservertimestampfile)
# SIGALRM-based timeout around the initial timestamp fetch, since
# rsync's own --timeout does not cover the connection attempt.
13029 def timeout_handler(signum, frame):
13030 raise portage.exception.PortageException("timed out")
13031 signal.signal(signal.SIGALRM, timeout_handler)
13032 # Timeout here in case the server is unresponsive. The
13033 # --timeout rsync option doesn't apply to the initial
13034 # connection attempt.
13035 if rsync_initial_timeout:
13036 signal.alarm(rsync_initial_timeout)
13038 mypids.extend(portage.process.spawn(
13039 mycommand, env=settings.environ(), returnpid=True))
13040 exitcode = os.waitpid(mypids[0], 0)[1]
13041 content = portage.grabfile(tmpservertimestampfile)
13043 if rsync_initial_timeout:
13046 os.unlink(tmpservertimestampfile)
13049 except portage.exception.PortageException, e:
# On timeout: kill the still-running rsync child if necessary.
13053 if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
13054 os.kill(mypids[0], signal.SIGTERM)
13055 os.waitpid(mypids[0], 0)
13056 # This is the same code rsync uses for timeout.
# Convert the raw waitpid status into a shell-style exit code.
13059 if exitcode != os.EX_OK:
13060 if exitcode & 0xff:
13061 exitcode = (exitcode & 0xff) << 8
13063 exitcode = exitcode >> 8
13065 portage.process.spawned_pids.remove(mypids[0])
13068 servertimestamp = time.mktime(time.strptime(
13069 content[0], "%a, %d %b %Y %H:%M:%S +0000"))
13070 except (OverflowError, ValueError):
13072 del mycommand, mypids, content
# Compare server vs local timestamps: equal → already current;
# older → server out of date; newer/unknown → do the full sync.
13073 if exitcode == os.EX_OK:
13074 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
13075 emergelog(xterm_titles,
13076 ">>> Cancelling sync -- Already current.")
13079 print ">>> Timestamps on the server and in the local repository are the same."
13080 print ">>> Cancelling all further sync action. You are already up to date."
13082 print ">>> In order to force sync, remove '%s'." % servertimestampfile
13086 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
13087 emergelog(xterm_titles,
13088 ">>> Server out of date: %s" % dosyncuri)
13091 print ">>> SERVER OUT OF DATE: %s" % dosyncuri
13093 print ">>> In order to force sync, remove '%s'." % servertimestampfile
13096 exitcode = SERVER_OUT_OF_DATE
13097 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
13099 mycommand = rsynccommand + [dosyncuri+"/", myportdir]
13100 exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
# rsync exit codes 1,3,4,11,14,20,21 are treated as retryable
# (see the rsync(1) EXIT VALUES table).
13101 if exitcode in [0,1,3,4,11,14,20,21]:
13103 elif exitcode in [1,3,4,11,14,20,21]:
13106 # Code 2 indicates protocol incompatibility, which is expected
13107 # for servers with protocol < 29 that don't support
13108 # --prune-empty-directories. Retry for a server that supports
13109 # at least rsync protocol version 29 (>=rsync-2.6.4).
13114 if retries<=maxretries:
13115 print ">>> Retrying..."
13120 updatecache_flg=False
13121 exitcode = EXCEEDED_MAX_RETRIES
13125 emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
13126 elif exitcode == SERVER_OUT_OF_DATE:
13128 elif exitcode == EXCEEDED_MAX_RETRIES:
13130 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
# Human-readable diagnosis of the final rsync failure (the exit-code
# guards selecting each message are elided in this view).
13135 msg.append("Rsync has reported that there is a syntax error. Please ensure")
13136 msg.append("that your SYNC statement is proper.")
13137 msg.append("SYNC=" + settings["SYNC"])
13139 msg.append("Rsync has reported that there is a File IO error. Normally")
13140 msg.append("this means your disk is full, but can be caused by corruption")
13141 msg.append("on the filesystem that contains PORTDIR. Please investigate")
13142 msg.append("and try again after the problem has been fixed.")
13143 msg.append("PORTDIR=" + settings["PORTDIR"])
13145 msg.append("Rsync was killed before it finished.")
13147 msg.append("Rsync has not successfully finished. It is recommended that you keep")
13148 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
13149 msg.append("to use rsync due to firewall or other restrictions. This should be a")
13150 msg.append("temporary problem unless complications exist with your network")
13151 msg.append("(and possibly your system's filesystem) configuration.")
# --- cvs-based sync.
13155 elif syncuri[:6]=="cvs://":
13156 if not os.path.exists("/usr/bin/cvs"):
13157 print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
13158 print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
13160 cvsroot=syncuri[6:]
13161 cvsdir=os.path.dirname(myportdir)
13162 if not os.path.exists(myportdir+"/CVS"):
# Initial checkout: PORTDIR must be empty/absent, and no stale
# gentoo-x86 checkout may exist next to it.
13164 print ">>> Starting initial cvs checkout with "+syncuri+"..."
13165 if os.path.exists(cvsdir+"/gentoo-x86"):
13166 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
13169 os.rmdir(myportdir)
13171 if e.errno != errno.ENOENT:
13173 "!!! existing '%s' directory; exiting.\n" % myportdir)
13176 if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
13177 print "!!! cvs checkout error; exiting."
13179 os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
13182 print ">>> Starting cvs update with "+syncuri+"..."
13183 retval = portage.process.spawn_bash(
13184 "cd %s; cvs -z0 -q update -dP" % \
13185 (portage._shell_quote(myportdir),), **spawn_kwargs)
13186 if retval != os.EX_OK:
13188 dosyncuri = syncuri
# --- unknown protocol.
13190 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
13191 noiselevel=-1, level=logging.ERROR)
# Post-sync: refresh config/trees, regenerate the metadata cache if
# requested, apply global package moves, and run the user's post_sync
# hook.
13194 if updatecache_flg and \
13195 myaction != "metadata" and \
13196 "metadata-transfer" not in settings.features:
13197 updatecache_flg = False
13199 # Reload the whole config from scratch.
13200 settings, trees, mtimedb = load_emerge_config(trees=trees)
13201 root_config = trees[settings["ROOT"]]["root_config"]
13202 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13204 if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
13205 action_metadata(settings, portdb, myopts)
13207 if portage._global_updates(trees, mtimedb["updates"]):
13209 # Reload the whole config from scratch.
13210 settings, trees, mtimedb = load_emerge_config(trees=trees)
13211 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13212 root_config = trees[settings["ROOT"]]["root_config"]
# Compare the best visible portage version against the installed one
# to decide whether to nag about updating portage itself.
13214 mybestpv = portdb.xmatch("bestmatch-visible",
13215 portage.const.PORTAGE_PACKAGE_ATOM)
13216 mypvs = portage.best(
13217 trees[settings["ROOT"]]["vartree"].dbapi.match(
13218 portage.const.PORTAGE_PACKAGE_ATOM))
13220 chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
13222 if myaction != "metadata":
13223 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
13224 retval = portage.process.spawn(
13225 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
13226 dosyncuri], env=settings.environ())
13227 if retval != os.EX_OK:
13228 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
13230 if(mybestpv != mypvs) and not "--quiet" in myopts:
13232 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
13233 print red(" * ")+"that you update portage now, before any other packages are updated."
13235 print red(" * ")+"To update portage, run 'emerge portage' now."
13238 display_news_notification(root_config, myopts)
# NOTE(review): elided view — docstring delimiters, several `continue`/
# `return` lines, and some try/except scaffolding are not visible;
# lines kept verbatim.
13241 def git_sync_timestamps(settings, portdir):
13243 Since git doesn't preserve timestamps, synchronize timestamps between
13244 entries and ebuilds/eclasses. Assume the cache has the correct timestamp
13245 for a given file as long as the file in the working tree is not modified
13246 (relative to HEAD).
13248 cache_dir = os.path.join(portdir, "metadata", "cache")
13249 if not os.path.isdir(cache_dir):
13251 writemsg_level(">>> Synchronizing timestamps...\n")
13253 from portage.cache.cache_errors import CacheError
13255 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
13256 portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13257 except CacheError, e:
13258 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
13259 level=logging.ERROR, noiselevel=-1)
# Collect the set of available eclass names (file stem without the
# ".eclass" suffix).
13262 ec_dir = os.path.join(portdir, "eclass")
13264 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13265 if f.endswith(".eclass"))
13267 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13268 level=logging.ERROR, noiselevel=-1)
# Ask git which tracked files were modified relative to HEAD; their
# cached timestamps cannot be trusted.
13271 args = [portage.const.BASH_BINARY, "-c",
13272 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13273 portage._shell_quote(portdir)]
13275 proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13276 modified_files = set(l.rstrip("\n") for l in proc.stdout)
13278 if rval != os.EX_OK:
13281 modified_eclasses = set(ec for ec in ec_names \
13282 if os.path.join("eclass", ec + ".eclass") in modified_files)
# ec -> mtime already applied to the on-disk eclass during this run;
# used to detect cache entries that disagree with each other.
13284 updated_ec_mtimes = {}
13286 for cpv in cache_db:
13287 cpv_split = portage.catpkgsplit(cpv)
13288 if cpv_split is None:
13289 writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13290 level=logging.ERROR, noiselevel=-1)
13293 cat, pn, ver, rev = cpv_split
13294 cat, pf = portage.catsplit(cpv)
13295 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
# Skip ebuilds modified in the working tree — their cached mtime is
# stale by definition.
13296 if relative_eb_path in modified_files:
13300 cache_entry = cache_db[cpv]
13301 eb_mtime = cache_entry.get("_mtime_")
13302 ec_mtimes = cache_entry.get("_eclasses_")
13304 writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13305 level=logging.ERROR, noiselevel=-1)
13307 except CacheError, e:
13308 writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13309 (cpv, e), level=logging.ERROR, noiselevel=-1)
13312 if eb_mtime is None:
13313 writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13314 level=logging.ERROR, noiselevel=-1)
13318 eb_mtime = long(eb_mtime)
13320 writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13321 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13324 if ec_mtimes is None:
13325 writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13326 level=logging.ERROR, noiselevel=-1)
13329 if modified_eclasses.intersection(ec_mtimes):
13332 missing_eclasses = set(ec_mtimes).difference(ec_names)
13333 if missing_eclasses:
13334 writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13335 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13339 eb_path = os.path.join(portdir, relative_eb_path)
# NOTE(review): os.stat() returns a stat_result, not an mtime; the
# comparison at 13359 against the integer eb_mtime looks suspect —
# presumably this should be os.stat(eb_path).st_mtime (or long() of
# it). Can't confirm from the elided view, so left untouched.
13341 current_eb_mtime = os.stat(eb_path)
13343 writemsg_level("!!! Missing ebuild: %s\n" % \
13344 (cpv,), level=logging.ERROR, noiselevel=-1)
# Refuse to act on a cpv whose cache disagrees with an eclass mtime we
# have already applied this run.
13347 inconsistent = False
13348 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13349 updated_mtime = updated_ec_mtimes.get(ec)
13350 if updated_mtime is not None and updated_mtime != ec_mtime:
13351 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13352 (cpv, ec), level=logging.ERROR, noiselevel=-1)
13353 inconsistent = True
# Stamp the ebuild and its eclasses with the mtimes recorded in the
# cache entry so the cache validates against the working tree.
13359 if current_eb_mtime != eb_mtime:
13360 os.utime(eb_path, (eb_mtime, eb_mtime))
13362 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13363 if ec in updated_ec_mtimes:
13365 ec_path = os.path.join(ec_dir, ec + ".eclass")
13366 current_mtime = long(os.stat(ec_path).st_mtime)
13367 if current_mtime != ec_mtime:
13368 os.utime(ec_path, (ec_mtime, ec_mtime))
13369 updated_ec_mtimes[ec] = ec_mtime
# Transfer the tree's metadata/cache into portage's own cache
# (depcachedir), with either quiet or percentage-progress reporting.
# NOTE(review): elided view — sys.exit/early-return lines, the nested
# class's base __init__ call, and parts of __iter__/update are missing;
# lines kept verbatim.
13373 def action_metadata(settings, portdb, myopts):
13374 portage.writemsg_stdout("\n>>> Updating Portage cache: ")
# Group-writable cache files (umask 002) for the duration of the run;
# restored at 13442.
13375 old_umask = os.umask(0002)
13376 cachedir = os.path.normpath(settings.depcachedir)
# Sanity check: refuse to operate on a top-level system directory,
# which a mis-set PORTAGE_DEPCACHEDIR could point at.
13377 if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
13378 "/lib", "/opt", "/proc", "/root", "/sbin",
13379 "/sys", "/tmp", "/usr", "/var"]:
13380 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13381 "ROOT DIRECTORY ON YOUR SYSTEM."
13382 print >> sys.stderr, \
13383 "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13385 if not os.path.exists(cachedir):
13388 ec = portage.eclass_cache.cache(portdb.porttree_root)
13389 myportdir = os.path.realpath(settings["PORTDIR"])
# Source cache: the tree's pregenerated metadata/cache, read-only.
13390 cm = settings.load_best_module("portdbapi.metadbmodule")(
13391 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13393 from portage.cache import util
# Progress reporter that doubles as the cpv source iterator; prints a
# percentage based on the category/package count.
13395 class percentage_noise_maker(util.quiet_mirroring):
13396 def __init__(self, dbapi):
13398 self.cp_all = dbapi.cp_all()
13399 l = len(self.cp_all)
13400 self.call_update_min = 100000000
13401 self.min_cp_all = l/100.0
13405 def __iter__(self):
13406 for x in self.cp_all:
13408 if self.count > self.min_cp_all:
13409 self.call_update_min = 0
13411 for y in self.dbapi.cp_list(x):
13413 self.call_update_mine = 0
13415 def update(self, *arg):
13417 self.pstr = int(self.pstr) + 1
# Backspace over the previous percentage before printing the new one.
13420 sys.stdout.write("%s%i%%" % \
13421 ("\b" * (len(str(self.pstr))+1), self.pstr))
13423 self.call_update_min = 10000000
13425 def finish(self, *arg):
13426 sys.stdout.write("\b\b\b\b100%\n")
# --quiet: plain generator over all cpvs with a silent reporter;
# otherwise the percentage reporter above supplies both roles.
13429 if "--quiet" in myopts:
13430 def quicky_cpv_generator(cp_all_list):
13431 for x in cp_all_list:
13432 for y in portdb.cp_list(x):
13434 source = quicky_cpv_generator(portdb.cp_all())
13435 noise_maker = portage.cache.util.quiet_mirroring()
13437 noise_maker = source = percentage_noise_maker(portdb)
13438 portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
13439 eclass_cache=ec, verbose_instance=noise_maker)
13442 os.umask(old_umask)
# Regenerate metadata cache entries for the whole tree via
# MetadataRegen, honoring --jobs/--load-average limits.
# NOTE(review): elided view — the try: around os.close, the generic
# except branch, and the regen.run() call are not visible; kept verbatim.
13444 def action_regen(settings, portdb, max_jobs, max_load):
13445 xterm_titles = "notitles" not in settings.features
13446 emergelog(xterm_titles, " === regen")
13447 #regenerate cache entries
13448 portage.writemsg_stdout("Regenerating cache entries...\n")
# Close stdin (best-effort) — regen is non-interactive.
13450 os.close(sys.stdin.fileno())
13451 except SystemExit, e:
13452 raise # Needed else can't exit
13457 regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13460 portage.writemsg_stdout("done!\n")
13461 return regen.returncode
# Run the pkg_config phase for a single installed package selected by
# the atom in myfiles[0], prompting to disambiguate multiple matches.
# NOTE(review): elided view — sys.exit lines, several else branches,
# and parts of the --ask selection loop are missing; kept verbatim.
13463 def action_config(settings, trees, myopts, myfiles):
13464 if len(myfiles) != 1:
13465 print red("!!! config can only take a single package atom at this time\n")
13467 if not is_valid_package_atom(myfiles[0]):
13468 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13470 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13471 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
# Match the atom against installed packages only (vartree).
13475 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13476 except portage.exception.AmbiguousPackageName, e:
13477 # Multiple matches thrown from cpv_expand
13480 print "No packages found.\n"
# Multiple matches: interactively select with --ask, otherwise list
# them and bail.
13482 elif len(pkgs) > 1:
13483 if "--ask" in myopts:
13485 print "Please select a package to configure:"
13489 options.append(str(idx))
13490 print options[-1]+") "+pkg
13492 options.append("X")
13493 idx = userquery("Selection?", options)
13496 pkg = pkgs[int(idx)-1]
13498 print "The following packages available:"
13501 print "\nPlease use a specific atom or the --ask option."
13507 if "--ask" in myopts:
13508 if userquery("Ready to configure "+pkg+"?") == "No":
13511 print "Configuring pkg..."
13513 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13514 mysettings = portage.config(clone=settings)
13515 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13516 debug = mysettings.get("PORTAGE_DEBUG") == "1"
# NOTE(review): the debug expression at 13519 compares a string to the
# int 1 (always False) even though `debug` was computed correctly at
# 13516 — looks like a latent bug, but the doebuild call spans elided
# lines so it is left untouched here.
13517 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13519 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
13520 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
# On success, run the clean phase to remove the temporary build dir.
13521 if retval == os.EX_OK:
13522 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13523 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
13526 def action_info(settings, trees, myopts, myfiles):
13527 print getportageversion(settings["PORTDIR"], settings["ROOT"],
13528 settings.profile_path, settings["CHOST"],
13529 trees[settings["ROOT"]]["vartree"].dbapi)
13531 header_title = "System Settings"
13533 print header_width * "="
13534 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13535 print header_width * "="
13536 print "System uname: "+platform.platform(aliased=1)
13538 lastSync = portage.grabfile(os.path.join(
13539 settings["PORTDIR"], "metadata", "timestamp.chk"))
13540 print "Timestamp of tree:",
13546 output=commands.getstatusoutput("distcc --version")
13548 print str(output[1].split("\n",1)[0]),
13549 if "distcc" in settings.features:
13554 output=commands.getstatusoutput("ccache -V")
13556 print str(output[1].split("\n",1)[0]),
13557 if "ccache" in settings.features:
13562 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13563 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
13564 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13565 myvars = portage.util.unique_array(myvars)
13569 if portage.isvalidatom(x):
13570 pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13571 pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13572 pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13574 for pn, ver, rev in pkg_matches:
13576 pkgs.append(ver + "-" + rev)
13580 pkgs = ", ".join(pkgs)
13581 print "%-20s %s" % (x+":", pkgs)
13583 print "%-20s %s" % (x+":", "[NOT VALID]")
13585 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
13587 if "--verbose" in myopts:
13588 myvars=settings.keys()
13590 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13591 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13592 'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13593 'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13595 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13597 myvars = portage.util.unique_array(myvars)
13598 use_expand = settings.get('USE_EXPAND', '').split()
13600 use_expand_hidden = set(
13601 settings.get('USE_EXPAND_HIDDEN', '').upper().split())
13602 alphabetical_use = '--alphabetical' in myopts
13603 root_config = trees[settings["ROOT"]]['root_config']
13609 print '%s="%s"' % (x, settings[x])
13611 use = set(settings["USE"].split())
13612 for varname in use_expand:
13613 flag_prefix = varname.lower() + "_"
13614 for f in list(use):
13615 if f.startswith(flag_prefix):
13619 print 'USE="%s"' % " ".join(use),
13620 for varname in use_expand:
13621 myval = settings.get(varname)
13623 print '%s="%s"' % (varname, myval),
13626 unset_vars.append(x)
13628 print "Unset: "+", ".join(unset_vars)
13631 if "--debug" in myopts:
13632 for x in dir(portage):
13633 module = getattr(portage, x)
13634 if "cvs_id_string" in dir(module):
13635 print "%s: %s" % (str(x), str(module.cvs_id_string))
13637 # See if we can find any packages installed matching the strings
13638 # passed on the command line
13640 vardb = trees[settings["ROOT"]]["vartree"].dbapi
13641 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13643 mypkgs.extend(vardb.match(x))
13645 # If some packages were found...
13647 # Get our global settings (we only print stuff if it varies from
13648 # the current config)
13649 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13650 auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
13651 auxkeys.append('DEFINED_PHASES')
13653 pkgsettings = portage.config(clone=settings)
13655 for myvar in mydesiredvars:
13656 global_vals[myvar] = set(settings.get(myvar, "").split())
13658 # Loop through each package
13659 # Only print settings if they differ from global settings
13660 header_title = "Package Settings"
13661 print header_width * "="
13662 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13663 print header_width * "="
13664 from portage.output import EOutput
13667 # Get all package specific variables
13668 metadata = dict(izip(auxkeys, vardb.aux_get(cpv, auxkeys)))
13669 pkg = Package(built=True, cpv=cpv,
13670 installed=True, metadata=izip(Package.metadata_keys,
13671 (metadata.get(x, '') for x in Package.metadata_keys)),
13672 root_config=root_config, type_name='installed')
13675 valuesmap[k] = set(metadata[k].split())
13678 for myvar in mydesiredvars:
13679 # If the package variable doesn't match the
13680 # current global variable, something has changed
13681 # so set diff_found so we know to print
13682 if valuesmap[myvar] != global_vals[myvar]:
13683 diff_values[myvar] = valuesmap[myvar]
13685 print "\n%s was built with the following:" % \
13686 colorize("INFORM", str(pkg.cpv))
13688 pkgsettings.setcpv(pkg)
13689 forced_flags = set(chain(pkgsettings.useforce,
13690 pkgsettings.usemask))
13691 use = set(pkg.use.enabled)
13692 use.discard(pkgsettings.get('ARCH'))
13693 use_expand_flags = set()
13696 for varname in use_expand:
13697 flag_prefix = varname.lower() + "_"
13699 if f.startswith(flag_prefix):
13700 use_expand_flags.add(f)
13701 use_enabled.setdefault(
13702 varname.upper(), []).append(f[len(flag_prefix):])
13704 for f in pkg.iuse.all:
13705 if f.startswith(flag_prefix):
13706 use_expand_flags.add(f)
13708 use_disabled.setdefault(
13709 varname.upper(), []).append(f[len(flag_prefix):])
13711 var_order = set(use_enabled)
13712 var_order.update(use_disabled)
13713 var_order = sorted(var_order)
13714 var_order.insert(0, 'USE')
13715 use.difference_update(use_expand_flags)
13716 use_enabled['USE'] = list(use)
13717 use_disabled['USE'] = []
13719 for f in pkg.iuse.all:
13720 if f not in use and \
13721 f not in use_expand_flags:
13722 use_disabled['USE'].append(f)
13724 for varname in var_order:
13725 if varname in use_expand_hidden:
13728 for f in use_enabled.get(varname, []):
13729 flags.append(UseFlagDisplay(f, True, f in forced_flags))
13730 for f in use_disabled.get(varname, []):
13731 flags.append(UseFlagDisplay(f, False, f in forced_flags))
13732 if alphabetical_use:
13733 flags.sort(key=UseFlagDisplay.sort_combined)
13735 flags.sort(key=UseFlagDisplay.sort_separated)
13736 print '%s="%s"' % (varname, ' '.join(str(f) for f in flags)),
13739 # If a difference was found, print the info for
13742 # Print package info
13743 for myvar in mydesiredvars:
13744 if myvar in diff_values:
13745 mylist = list(diff_values[myvar])
13747 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13750 if metadata['DEFINED_PHASES']:
13751 if 'info' not in metadata['DEFINED_PHASES'].split():
13754 print ">>> Attempting to run pkg_info() for '%s'" % pkg.cpv
13755 ebuildpath = vardb.findname(pkg.cpv)
13756 if not ebuildpath or not os.path.exists(ebuildpath):
13757 out.ewarn("No ebuild found for '%s'" % pkg.cpv)
13759 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13760 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
13761 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
13764 def action_search(root_config, myopts, myfiles, spinner):
# Handle `emerge --search`: run each command-line search term through a
# search() instance configured from the relevant command-line options
# (--searchdesc, --quiet, --usepkg, --usepkgonly).
# NOTE(review): this listing is elided (the embedded source line numbers
# jump) -- the `if not myfiles:` guard before the message below and the
# `try:` that pairs with the `except re.error` handler are among the lines
# not visible here; verify against the full file.
13766 print "emerge: no search terms provided."
13768 searchinstance = search(root_config,
13769 spinner, "--searchdesc" in myopts,
13770 "--quiet" not in myopts, "--usepkg" in myopts,
13771 "--usepkgonly" in myopts)
# Each term is searched and printed individually; a malformed regular
# expression in a term is reported together with the offending pattern.
13772 for mysearch in myfiles:
13774 searchinstance.execute(mysearch)
13775 except re.error, comment:
13776 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13778 searchinstance.output()
13780 def action_uninstall(settings, trees, ldpath_mtimes,
13781 opts, action, files, spinner):
# Entry point for the unmerge-style actions ('clean', 'unmerge', 'prune'):
# validate the package atoms supplied on the command line, expand short
# names against the installed-package database, then dispatch either to
# unmerge() directly or to action_depclean() for dependency-aware removal.
# NOTE(review): this listing is elided (embedded line numbers jump), so
# some statements between the visible lines -- e.g. the loop header over
# the user-supplied atoms, the `try:` paired with the AmbiguousPackageName
# handler, and the error-exit paths -- are not shown here.
13783 # For backward compat, some actions do not require leading '='.
13784 ignore_missing_eq = action in ('clean', 'unmerge')
# vardb: database of currently-installed packages for the target ROOT.
13785 vardb = trees[settings["ROOT"]]["vartree"].dbapi
13788 # Ensure atoms are valid before calling unmerge().
13789 # For backward compat, leading '=' is not required.
13791 if not (is_valid_package_atom(x) or \
13792 (ignore_missing_eq and is_valid_package_atom("=" + x))):
13795 msg.append("'%s' is not a valid package atom." % (x,))
13796 msg.append("Please check ebuild(5) for full details.")
13797 writemsg_level("".join("!!! %s\n" % line for line in msg),
13798 level=logging.ERROR, noiselevel=-1)
# dep_expand() resolves a short name like "foo" into a fully-qualified
# category/name atom using the installed-package database.
13802 valid_atoms.append(
13803 portage.dep_expand(x, mydb=vardb, settings=settings))
# A short name matching packages in several categories cannot be expanded;
# list the candidates for the user (the abort path is not visible here).
13804 except portage.exception.AmbiguousPackageName, e:
13805 msg = "The short ebuild name \"" + x + \
13806 "\" is ambiguous. Please specify " + \
13807 "one of the following " + \
13808 "fully-qualified ebuild names instead:"
13809 for line in textwrap.wrap(msg, 70):
13810 writemsg_level("!!! %s\n" % (line,),
13811 level=logging.ERROR, noiselevel=-1)
13813 writemsg_level(" %s\n" % colorize("INFORM", i),
13814 level=logging.ERROR, noiselevel=-1)
13815 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
# 'clean'/'unmerge' (and 'prune' with --nodeps) skip dependency calculation
# and unmerge the validated atoms directly; only 'unmerge' preserves the
# order the atoms were given in.
13818 if action in ('clean', 'unmerge') or \
13819 (action == 'prune' and "--nodeps" in opts):
13820 # When given a list of atoms, unmerge them in the order given.
13821 ordered = action == 'unmerge'
13822 unmerge(trees[settings["ROOT"]]['root_config'], opts, action,
13823 valid_atoms, ldpath_mtimes, ordered=ordered)
# Otherwise fall through to the dependency-aware depclean/prune logic.
13826 rval = action_depclean(settings, trees, ldpath_mtimes,
13827 opts, action, valid_atoms, spinner)
13831 def action_depclean(settings, trees, ldpath_mtimes,
13832 myopts, action, myfiles, spinner):
# Core of `emerge --depclean` / `--prune`: compute the set of installed
# packages that are not required (directly or transitively) by the world
# and system sets -- or, for prune, that are not the highest installed
# version -- then unmerge them in an order that respects inter-package
# dependencies.  Packages that are the sole providers of shared libraries
# still in use are preserved.
# NOTE(review): this listing is elided -- the embedded source line numbers
# jump, so many statements (loop headers, try:/else: lines, returns) are
# not visible here.  Comments describe only what the visible lines show.
13833 # Kill packages that aren't explicitly merged or are required as a
13834 # dependency of another package. World file is explicit.
13836 # Global depclean or prune operations are not very safe when there are
13837 # missing dependencies since it's unknown how badly incomplete
13838 # the dependency graph is, and we might accidentally remove packages
13839 # that should have been pulled into the graph. On the other hand, it's
13840 # relatively safe to ignore missing deps when only asked to remove
13841 # specific packages.
13842 allow_missing_deps = len(myfiles) > 0
# Safety-warning text shown before a global depclean (see the condition on
# the `if action == "depclean"` line below).
13845 msg.append("Always study the list of packages to be cleaned for any obvious\n")
13846 msg.append("mistakes. Packages that are part of the world set will always\n")
13847 msg.append("be kept. They can be manually added to this set with\n")
13848 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
13849 msg.append("package.provided (see portage(5)) will be removed by\n")
13850 msg.append("depclean, even if they are part of the world set.\n")
13852 msg.append("As a safety measure, depclean will not remove any packages\n")
13853 msg.append("unless *all* required dependencies have been resolved. As a\n")
13854 msg.append("consequence, it is often necessary to run %s\n" % \
13855 good("`emerge --update"))
13856 msg.append(good("--newuse --deep @system @world`") + \
13857 " prior to depclean.\n")
13859 if action == "depclean" and "--quiet" not in myopts and not myfiles:
13860 portage.writemsg_stdout("\n")
13862 portage.writemsg_stdout(colorize("WARN", " * ") + x)
13864 xterm_titles = "notitles" not in settings.features
13865 myroot = settings["ROOT"]
13866 root_config = trees[myroot]["root_config"]
13867 getSetAtoms = root_config.setconfig.getSetAtoms
13868 vardb = trees[myroot]["vartree"].dbapi
# Build InternalPackageSet copies of the system and world sets; these seed
# the graph of packages that must be kept.
13870 required_set_names = ("system", "world")
13874 for s in required_set_names:
13875 required_sets[s] = InternalPackageSet(
13876 initial_atoms=getSetAtoms(s))
13879 # When removing packages, use a temporary version of world
13880 # which excludes packages that are intended to be eligible for
13882 world_temp_set = required_sets["world"]
13883 system_set = required_sets["system"]
# Missing system/world sets make the computed "required" graph unreliable;
# warn and (without --pretend) give the user a countdown to abort.
13885 if not system_set or not world_temp_set:
13888 writemsg_level("!!! You have no system list.\n",
13889 level=logging.ERROR, noiselevel=-1)
13891 if not world_temp_set:
13892 writemsg_level("!!! You have no world file.\n",
13893 level=logging.WARNING, noiselevel=-1)
13895 writemsg_level("!!! Proceeding is likely to " + \
13896 "break your installation.\n",
13897 level=logging.WARNING, noiselevel=-1)
13898 if "--pretend" not in myopts:
13899 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13901 if action == "depclean":
13902 emergelog(xterm_titles, " >>> depclean")
# args_set holds the atoms explicitly passed on the command line (if any);
# only those packages are then candidates for removal.
13905 args_set = InternalPackageSet()
13907 args_set.update(myfiles)
13908 matched_packages = False
13911 matched_packages = True
13913 if not matched_packages:
13914 writemsg_level(">>> No packages selected for removal by %s\n" % \
# Construct a depgraph in "remove" mode; _complete_graph() later pulls in
# everything reachable from the kept sets.
13918 writemsg_level("\nCalculating dependencies ")
13919 resolver_params = create_depgraph_params(myopts, "remove")
13920 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13921 vardb = resolver.trees[myroot]["vartree"].dbapi
13923 if action == "depclean":
13926 # Pull in everything that's installed but not matched
13927 # by an argument atom since we don't want to clean any
13928 # package if something depends on it.
13930 world_temp_set.clear()
13935 if args_set.findAtomForPackage(pkg) is None:
13936 world_temp_set.add("=" + pkg.cpv)
# An invalid PROVIDE depstring is reported, and the package is
# conservatively kept (added to the temporary world set).
13938 except portage.exception.InvalidDependString, e:
13939 show_invalid_depstring_notice(pkg,
13940 pkg.metadata["PROVIDE"], str(e))
13942 world_temp_set.add("=" + pkg.cpv)
13945 elif action == "prune":
13947 # Pull in everything that's installed since we don't
13948 # to prune a package if something depends on it.
13949 world_temp_set.clear()
13950 world_temp_set.update(vardb.cp_all())
13954 # Try to prune everything that's slotted.
13955 for cp in vardb.cp_all():
13956 if len(vardb.cp_list(cp)) > 1:
13959 # Remove atoms from world that match installed packages
13960 # that are also matched by argument atoms, but do not remove
13961 # them if they match the highest installed version.
13964 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13965 if not pkgs_for_cp or pkg not in pkgs_for_cp:
13966 raise AssertionError("package expected in matches: " + \
13967 "cp = %s, cpv = %s matches = %s" % \
13968 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
# match_pkgs() output is ordered so the last element is the highest
# version -- TODO confirm against vardbapi documentation.
13970 highest_version = pkgs_for_cp[-1]
13971 if pkg == highest_version:
13972 # pkg is the highest version
13973 world_temp_set.add("=" + pkg.cpv)
13976 if len(pkgs_for_cp) <= 1:
13977 raise AssertionError("more packages expected: " + \
13978 "cp = %s, cpv = %s matches = %s" % \
13979 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13982 if args_set.findAtomForPackage(pkg) is None:
13983 world_temp_set.add("=" + pkg.cpv)
13985 except portage.exception.InvalidDependString, e:
13986 show_invalid_depstring_notice(pkg,
13987 pkg.metadata["PROVIDE"], str(e))
13989 world_temp_set.add("=" + pkg.cpv)
# Seed the resolver with each required set as a SetArg root node, then let
# it complete the graph of everything those sets pull in.
13993 for s, package_set in required_sets.iteritems():
13994 set_atom = SETPREFIX + s
13995 set_arg = SetArg(arg=set_atom, set=package_set,
13996 root_config=resolver.roots[myroot])
13997 set_args[s] = set_arg
13998 for atom in set_arg.set:
13999 resolver._dep_stack.append(
14000 Dependency(atom=atom, root=myroot, parent=set_arg))
14001 resolver.digraph.add(set_arg, None)
14003 success = resolver._complete_graph()
14004 writemsg_level("\b\b... done!\n")
14006 resolver.display_problems()
# unresolved_deps(): collect non-soft dependencies of installed packages
# that could not be satisfied; when found during a global operation
# (allow_missing_deps False) they are reported as errors.  The return
# statements are among the elided lines -- presumably truthy on failure,
# given the `if unresolved_deps():` call sites below.
14011 def unresolved_deps():
14013 unresolvable = set()
14014 for dep in resolver._initially_unsatisfied_deps:
14015 if isinstance(dep.parent, Package) and \
14016 (dep.priority > UnmergeDepPriority.SOFT):
14017 unresolvable.add((dep.atom, dep.parent.cpv))
14019 if not unresolvable:
14022 if unresolvable and not allow_missing_deps:
14023 prefix = bad(" * ")
14025 msg.append("Dependencies could not be completely resolved due to")
14026 msg.append("the following required packages not being installed:")
14028 for atom, parent in unresolvable:
14029 msg.append(" %s pulled in by:" % (atom,))
14030 msg.append(" %s" % (parent,))
14032 msg.append("Have you forgotten to run " + \
14033 good("`emerge --update --newuse --deep @system @world`") + " prior")
14034 msg.append(("to %s? It may be necessary to manually " + \
14035 "uninstall packages that no longer") % action)
14036 msg.append("exist in the portage tree since " + \
14037 "it may not be possible to satisfy their")
14038 msg.append("dependencies. Also, be aware of " + \
14039 "the --with-bdeps option that is documented")
14040 msg.append("in " + good("`man emerge`") + ".")
14041 if action == "prune":
14043 msg.append("If you would like to ignore " + \
14044 "dependencies then use %s." % good("--nodeps"))
14045 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
14046 level=logging.ERROR, noiselevel=-1)
14050 if unresolved_deps():
# Snapshot the completed graph and count the Package nodes it keeps; used
# for the summary statistics at the end.
14053 graph = resolver.digraph.copy()
14054 required_pkgs_total = 0
14056 if isinstance(node, Package):
14057 required_pkgs_total += 1
# show_parents(): print which graph parents pull in a package that was
# *not* selected for removal (used with --verbose).
14059 def show_parents(child_node):
14060 parent_nodes = graph.parent_nodes(child_node)
14061 if not parent_nodes:
14062 # With --prune, the highest version can be pulled in without any
14063 # real parent since all installed packages are pulled in. In that
14064 # case there's nothing to show here.
14067 for node in parent_nodes:
14068 parent_strs.append(str(getattr(node, "cpv", node)))
14071 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
14072 for parent_str in parent_strs:
14073 msg.append(" %s\n" % (parent_str,))
14075 portage.writemsg_stdout("".join(msg), noiselevel=-1)
# Old-style cmp function for sorting; adapted via cmp_sort_key() below.
14077 def cmp_pkg_cpv(pkg1, pkg2):
14078 """Sort Package instances by cpv."""
14079 if pkg1.cpv > pkg2.cpv:
14081 elif pkg1.cpv == pkg2.cpv:
# create_cleanlist(): every installed package absent from the
# required-packages graph (restricted to the argument atoms, if given) is
# selected for removal; with --verbose, kept packages have their parents
# shown instead.
14086 def create_cleanlist():
14087 pkgs_to_remove = []
14089 if action == "depclean":
14092 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
14095 arg_atom = args_set.findAtomForPackage(pkg)
14096 except portage.exception.InvalidDependString:
14097 # this error has already been displayed by now
14101 if pkg not in graph:
14102 pkgs_to_remove.append(pkg)
14103 elif "--verbose" in myopts:
14107 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
14108 if pkg not in graph:
14109 pkgs_to_remove.append(pkg)
14110 elif "--verbose" in myopts:
14113 elif action == "prune":
14114 # Prune really uses all installed instead of world. It's not
14115 # a real reverse dependency so don't display it as such.
14116 graph.remove(set_args["world"])
14118 for atom in args_set:
14119 for pkg in vardb.match_pkgs(atom):
14120 if pkg not in graph:
14121 pkgs_to_remove.append(pkg)
14122 elif "--verbose" in myopts:
14125 if not pkgs_to_remove:
14127 ">>> No packages selected for removal by %s\n" % action)
14128 if "--verbose" not in myopts:
14130 ">>> To see reverse dependencies, use %s\n" % \
14132 if action == "prune":
14134 ">>> To ignore dependencies, use %s\n" % \
14137 return pkgs_to_remove
14139 cleanlist = create_cleanlist()
14142 clean_set = set(cleanlist)
14144 # Check if any of these package are the sole providers of libraries
14145 # with consumers that have not been selected for removal. If so, these
14146 # packages and any dependencies need to be added to the graph.
14147 real_vardb = trees[myroot]["vartree"].dbapi
14148 linkmap = real_vardb.linkmap
14149 liblist = linkmap.listLibraryObjects()
# Memoize the (expensive) linkmap consumer/provider lookups across the
# candidate packages.
14150 consumer_cache = {}
14151 provider_cache = {}
14155 writemsg_level(">>> Checking for lib consumers...\n")
14157 for pkg in cleanlist:
14158 pkg_dblink = real_vardb._dblink(pkg.cpv)
# provided_libs: library objects on disk that belong to this package.
14159 provided_libs = set()
14161 for lib in liblist:
14162 if pkg_dblink.isowner(lib, myroot):
14163 provided_libs.add(lib)
14165 if not provided_libs:
14169 for lib in provided_libs:
14170 lib_consumers = consumer_cache.get(lib)
14171 if lib_consumers is None:
14172 lib_consumers = linkmap.findConsumers(lib)
14173 consumer_cache[lib] = lib_consumers
14175 consumers[lib] = lib_consumers
# Ignore consumers that the package itself owns -- a package may consume
# its own libraries.
14180 for lib, lib_consumers in consumers.items():
14181 for consumer_file in list(lib_consumers):
14182 if pkg_dblink.isowner(consumer_file, myroot):
14183 lib_consumers.remove(consumer_file)
14184 if not lib_consumers:
# Map each remaining library to (consumer, providers-of-its-soname) pairs.
14190 for lib, lib_consumers in consumers.iteritems():
14192 soname = soname_cache.get(lib)
14194 soname = linkmap.getSoname(lib)
14195 soname_cache[lib] = soname
14197 consumer_providers = []
14198 for lib_consumer in lib_consumers:
14199 providers = provider_cache.get(lib)
14200 if providers is None:
14201 providers = linkmap.findProviders(lib_consumer)
14202 provider_cache[lib_consumer] = providers
14203 if soname not in providers:
14204 # Why does this happen?
14206 consumer_providers.append(
14207 (lib_consumer, providers[soname]))
14209 consumers[lib] = consumer_providers
14211 consumer_map[pkg] = consumers
# Collect all consumer and provider files so their owning packages can be
# resolved in one batch below.
14215 search_files = set()
14216 for consumers in consumer_map.itervalues():
14217 for lib, consumer_providers in consumers.iteritems():
14218 for lib_consumer, providers in consumer_providers:
14219 search_files.add(lib_consumer)
14220 search_files.update(providers)
14222 writemsg_level(">>> Assigning files to packages...\n")
14223 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
14227 for pkg, consumers in consumer_map.items():
14226 for lib, consumer_providers in consumers.items():
14227 lib_consumers = set()
14229 for lib_consumer, providers in consumer_providers:
14230 owner_set = file_owners.get(lib_consumer)
14231 provider_dblinks = set()
14232 provider_pkgs = set()
14234 if len(providers) > 1:
14235 for provider in providers:
14236 provider_set = file_owners.get(provider)
14237 if provider_set is not None:
14238 provider_dblinks.update(provider_set)
# If the library's soname is also provided by another package that is
# *not* being removed, this consumer is not endangered.
14240 if len(provider_dblinks) > 1:
14241 for provider_dblink in provider_dblinks:
14242 pkg_key = ("installed", myroot,
14243 provider_dblink.mycpv, "nomerge")
14244 if pkg_key not in clean_set:
14245 provider_pkgs.add(vardb.get(pkg_key))
14250 if owner_set is not None:
14251 lib_consumers.update(owner_set)
# Consumers that are themselves scheduled for removal don't count.
14253 for consumer_dblink in list(lib_consumers):
14254 if ("installed", myroot, consumer_dblink.mycpv,
14255 "nomerge") in clean_set:
14256 lib_consumers.remove(consumer_dblink)
14260 consumers[lib] = lib_consumers
14264 del consumer_map[pkg]
# Anything left in consumer_map is a sole library provider with surviving
# consumers: keep it and tell the user how to fix the situation.
14267 # TODO: Implement a package set for rebuilding consumer packages.
14269 msg = "In order to avoid breakage of link level " + \
14270 "dependencies, one or more packages will not be removed. " + \
14271 "This can be solved by rebuilding " + \
14272 "the packages that pulled them in."
14274 prefix = bad(" * ")
14275 from textwrap import wrap
14276 writemsg_level("".join(prefix + "%s\n" % line for \
14277 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
14280 for pkg, consumers in consumer_map.iteritems():
14281 unique_consumers = set(chain(*consumers.values()))
14282 unique_consumers = sorted(consumer.mycpv \
14283 for consumer in unique_consumers)
14285 msg.append(" %s pulled in by:" % (pkg.cpv,))
14286 for consumer in unique_consumers:
14287 msg.append(" %s" % (consumer,))
14289 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
14290 level=logging.WARNING, noiselevel=-1)
14292 # Add lib providers to the graph as children of lib consumers,
14293 # and also add any dependencies pulled in by the provider.
14294 writemsg_level(">>> Adding lib providers to graph...\n")
14296 for pkg, consumers in consumer_map.iteritems():
14297 for consumer_dblink in set(chain(*consumers.values())):
14298 consumer_pkg = vardb.get(("installed", myroot,
14299 consumer_dblink.mycpv, "nomerge"))
14300 if not resolver._add_pkg(pkg,
14301 Dependency(parent=consumer_pkg,
14302 priority=UnmergeDepPriority(runtime=True),
14304 resolver.display_problems()
# Recompute the graph and the clean list now that the preserved library
# providers have been added.
14307 writemsg_level("\nCalculating dependencies ")
14308 success = resolver._complete_graph()
14309 writemsg_level("\b\b... done!\n")
14310 resolver.display_problems()
14313 if unresolved_deps():
14316 graph = resolver.digraph.copy()
14317 required_pkgs_total = 0
14319 if isinstance(node, Package):
14320 required_pkgs_total += 1
14321 cleanlist = create_cleanlist()
14324 clean_set = set(cleanlist)
14326 # Use a topological sort to create an unmerge order such that
14327 # each package is unmerged before it's dependencies. This is
14328 # necessary to avoid breaking things that may need to run
14329 # during pkg_prerm or pkg_postrm phases.
14331 # Create a new graph to account for dependencies between the
14332 # packages being unmerged.
14336 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
14337 runtime = UnmergeDepPriority(runtime=True)
14338 runtime_post = UnmergeDepPriority(runtime_post=True)
14339 buildtime = UnmergeDepPriority(buildtime=True)
14341 "RDEPEND": runtime,
14342 "PDEPEND": runtime_post,
14343 "DEPEND": buildtime,
14346 for node in clean_set:
14347 graph.add(node, None)
14349 node_use = node.metadata["USE"].split()
14350 for dep_type in dep_keys:
14351 depstr = node.metadata[dep_type]
# dep_check() is run in non-strict mode here; the global flag is restored
# immediately after (order matters -- presumably inside a try/finally in
# the elided lines; verify against the full file).
14355 portage.dep._dep_check_strict = False
14356 success, atoms = portage.dep_check(depstr, None, settings,
14357 myuse=node_use, trees=resolver._graph_trees,
14360 portage.dep._dep_check_strict = True
14362 # Ignore invalid deps of packages that will
14363 # be uninstalled anyway.
14366 priority = priority_map[dep_type]
14368 if not isinstance(atom, portage.dep.Atom):
14369 # Ignore invalid atoms returned from dep_check().
14373 matches = vardb.match_pkgs(atom)
14376 for child_node in matches:
14377 if child_node in clean_set:
14378 graph.add(child_node, node, priority=priority)
14381 if len(graph.order) == len(graph.root_nodes()):
14382 # If there are no dependencies between packages
14383 # let unmerge() group them by cat/pn.
14385 cleanlist = [pkg.cpv for pkg in graph.order]
14387 # Order nodes from lowest to highest overall reference count for
14388 # optimal root node selection.
14389 node_refcounts = {}
14390 for node in graph.order:
14391 node_refcounts[node] = len(graph.parent_nodes(node))
14392 def cmp_reference_count(node1, node2):
14393 return node_refcounts[node1] - node_refcounts[node2]
14394 graph.order.sort(key=cmp_sort_key(cmp_reference_count))
# Repeatedly pop root nodes (packages nothing else in the clean set
# depends on), relaxing dependency priorities one level at a time when
# circular dependencies leave no true roots.
14396 ignore_priority_range = [None]
14397 ignore_priority_range.extend(
14398 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14399 while not graph.empty():
14400 for ignore_priority in ignore_priority_range:
14401 nodes = graph.root_nodes(ignore_priority=ignore_priority)
14405 raise AssertionError("no root nodes")
14406 if ignore_priority is not None:
14407 # Some deps have been dropped due to circular dependencies,
14408 # so only pop one node in order do minimize the number that
14413 cleanlist.append(node.cpv)
14415 unmerge(root_config, myopts, "unmerge", cleanlist,
14416 ldpath_mtimes, ordered=ordered)
14418 if action == "prune":
# Final summary statistics (skipped for a quiet run with nothing to do).
14421 if not cleanlist and "--quiet" in myopts:
14424 print "Packages installed: "+str(len(vardb.cpv_all()))
14425 print "Packages in world: " + \
14426 str(len(root_config.sets["world"].getAtoms()))
14427 print "Packages in system: " + \
14428 str(len(root_config.sets["system"].getAtoms()))
14429 print "Required packages: "+str(required_pkgs_total)
14430 if "--pretend" in myopts:
14431 print "Number to remove: "+str(len(cleanlist))
14433 print "Number removed: "+str(len(cleanlist))
14435 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
# Build a depgraph from the saved "resume" mergelist in mtimedb.  When an
# UnsatisfiedResumeDep is raised and skip_unsatisfied is set, the
# offending packages (plus any parents whose dependencies would become
# unsatisfied by dropping them) are pruned from the mergelist, collected
# in dropped_tasks, and the load is retried.
# NOTE(review): elided listing -- the docstring quote lines, the retry
# loop and `try:` around loadResumeCommand(), and the definition of
# skip_masked are among the lines not visible here.
14437 Construct a depgraph for the given resume list. This will raise
14438 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14440 @returns: (success, depgraph, dropped_tasks)
14443 skip_unsatisfied = True
14444 mergelist = mtimedb["resume"]["mergelist"]
14445 dropped_tasks = set()
14447 mydepgraph = depgraph(settings, trees,
14448 myopts, myparams, spinner)
14450 success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14451 skip_masked=skip_masked)
14452 except depgraph.UnsatisfiedResumeDep, e:
14453 if not skip_unsatisfied:
# Walk upward from each unsatisfied parent: dropping a package can make
# its own parents' dependencies unsatisfied, so those are dropped too.
14456 graph = mydepgraph.digraph
14457 unsatisfied_parents = dict((dep.parent, dep.parent) \
14458 for dep in e.value)
14459 traversed_nodes = set()
14460 unsatisfied_stack = list(unsatisfied_parents)
14461 while unsatisfied_stack:
14462 pkg = unsatisfied_stack.pop()
14463 if pkg in traversed_nodes:
14465 traversed_nodes.add(pkg)
14467 # If this package was pulled in by a parent
14468 # package scheduled for merge, removing this
14469 # package may cause the the parent package's
14470 # dependency to become unsatisfied.
14471 for parent_node in graph.parent_nodes(pkg):
14472 if not isinstance(parent_node, Package) \
14473 or parent_node.operation not in ("merge", "nomerge"):
14476 graph.child_nodes(parent_node,
14477 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14478 if pkg in unsatisfied:
14479 unsatisfied_parents[parent_node] = parent_node
14480 unsatisfied_stack.append(parent_node)
# Rebuild the mergelist without the dropped packages (mergelist entries
# are 4-element lists; unsatisfied_parents is keyed by their tuple form).
14482 pruned_mergelist = []
14483 for x in mergelist:
14484 if isinstance(x, list) and \
14485 tuple(x) not in unsatisfied_parents:
14486 pruned_mergelist.append(x)
14488 # If the mergelist doesn't shrink then this loop is infinite.
14489 if len(pruned_mergelist) == len(mergelist):
14490 # This happens if a package can't be dropped because
14491 # it's already installed, but it has unsatisfied PDEPEND.
14493 mergelist[:] = pruned_mergelist
14495 # Exclude installed packages that have been removed from the graph due
14496 # to failure to build/install runtime dependencies after the dependent
14497 # package has already been installed.
14498 dropped_tasks.update(pkg for pkg in \
14499 unsatisfied_parents if pkg.operation != "nomerge")
# break_refs() severs references so the dropped packages can be garbage
# collected before the retry.
14500 mydepgraph.break_refs(unsatisfied_parents)
14502 del e, graph, traversed_nodes, \
14503 unsatisfied_parents, unsatisfied_stack
14507 return (success, mydepgraph, dropped_tasks)
14509 def action_build(settings, trees, mtimedb,
14510 myopts, myaction, myfiles, spinner):
14512 # validate the state of the resume data
14513 # so that we can make assumptions later.
14514 for k in ("resume", "resume_backup"):
14515 if k not in mtimedb:
14517 resume_data = mtimedb[k]
14518 if not isinstance(resume_data, dict):
14521 mergelist = resume_data.get("mergelist")
14522 if not isinstance(mergelist, list):
14525 for x in mergelist:
14526 if not (isinstance(x, list) and len(x) == 4):
14528 pkg_type, pkg_root, pkg_key, pkg_action = x
14529 if pkg_root not in trees:
14530 # Current $ROOT setting differs,
14531 # so the list must be stale.
14537 resume_opts = resume_data.get("myopts")
14538 if not isinstance(resume_opts, (dict, list)):
14541 favorites = resume_data.get("favorites")
14542 if not isinstance(favorites, list):
14547 if "--resume" in myopts and \
14548 ("resume" in mtimedb or
14549 "resume_backup" in mtimedb):
14551 if "resume" not in mtimedb:
14552 mtimedb["resume"] = mtimedb["resume_backup"]
14553 del mtimedb["resume_backup"]
14555 # "myopts" is a list for backward compatibility.
14556 resume_opts = mtimedb["resume"].get("myopts", [])
14557 if isinstance(resume_opts, list):
14558 resume_opts = dict((k,True) for k in resume_opts)
14559 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14560 resume_opts.pop(opt, None)
14562 # Current options always override resume_opts.
14563 resume_opts.update(myopts)
14565 myopts.update(resume_opts)
14567 if "--debug" in myopts:
14568 writemsg_level("myopts %s\n" % (myopts,))
14570 # Adjust config according to options of the command being resumed.
14571 for myroot in trees:
14572 mysettings = trees[myroot]["vartree"].settings
14573 mysettings.unlock()
14574 adjust_config(myopts, mysettings)
14576 del myroot, mysettings
14578 ldpath_mtimes = mtimedb["ldpath"]
14581 buildpkgonly = "--buildpkgonly" in myopts
14582 pretend = "--pretend" in myopts
14583 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14584 ask = "--ask" in myopts
14585 nodeps = "--nodeps" in myopts
14586 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14587 tree = "--tree" in myopts
14588 if nodeps and tree:
14590 del myopts["--tree"]
14591 portage.writemsg(colorize("WARN", " * ") + \
14592 "--tree is broken with --nodeps. Disabling...\n")
14593 debug = "--debug" in myopts
14594 verbose = "--verbose" in myopts
14595 quiet = "--quiet" in myopts
14596 if pretend or fetchonly:
14597 # make the mtimedb readonly
14598 mtimedb.filename = None
14599 if '--digest' in myopts or 'digest' in settings.features:
14600 if '--digest' in myopts:
14601 msg = "The --digest option"
14603 msg = "The FEATURES=digest setting"
14605 msg += " can prevent corruption from being" + \
14606 " noticed. The `repoman manifest` command is the preferred" + \
14607 " way to generate manifests and it is capable of doing an" + \
14608 " entire repository or category at once."
14609 prefix = bad(" * ")
14610 writemsg(prefix + "\n")
14611 from textwrap import wrap
14612 for line in wrap(msg, 72):
14613 writemsg("%s%s\n" % (prefix, line))
14614 writemsg(prefix + "\n")
14616 if "--quiet" not in myopts and \
14617 ("--pretend" in myopts or "--ask" in myopts or \
14618 "--tree" in myopts or "--verbose" in myopts):
14620 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14622 elif "--buildpkgonly" in myopts:
14626 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14628 print darkgreen("These are the packages that would be %s, in reverse order:") % action
14632 print darkgreen("These are the packages that would be %s, in order:") % action
14635 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14636 if not show_spinner:
14637 spinner.update = spinner.update_quiet
14640 favorites = mtimedb["resume"].get("favorites")
14641 if not isinstance(favorites, list):
14645 print "Calculating dependencies ",
14646 myparams = create_depgraph_params(myopts, myaction)
14648 resume_data = mtimedb["resume"]
14649 mergelist = resume_data["mergelist"]
14650 if mergelist and "--skipfirst" in myopts:
14651 for i, task in enumerate(mergelist):
14652 if isinstance(task, list) and \
14653 task and task[-1] == "merge":
14660 success, mydepgraph, dropped_tasks = resume_depgraph(
14661 settings, trees, mtimedb, myopts, myparams, spinner)
14662 except (portage.exception.PackageNotFound,
14663 depgraph.UnsatisfiedResumeDep), e:
14664 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14665 mydepgraph = e.depgraph
14668 from textwrap import wrap
14669 from portage.output import EOutput
14672 resume_data = mtimedb["resume"]
14673 mergelist = resume_data.get("mergelist")
14674 if not isinstance(mergelist, list):
14676 if mergelist and debug or (verbose and not quiet):
14677 out.eerror("Invalid resume list:")
14680 for task in mergelist:
14681 if isinstance(task, list):
14682 out.eerror(indent + str(tuple(task)))
14685 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14686 out.eerror("One or more packages are either masked or " + \
14687 "have missing dependencies:")
14690 for dep in e.value:
14691 if dep.atom is None:
14692 out.eerror(indent + "Masked package:")
14693 out.eerror(2 * indent + str(dep.parent))
14696 out.eerror(indent + str(dep.atom) + " pulled in by:")
14697 out.eerror(2 * indent + str(dep.parent))
14699 msg = "The resume list contains packages " + \
14700 "that are either masked or have " + \
14701 "unsatisfied dependencies. " + \
14702 "Please restart/continue " + \
14703 "the operation manually, or use --skipfirst " + \
14704 "to skip the first package in the list and " + \
14705 "any other packages that may be " + \
14706 "masked or have missing dependencies."
14707 for line in wrap(msg, 72):
14709 elif isinstance(e, portage.exception.PackageNotFound):
14710 out.eerror("An expected package is " + \
14711 "not available: %s" % str(e))
14713 msg = "The resume list contains one or more " + \
14714 "packages that are no longer " + \
14715 "available. Please restart/continue " + \
14716 "the operation manually."
14717 for line in wrap(msg, 72):
14721 print "\b\b... done!"
14725 portage.writemsg("!!! One or more packages have been " + \
14726 "dropped due to\n" + \
14727 "!!! masking or unsatisfied dependencies:\n\n",
14729 for task in dropped_tasks:
14730 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
14731 portage.writemsg("\n", noiselevel=-1)
14734 if mydepgraph is not None:
14735 mydepgraph.display_problems()
14736 if not (ask or pretend):
14737 # delete the current list and also the backup
14738 # since it's probably stale too.
14739 for k in ("resume", "resume_backup"):
14740 mtimedb.pop(k, None)
14745 if ("--resume" in myopts):
14746 print darkgreen("emerge: It seems we have nothing to resume...")
14749 myparams = create_depgraph_params(myopts, myaction)
14750 if "--quiet" not in myopts and "--nodeps" not in myopts:
14751 print "Calculating dependencies ",
14753 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14755 retval, favorites = mydepgraph.select_files(myfiles)
14756 except portage.exception.PackageNotFound, e:
14757 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14759 except portage.exception.PackageSetNotFound, e:
14760 root_config = trees[settings["ROOT"]]["root_config"]
14761 display_missing_pkg_set(root_config, e.value)
14764 print "\b\b... done!"
14766 mydepgraph.display_problems()
14769 if "--pretend" not in myopts and \
14770 ("--ask" in myopts or "--tree" in myopts or \
14771 "--verbose" in myopts) and \
14772 not ("--quiet" in myopts and "--ask" not in myopts):
14773 if "--resume" in myopts:
14774 mymergelist = mydepgraph.altlist()
14775 if len(mymergelist) == 0:
14776 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14778 favorites = mtimedb["resume"]["favorites"]
14779 retval = mydepgraph.display(
14780 mydepgraph.altlist(reversed=tree),
14781 favorites=favorites)
14782 mydepgraph.display_problems()
14783 if retval != os.EX_OK:
14785 prompt="Would you like to resume merging these packages?"
14787 retval = mydepgraph.display(
14788 mydepgraph.altlist(reversed=("--tree" in myopts)),
14789 favorites=favorites)
14790 mydepgraph.display_problems()
14791 if retval != os.EX_OK:
14794 for x in mydepgraph.altlist():
14795 if isinstance(x, Package) and x.operation == "merge":
14799 sets = trees[settings["ROOT"]]["root_config"].sets
14800 world_candidates = None
14801 if "--noreplace" in myopts and \
14802 not oneshot and favorites:
14803 # Sets that are not world candidates are filtered
14804 # out here since the favorites list needs to be
14805 # complete for depgraph.loadResumeCommand() to
14806 # operate correctly.
14807 world_candidates = [x for x in favorites \
14808 if not (x.startswith(SETPREFIX) and \
14809 not sets[x[1:]].world_candidate)]
14810 if "--noreplace" in myopts and \
14811 not oneshot and world_candidates:
14813 for x in world_candidates:
14814 print " %s %s" % (good("*"), x)
14815 prompt="Would you like to add these packages to your world favorites?"
14816 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14817 prompt="Nothing to merge; would you like to auto-clean packages?"
14820 print "Nothing to merge; quitting."
14823 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14824 prompt="Would you like to fetch the source files for these packages?"
14826 prompt="Would you like to merge these packages?"
14828 if "--ask" in myopts and userquery(prompt) == "No":
14833 # Don't ask again (e.g. when auto-cleaning packages after merge)
14834 myopts.pop("--ask", None)
14836 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14837 if ("--resume" in myopts):
14838 mymergelist = mydepgraph.altlist()
14839 if len(mymergelist) == 0:
14840 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14842 favorites = mtimedb["resume"]["favorites"]
14843 retval = mydepgraph.display(
14844 mydepgraph.altlist(reversed=tree),
14845 favorites=favorites)
14846 mydepgraph.display_problems()
14847 if retval != os.EX_OK:
14850 retval = mydepgraph.display(
14851 mydepgraph.altlist(reversed=("--tree" in myopts)),
14852 favorites=favorites)
14853 mydepgraph.display_problems()
14854 if retval != os.EX_OK:
14856 if "--buildpkgonly" in myopts:
14857 graph_copy = mydepgraph.digraph.clone()
14858 removed_nodes = set()
14859 for node in graph_copy:
14860 if not isinstance(node, Package) or \
14861 node.operation == "nomerge":
14862 removed_nodes.add(node)
14863 graph_copy.difference_update(removed_nodes)
14864 if not graph_copy.hasallzeros(ignore_priority = \
14865 DepPrioritySatisfiedRange.ignore_medium):
14866 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14867 print "!!! You have to merge the dependencies before you can build this package.\n"
14870 if "--buildpkgonly" in myopts:
14871 graph_copy = mydepgraph.digraph.clone()
14872 removed_nodes = set()
14873 for node in graph_copy:
14874 if not isinstance(node, Package) or \
14875 node.operation == "nomerge":
14876 removed_nodes.add(node)
14877 graph_copy.difference_update(removed_nodes)
14878 if not graph_copy.hasallzeros(ignore_priority = \
14879 DepPrioritySatisfiedRange.ignore_medium):
14880 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14881 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
14884 if ("--resume" in myopts):
14885 favorites=mtimedb["resume"]["favorites"]
14886 mymergelist = mydepgraph.altlist()
14887 mydepgraph.break_refs(mymergelist)
14888 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14889 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14890 del mydepgraph, mymergelist
14891 clear_caches(trees)
14893 retval = mergetask.merge()
14894 merge_count = mergetask.curval
14896 if "resume" in mtimedb and \
14897 "mergelist" in mtimedb["resume"] and \
14898 len(mtimedb["resume"]["mergelist"]) > 1:
14899 mtimedb["resume_backup"] = mtimedb["resume"]
14900 del mtimedb["resume"]
14902 mtimedb["resume"]={}
14903 # Stored as a dict starting with portage-2.1.6_rc1, and supported
14904 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14905 # a list type for options.
14906 mtimedb["resume"]["myopts"] = myopts.copy()
14908 # Convert Atom instances to plain str.
14909 mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14911 pkglist = mydepgraph.altlist()
14912 mydepgraph.saveNomergeFavorites()
14913 mydepgraph.break_refs(pkglist)
14914 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14915 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14916 del mydepgraph, pkglist
14917 clear_caches(trees)
14919 retval = mergetask.merge()
14920 merge_count = mergetask.curval
14922 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14923 if "yes" == settings.get("AUTOCLEAN"):
14924 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14925 unmerge(trees[settings["ROOT"]]["root_config"],
14926 myopts, "clean", [],
14927 ldpath_mtimes, autoclean=1)
14929 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14930 + " AUTOCLEAN is disabled. This can cause serious"
14931 + " problems due to overlapping packages.\n")
14932 trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
# Report that two mutually-exclusive emerge actions were requested on the
# command line (called from parse_opts when a second action flag is seen).
# NOTE(review): the embedded numbering jumps from 14938 to 14941, so this
# dump elides the function's final line(s) -- presumably an exit call;
# confirm against the full source.
14936 def multiple_actions(action1, action2):
14937 sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
14938 sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
# Pre-process the raw argv so that flags which take an *optional* value
# (--root-deps, and -j/--jobs) always end up with an explicit value,
# because optparse cannot express optional option-arguments natively.
# NOTE(review): gaps in the embedded numbering (14947-14949, 14955,
# 14965-14966, 14970-14971, ...) show this dump elides lines, including
# the `while arg_stack:` loop header and several continue/return
# statements -- verify any rewrite against the full source.
14941 def insert_optional_args(args):
14943 Parse optional arguments and insert a value if one has
14944 not been provided. This is done before feeding the args
14945 to the optparse parser since that parser does not support
14946 this feature natively.
# Process args as a stack, reversed so pop() yields them left-to-right.
14950 jobs_opts = ("-j", "--jobs")
14951 root_deps_opt = '--root-deps'
14952 root_deps_choices = ('True', 'rdeps')
14953 arg_stack = args[:]
14954 arg_stack.reverse()
14956 arg = arg_stack.pop()
# --root-deps: consume an explicit choice if one follows, else default 'True'.
14958 if arg == root_deps_opt:
14959 new_args.append(arg)
14960 if arg_stack and arg_stack[-1] in root_deps_choices:
14961 new_args.append(arg_stack.pop())
14963 # insert default argument
14964 new_args.append('True')
# Anything that is not a jobs option passes through unchanged.  A "short
# job opt" is a bundled short option containing 'j' (e.g. -vj3).
14967 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
14968 if not (short_job_opt or arg in jobs_opts):
14969 new_args.append(arg)
14972 # Insert an empty placeholder in order to
14973 # satisfy the requirements of optparse.
14975 new_args.append("--jobs")
# Extract a count fused into the short option (-j3) or split the other
# bundled short flags off so they can be re-appended afterwards.
14978 if short_job_opt and len(arg) > 2:
14979 if arg[:2] == "-j":
14981 job_count = int(arg[2:])
14983 saved_opts = arg[2:]
14986 saved_opts = arg[1:].replace("j", "")
# Otherwise the count may be the next standalone argument.
14988 if job_count is None and arg_stack:
14990 job_count = int(arg_stack[-1])
14994 # Discard the job count from the stack
14995 # since we're consuming it here.
# No count given: "True" means an unlimited number of jobs.
14998 if job_count is None:
14999 # unlimited number of jobs
15000 new_args.append("True")
15002 new_args.append(str(job_count))
# Re-append any short options that were bundled with -j.
15004 if saved_opts is not None:
15005 new_args.append("-" + saved_opts)
# Parse the emerge command line into (myaction, myopts, myfiles) using
# optparse.  Boolean flags come from the module-level `options` and
# `shortmapping` tables; value-taking flags from `argument_options` below.
# NOTE(review): this dump elides many lines (embedded numbering gaps at
# 15010-15013, 15018-15022, 15088-15092 area, try/except bodies around
# 15099-15112, ...) -- the visible dict literal and validation logic are
# incomplete; confirm against the full source before any code change.
15009 def parse_opts(tmpcmdline, silent=False):
15014 global actions, options, shortmapping
15016 longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
15017 argument_options = {
15019 "help":"specify the location for portage configuration files",
15023 "help":"enable or disable color output",
15025 "choices":("y", "n")
15030 "help" : "Specifies the number of packages to build " + \
15036 "--load-average": {
15038 "help" :"Specifies that no new builds should be started " + \
15039 "if there are other builds running and the load average " + \
15040 "is at least LOAD (a floating-point number).",
15046 "help":"include unnecessary build time dependencies",
15048 "choices":("y", "n")
15051 "help":"specify conditions to trigger package reinstallation",
15053 "choices":["changed-use"]
15056 "help" : "specify the target root filesystem for merging packages",
# NOTE(review): "depedencies" in the help string below is a typo for
# "dependencies"; it is a runtime string, left untouched here.
15061 "help" : "modify interpretation of depedencies",
15063 "choices" :("True", "rdeps")
# Build the OptionParser: drop the stock --help so emerge's own help wins.
15067 from optparse import OptionParser
15068 parser = OptionParser()
15069 if parser.has_option("--help"):
15070 parser.remove_option("--help")
# Register action flags, boolean options, short aliases and long aliases,
# all as store_true with dest derived from the option name.
15072 for action_opt in actions:
15073 parser.add_option("--" + action_opt, action="store_true",
15074 dest=action_opt.replace("-", "_"), default=False)
15075 for myopt in options:
15076 parser.add_option(myopt, action="store_true",
15077 dest=myopt.lstrip("--").replace("-", "_"), default=False)
15078 for shortopt, longopt in shortmapping.iteritems():
15079 parser.add_option("-" + shortopt, action="store_true",
15080 dest=longopt.lstrip("--").replace("-", "_"), default=False)
15081 for myalias, myopt in longopt_aliases.iteritems():
15082 parser.add_option(myalias, action="store_true",
15083 dest=myopt.lstrip("--").replace("-", "_"), default=False)
15085 for myopt, kwargs in argument_options.iteritems():
15086 parser.add_option(myopt,
15087 dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
# insert_optional_args supplies defaults for --jobs / --root-deps first.
15089 tmpcmdline = insert_optional_args(tmpcmdline)
15091 myoptions, myargs = parser.parse_args(args=tmpcmdline)
15093 if myoptions.root_deps == "True":
15094 myoptions.root_deps = True
# Validate --jobs: "True" means unlimited, otherwise a positive integer.
15098 if myoptions.jobs == "True":
15102 jobs = int(myoptions.jobs)
15106 if jobs is not True and \
15110 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
15111 (myoptions.jobs,), noiselevel=-1)
15113 myoptions.jobs = jobs
# Validate --load-average: must parse as a float > 0.0, else discarded.
15115 if myoptions.load_average:
15117 load_average = float(myoptions.load_average)
15121 if load_average <= 0.0:
15122 load_average = None
15124 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
15125 (myoptions.load_average,), noiselevel=-1)
15127 myoptions.load_average = load_average
# Copy the parsed values back into the myopts dict keyed by flag name.
15129 for myopt in options:
15130 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
15132 myopts[myopt] = True
15134 for myopt in argument_options:
15135 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
# --searchdesc implies --search.
15139 if myoptions.searchdesc:
15140 myoptions.search = True
# Exactly one action may be selected; a second triggers multiple_actions().
15142 for action_opt in actions:
15143 v = getattr(myoptions, action_opt.replace("-", "_"))
15146 multiple_actions(myaction, action_opt)
15148 myaction = action_opt
15152 return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
	"""Validate the settings of every root's vartree.

	@param trees: mapping of root path -> tree dict; each tree dict must
		contain a "vartree" whose settings object provides validate().
	@return: None; validation happens via each settings.validate() call.
	"""
	# The dump's line-number prefixes and lost indentation made this block
	# syntactically invalid; restored here as properly formatted Python
	# with the exact statements visible in the source (15154-15157).
	for myroot in trees:
		settings = trees[myroot]["vartree"].settings
		settings.validate()
def clear_caches(trees):
	"""Drop memoized/cached state held by the dbapi objects in trees.

	Un-freezes each porttree dbapi (melt), clears the porttree and
	bintree aux caches, clears the bintree's own cache and the vartree
	linkmap cache, then empties portage's global dircache.

	@param trees: mapping of root path -> tree dict with "porttree",
		"bintree" and "vartree" entries.
	"""
	# The dump's line-number prefixes and lost indentation made this block
	# syntactically invalid; restored as properly formatted Python.  The
	# final dircache.clear() (embedded line 15166) is a module-global
	# cache, so it sits outside the per-root loop.
	# NOTE(review): embedded lines 15167-15168 are elided by the dump and
	# may contain additional cleanup -- confirm against the full source.
	for d in trees.itervalues():
		d["porttree"].dbapi.melt()
		d["porttree"].dbapi._aux_cache.clear()
		d["bintree"].dbapi._aux_cache.clear()
		d["bintree"].dbapi._clear_cache()
		d["vartree"].dbapi.linkmap._clear_cache()
	portage.dircache.clear()
# Build (settings, trees, mtimedb) for emerge: create (or reuse) the
# portage trees, attach a RootConfig with set configuration to each root,
# and open the mtimedb from the cache path.
# NOTE(review): the dump elides lines here (embedded gaps at 15170,
# 15174, 15176, 15181, 15183, 15185, 15187-15188, 15191), including the
# kwargs initialization that the visible loop populates -- confirm
# against the full source.
15169 def load_emerge_config(trees=None):
# Collect config_root/target_root overrides from the environment.
15171 for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
15172 v = os.environ.get(envvar, None)
15173 if v and v.strip():
15175 trees = portage.create_trees(trees=trees, **kwargs)
# Attach per-root set configuration.
15177 for root, root_trees in trees.iteritems():
15178 settings = root_trees["vartree"].settings
15179 setconfig = load_default_config(settings, root_trees)
15180 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
# Select the settings object to return ("/" first, then target root).
15182 settings = trees["/"]["vartree"].settings
15184 for myroot in trees:
15186 settings = trees[myroot]["vartree"].settings
15189 mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
15190 mtimedb = portage.MtimeDB(mtimedbfile)
15192 return settings, trees, mtimedb
# NOTE(review): the dump elides lines throughout this function (embedded
# numbering gaps at 15196, 15198, 15203-15204, 15210-15212, try headers
# around 15213/15222/15247, 15253-15254, 15259, 15261, 15264, 15267,
# 15274) -- confirm control flow against the full source before editing.
15194 def adjust_config(myopts, settings):
15195 """Make emerge specific adjustments to the config."""
15197 # To enhance usability, make some vars case insensitive by forcing them to
15199 for myvar in ("AUTOCLEAN", "NOCOLOR"):
15200 if myvar in settings:
15201 settings[myvar] = settings[myvar].lower()
15202 settings.backup_changes(myvar)
15205 # Kill noauto as it will break merges otherwise.
15206 if "noauto" in settings.features:
15207 settings.features.remove('noauto')
15208 settings['FEATURES'] = ' '.join(sorted(settings.features))
15209 settings.backup_changes("FEATURES")
# Sanitize CLEAN_DELAY: fall back to the default on parse failure.
15213 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
15214 except ValueError, e:
15215 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15216 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
15217 settings["CLEAN_DELAY"], noiselevel=-1)
15218 settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
15219 settings.backup_changes("CLEAN_DELAY")
# Sanitize EMERGE_WARNING_DELAY the same way (default 10 seconds).
15221 EMERGE_WARNING_DELAY = 10
15223 EMERGE_WARNING_DELAY = int(settings.get(
15224 "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
15225 except ValueError, e:
15226 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15227 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
15228 settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
15229 settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
15230 settings.backup_changes("EMERGE_WARNING_DELAY")
# Propagate command-line flags into the config environment.
15232 if "--quiet" in myopts:
15233 settings["PORTAGE_QUIET"]="1"
15234 settings.backup_changes("PORTAGE_QUIET")
15236 if "--verbose" in myopts:
15237 settings["PORTAGE_VERBOSE"] = "1"
15238 settings.backup_changes("PORTAGE_VERBOSE")
15240 # Set so that configs will be merged regardless of remembered status
15241 if ("--noconfmem" in myopts):
15242 settings["NOCONFMEM"]="1"
15243 settings.backup_changes("NOCONFMEM")
15245 # Set various debug markers... They should be merged somehow.
# PORTAGE_DEBUG must be 0 or 1; anything else is reported and rejected.
15248 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
15249 if PORTAGE_DEBUG not in (0, 1):
15250 portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
15251 PORTAGE_DEBUG, noiselevel=-1)
15252 portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
15255 except ValueError, e:
15256 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15257 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
15258 settings["PORTAGE_DEBUG"], noiselevel=-1)
15260 if "--debug" in myopts:
15262 settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
15263 settings.backup_changes("PORTAGE_DEBUG")
# Color handling: NOCOLOR and --color interact with stdout tty detection.
15265 if settings.get("NOCOLOR") not in ("yes","true"):
15266 portage.output.havecolor = 1
15268 """The explicit --color < y | n > option overrides the NOCOLOR environment
15269 variable and stdout auto-detection."""
15270 if "--color" in myopts:
15271 if "y" == myopts["--color"]:
15272 portage.output.havecolor = 1
15273 settings["NOCOLOR"] = "false"
15275 portage.output.havecolor = 0
15276 settings["NOCOLOR"] = "true"
15277 settings.backup_changes("NOCOLOR")
15278 elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
15279 portage.output.havecolor = 0
15280 settings["NOCOLOR"] = "true"
15281 settings.backup_changes("NOCOLOR")
# Apply process scheduling priorities from the config.  The body is elided
# by this dump (embedded lines 15284-15286 missing) -- presumably it calls
# the nice() and ionice() helpers defined below; confirm against the full
# source.
15283 def apply_priorities(settings):
# Renice the current process to PORTAGE_NICENESS (default "0"), reporting
# failure via EOutput instead of raising.
# NOTE(review): the `try:` header (embedded line 15288) is elided by this
# dump; the visible except clause implies it wraps the os.nice() call.
15287 def nice(settings):
15289 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
15290 except (OSError, ValueError), e:
15291 out = portage.output.EOutput()
15292 out.eerror("Failed to change nice value to '%s'" % \
15293 settings["PORTAGE_NICENESS"])
15294 out.eerror("%s\n" % str(e))
# Run the user-configured PORTAGE_IONICE_COMMAND (with ${PID} expanded to
# the current pid) to set I/O priority; errors are reported, not raised.
# NOTE(review): elided lines (embedded 15297, 15299, 15301-15303,
# 15307-15308, 15313-15314) likely include early returns for an unset
# command and for CommandNotFound -- confirm against the full source.
15296 def ionice(settings):
15298 ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
15300 ionice_cmd = shlex.split(ionice_cmd)
# Substitute ${PID} (and any other variables) into each argument.
15304 from portage.util import varexpand
15305 variables = {"PID" : str(os.getpid())}
15306 cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
15309 rval = portage.process.spawn(cmd, env=os.environ)
15310 except portage.exception.CommandNotFound:
15311 # The OS kernel probably doesn't support ionice,
15312 # so return silently.
# A nonzero exit status from the command is worth telling the user about.
15315 if rval != os.EX_OK:
15316 out = portage.output.EOutput()
15317 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
15318 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
# Print an error listing the available package sets when the user asked
# for a set name that does not exist.
# NOTE(review): the msg list initialization (embedded lines 15321-15322)
# and some blank/filler lines (15326-15327, 15330-15331) are elided by
# this dump.
15320 def display_missing_pkg_set(root_config, set_name):
15323 msg.append(("emerge: There are no sets to satisfy '%s'. " + \
15324 "The following sets exist:") % \
15325 colorize("INFORM", set_name))
15328 for s in sorted(root_config.sets):
15329 msg.append(" %s" % s)
15332 writemsg_level("".join("%s\n" % l for l in msg),
15333 level=logging.ERROR, noiselevel=-1)
# Expand @set arguments on the emerge command line: parse inline set
# options (name(opt=val,...)), evaluate set-algebra expressions with the
# -@ / +@ (and an elided intersection) operators, and replace set names
# with their atoms for actions that take package arguments.
# NOTE(review): this dump elides many lines (embedded gaps at 15336,
# 15338, 15340, 15346-15347, 15350-15355, the ARG_START/ARG_END and
# IS_OPERATOR definitions around 15357-15361, loop/except scaffolding at
# 15367-15371, 15377, 15380-15385, 15390-15392, 15403-15408, 15414,
# 15418, 15423, 15427-15428, 15434-15435, 15444, 15446, 15449, 15452,
# 15462-15463, 15466, 15468, 15470, 15472, 15478, 15483, 15488,
# 15491-15493) -- the control flow shown here is incomplete; verify
# against the full source before any code change.
15335 def expand_set_arguments(myfiles, myaction, root_config):
15337 setconfig = root_config.setconfig
15339 sets = setconfig.getSets()
15341 # In order to know exactly which atoms/sets should be added to the
15342 # world file, the depgraph performs set expansion later. It will get
15343 # confused about where the atoms came from if it's not allowed to
15344 # expand them itself.
15345 do_not_expand = (None, )
# Bare "system"/"world" arguments are normalized to @system/@world.
15348 if a in ("system", "world"):
15349 newargs.append(SETPREFIX+a)
15356 # separators for set arguments
15360 # WARNING: all operators must be of equal length
15362 DIFF_OPERATOR = "-@"
15363 UNION_OPERATOR = "+@"
# First pass: extract inline per-set options of the form name(k=v,...)
# and feed them to setconfig.update() before atom expansion.
15365 for i in range(0, len(myfiles)):
15366 if myfiles[i].startswith(SETPREFIX):
15369 x = myfiles[i][len(SETPREFIX):]
15372 start = x.find(ARG_START)
15373 end = x.find(ARG_END)
15374 if start > 0 and start < end:
15375 namepart = x[:start]
15376 argpart = x[start+1:end]
15378 # TODO: implement proper quoting
15379 args = argpart.split(",")
15383 k, v = a.split("=", 1)
# Bare option names (no '=') are treated as boolean "True".
15386 options[a] = "True"
15387 setconfig.update(namepart, options)
15388 newset += (x[:start-len(namepart)]+namepart)
15389 x = x[end+len(ARG_END):]
15393 myfiles[i] = SETPREFIX+newset
# Reload the sets after any inline option updates.
15395 sets = setconfig.getSets()
15397 # display errors that occured while loading the SetConfig instance
15398 for e in setconfig.errors:
15399 print colorize("BAD", "Error during set creation: %s" % e)
15401 # emerge relies on the existance of sets with names "world" and "system"
15402 required_sets = ("world", "system")
15405 for s in required_sets:
15407 missing_sets.append(s)
# Build a human-readable list of the missing required sets.
15409 if len(missing_sets) > 2:
15410 missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
15411 missing_sets_str += ', and "%s"' % missing_sets[-1]
15412 elif len(missing_sets) == 2:
15413 missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
15415 missing_sets_str = '"%s"' % missing_sets[-1]
15416 msg = ["emerge: incomplete set configuration, " + \
15417 "missing set(s): %s" % missing_sets_str]
15419 msg.append(" sets defined: %s" % ", ".join(sets))
15420 msg.append(" This usually means that '%s'" % \
15421 (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
15422 msg.append(" is missing or corrupt.")
15424 writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
15426 unmerge_actions = ("unmerge", "prune", "clean", "depclean")
# Second pass: evaluate set expressions / expand plain set references.
15429 if a.startswith(SETPREFIX):
15430 # support simple set operations (intersection, difference and union)
15431 # on the commandline. Expressions are evaluated strictly left-to-right
15432 if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
15433 expression = a[len(SETPREFIX):]
# Peel operators off the right end so evaluation order is left-to-right.
15436 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15437 is_pos = expression.rfind(IS_OPERATOR)
15438 diff_pos = expression.rfind(DIFF_OPERATOR)
15439 union_pos = expression.rfind(UNION_OPERATOR)
15440 op_pos = max(is_pos, diff_pos, union_pos)
15441 s1 = expression[:op_pos]
15442 s2 = expression[op_pos+len(IS_OPERATOR):]
15443 op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15445 display_missing_pkg_set(root_config, s2)
15447 expr_sets.insert(0, s2)
15448 expr_ops.insert(0, op)
15450 if not expression in sets:
15451 display_missing_pkg_set(root_config, expression)
15453 expr_sets.insert(0, expression)
15454 result = set(setconfig.getSetAtoms(expression))
15455 for i in range(0, len(expr_ops)):
15456 s2 = setconfig.getSetAtoms(expr_sets[i+1])
15457 if expr_ops[i] == IS_OPERATOR:
15458 result.intersection_update(s2)
15459 elif expr_ops[i] == DIFF_OPERATOR:
15460 result.difference_update(s2)
15461 elif expr_ops[i] == UNION_OPERATOR:
15464 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15465 newargs.extend(result)
# Plain @set reference (no operators): validate and expand it.
15467 s = a[len(SETPREFIX):]
15469 display_missing_pkg_set(root_config, s)
15471 setconfig.active.append(s)
15473 set_atoms = setconfig.getSetAtoms(s)
15474 except portage.exception.PackageSetNotFound, e:
15475 writemsg_level(("emerge: the given set '%s' " + \
15476 "contains a non-existent set named '%s'.\n") % \
15477 (s, e), level=logging.ERROR, noiselevel=-1)
15479 if myaction in unmerge_actions and \
15480 not sets[s].supportsOperation("unmerge"):
15481 sys.stderr.write("emerge: the given set '%s' does " % s + \
15482 "not support unmerge operations\n")
15484 elif not set_atoms:
15485 print "emerge: '%s' is an empty set" % s
15486 elif myaction not in do_not_expand:
15487 newargs.extend(set_atoms)
# For actions in do_not_expand, keep the @set token for the depgraph.
15489 newargs.append(SETPREFIX+s)
15490 for e in sets[s].errors:
15494 return (newargs, retval)
# Warn about overlays that lack a profiles/repo_name file; returns True
# when at least one repository is missing its repo_name entry.
# NOTE(review): the dump elides lines here (embedded 15503 -- the loop
# over `repos` feeding the discard() call -- plus 15511, 15513 msg-list
# init, 15516, 15519, 15525); confirm against the full source.
15496 def repo_name_check(trees):
15497 missing_repo_names = set()
15498 for root, root_trees in trees.iteritems():
15499 if "porttree" in root_trees:
15500 portdb = root_trees["porttree"].dbapi
# Start from every porttree path, then discard those with a known repo name.
15501 missing_repo_names.update(portdb.porttrees)
15502 repos = portdb.getRepositories()
15504 missing_repo_names.discard(portdb.getRepositoryPath(r))
15505 if portdb.porttree_root in missing_repo_names and \
15506 not os.path.exists(os.path.join(
15507 portdb.porttree_root, "profiles")):
15508 # This is normal if $PORTDIR happens to be empty,
15509 # so don't warn about it.
15510 missing_repo_names.remove(portdb.porttree_root)
15512 if missing_repo_names:
15514 msg.append("WARNING: One or more repositories " + \
15515 "have missing repo_name entries:")
15517 for p in missing_repo_names:
15518 msg.append("\t%s/profiles/repo_name" % (p,))
15520 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
15521 "should be a plain text file containing a unique " + \
15522 "name for the repository on the first line.", 70))
15523 writemsg_level("".join("%s\n" % l for l in msg),
15524 level=logging.WARNING, noiselevel=-1)
15526 return bool(missing_repo_names)
# Warn when CONFIG_PROTECT is empty for any root, since config files
# would then be overwritten without protection.
# NOTE(review): embedded line 15532 is elided by this dump -- it appears
# to guard the "for '%s'" suffix (presumably only added for non-"/"
# roots); confirm against the full source.
15528 def config_protect_check(trees):
15529 for root, root_trees in trees.iteritems():
15530 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
15531 msg = "!!! CONFIG_PROTECT is empty"
15533 msg += " for '%s'" % root
15534 writemsg_level(msg, level=logging.WARN, noiselevel=-1)
# Tell the user a short ebuild name matched several category/package
# pairs.  In --quiet mode just list the candidates; otherwise run a
# search so each candidate is shown with its description.
# NOTE(review): the dump elides lines (embedded 15537, 15543-15544,
# 15549 -- the argument to insert_category_into_atom -- and 15553-15554,
# the per-candidate search output calls); confirm against the full
# source.
15536 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
15538 if "--quiet" in myopts:
15539 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15540 print "!!! one of the following fully-qualified ebuild names instead:\n"
15541 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15542 print " " + colorize("INFORM", cp)
# Non-quiet path: use the search machinery to show descriptions.
15545 s = search(root_config, spinner, "--searchdesc" in myopts,
15546 "--quiet" not in myopts, "--usepkg" in myopts,
15547 "--usepkgonly" in myopts)
15548 null_cp = portage.dep_getkey(insert_category_into_atom(
15550 cat, atom_pn = portage.catsplit(null_cp)
15551 s.searchkey = atom_pn
15552 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15555 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15556 print "!!! one of the above fully-qualified ebuild names instead.\n"
# Verify that every root has a valid profile before allowing most
# actions; info/sync/--version/--help are exempt so a broken profile can
# still be repaired.
# NOTE(review): the return statements (embedded lines 15560, 15562,
# 15565, 15574-15576) are elided by this dump, so the exact return
# values (presumably os.EX_OK on success and an error code after the
# warning) must be confirmed against the full source.
15558 def profile_check(trees, myaction, myopts):
15559 if myaction in ("info", "sync"):
15561 elif "--version" in myopts or "--help" in myopts:
15563 for root, root_trees in trees.iteritems():
15564 if root_trees["root_config"].settings.profiles:
# Reaching here means some root has no profiles configured.
15566 # generate some profile related warning messages
15567 validate_ebuild_environment(trees)
15568 msg = "If you have just changed your profile configuration, you " + \
15569 "should revert back to the previous configuration. Due to " + \
15570 "your current profile being invalid, allowed actions are " + \
15571 "limited to --help, --info, --sync, and --version."
15572 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
15573 level=logging.ERROR, noiselevel=-1)
15578 global portage # NFC why this is necessary now - genone
15579 portage._disable_legacy_globals()
15580 # Disable color until we're sure that it should be enabled (after
15581 # EMERGE_DEFAULT_OPTS has been parsed).
15582 portage.output.havecolor = 0
15583 # This first pass is just for options that need to be known as early as
15584 # possible, such as --config-root. They will be parsed again later,
15585 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15586 # the value of --config-root).
15587 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15588 if "--debug" in myopts:
15589 os.environ["PORTAGE_DEBUG"] = "1"
15590 if "--config-root" in myopts:
15591 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15592 if "--root" in myopts:
15593 os.environ["ROOT"] = myopts["--root"]
15595 # Portage needs to ensure a sane umask for the files it creates.
15597 settings, trees, mtimedb = load_emerge_config()
15598 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15599 rval = profile_check(trees, myaction, myopts)
15600 if rval != os.EX_OK:
15603 if portage._global_updates(trees, mtimedb["updates"]):
15605 # Reload the whole config from scratch.
15606 settings, trees, mtimedb = load_emerge_config(trees=trees)
15607 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15609 xterm_titles = "notitles" not in settings.features
15612 if "--ignore-default-opts" not in myopts:
15613 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15614 tmpcmdline.extend(sys.argv[1:])
15615 myaction, myopts, myfiles = parse_opts(tmpcmdline)
15617 if "--digest" in myopts:
15618 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15619 # Reload the whole config from scratch so that the portdbapi internal
15620 # config is updated with new FEATURES.
15621 settings, trees, mtimedb = load_emerge_config(trees=trees)
15622 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15624 for myroot in trees:
15625 mysettings = trees[myroot]["vartree"].settings
15626 mysettings.unlock()
15627 adjust_config(myopts, mysettings)
15628 if '--pretend' not in myopts and myaction in \
15629 (None, 'clean', 'depclean', 'prune', 'unmerge'):
15630 mysettings["PORTAGE_COUNTER_HASH"] = \
15631 trees[myroot]["vartree"].dbapi._counter_hash()
15632 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15634 del myroot, mysettings
15636 apply_priorities(settings)
15638 spinner = stdout_spinner()
15639 if "candy" in settings.features:
15640 spinner.update = spinner.update_scroll
15642 if "--quiet" not in myopts:
15643 portage.deprecated_profile_check(settings=settings)
15644 repo_name_check(trees)
15645 config_protect_check(trees)
15647 for mytrees in trees.itervalues():
15648 mydb = mytrees["porttree"].dbapi
15649 # Freeze the portdbapi for performance (memoize all xmatch results).
15653 if "moo" in myfiles:
15656 Larry loves Gentoo (""" + platform.system() + """)
15658 _______________________
15659 < Have you mooed today? >
15660 -----------------------
15670 ext = os.path.splitext(x)[1]
15671 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15672 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15675 root_config = trees[settings["ROOT"]]["root_config"]
15676 if myaction == "list-sets":
15677 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15681 # only expand sets for actions taking package arguments
15682 oldargs = myfiles[:]
15683 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15684 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15685 if retval != os.EX_OK:
15688 # Need to handle empty sets specially, otherwise emerge will react
15689 # with the help message for empty argument lists
15690 if oldargs and not myfiles:
15691 print "emerge: no targets left after set expansion"
15694 if ("--tree" in myopts) and ("--columns" in myopts):
15695 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15698 if ("--quiet" in myopts):
15699 spinner.update = spinner.update_quiet
15700 portage.util.noiselimit = -1
15702 # Always create packages if FEATURES=buildpkg
15703 # Imply --buildpkg if --buildpkgonly
15704 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15705 if "--buildpkg" not in myopts:
15706 myopts["--buildpkg"] = True
15708 # Always try and fetch binary packages if FEATURES=getbinpkg
15709 if ("getbinpkg" in settings.features):
15710 myopts["--getbinpkg"] = True
15712 if "--buildpkgonly" in myopts:
15713 # --buildpkgonly will not merge anything, so
15714 # it cancels all binary package options.
15715 for opt in ("--getbinpkg", "--getbinpkgonly",
15716 "--usepkg", "--usepkgonly"):
15717 myopts.pop(opt, None)
15719 if "--fetch-all-uri" in myopts:
15720 myopts["--fetchonly"] = True
15722 if "--skipfirst" in myopts and "--resume" not in myopts:
15723 myopts["--resume"] = True
15725 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15726 myopts["--usepkgonly"] = True
15728 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15729 myopts["--getbinpkg"] = True
15731 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15732 myopts["--usepkg"] = True
15734 # Also allow -K to apply --usepkg/-k
15735 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15736 myopts["--usepkg"] = True
15738 # Allow -p to remove --ask
15739 if ("--pretend" in myopts) and ("--ask" in myopts):
15740 print ">>> --pretend disables --ask... removing --ask from options."
15741 del myopts["--ask"]
15743 # forbid --ask when not in a terminal
15744 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15745 if ("--ask" in myopts) and (not sys.stdin.isatty()):
15746 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15750 if settings.get("PORTAGE_DEBUG", "") == "1":
15751 spinner.update = spinner.update_quiet
15753 if "python-trace" in settings.features:
15754 import portage.debug
15755 portage.debug.set_trace(True)
15757 if not ("--quiet" in myopts):
15758 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15759 spinner.update = spinner.update_basic
15761 if myaction == 'version':
15762 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15763 settings.profile_path, settings["CHOST"],
15764 trees[settings["ROOT"]]["vartree"].dbapi)
15766 elif "--help" in myopts:
15767 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15770 if "--debug" in myopts:
15771 print "myaction", myaction
15772 print "myopts", myopts
15774 if not myaction and not myfiles and "--resume" not in myopts:
15775 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15778 pretend = "--pretend" in myopts
15779 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15780 buildpkgonly = "--buildpkgonly" in myopts
15782 # check if root user is the current user for the actions where emerge needs this
15783 if portage.secpass < 2:
15784 # We've already allowed "--version" and "--help" above.
15785 if "--pretend" not in myopts and myaction not in ("search","info"):
15786 need_superuser = not \
15788 (buildpkgonly and secpass >= 1) or \
15789 myaction in ("metadata", "regen") or \
15790 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15791 if portage.secpass < 1 or \
15794 access_desc = "superuser"
15796 access_desc = "portage group"
15797 # Always show portage_group_warning() when only portage group
15798 # access is required but the user is not in the portage group.
15799 from portage.data import portage_group_warning
15800 if "--ask" in myopts:
15801 myopts["--pretend"] = True
15802 del myopts["--ask"]
15803 print ("%s access is required... " + \
15804 "adding --pretend to options.\n") % access_desc
15805 if portage.secpass < 1 and not need_superuser:
15806 portage_group_warning()
15808 sys.stderr.write(("emerge: %s access is " + \
15809 "required.\n\n") % access_desc)
15810 if portage.secpass < 1 and not need_superuser:
15811 portage_group_warning()
15814 disable_emergelog = False
15815 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15817 disable_emergelog = True
15819 if myaction in ("search", "info"):
15820 disable_emergelog = True
15821 if disable_emergelog:
15822 """ Disable emergelog for everything except build or unmerge
15823 operations. This helps minimize parallel emerge.log entries that can
15824 confuse log parsers. We especially want it disabled during
15825 parallel-fetch, which uses --resume --fetchonly."""
15827 def emergelog(*pargs, **kargs):
15830 if not "--pretend" in myopts:
15831 emergelog(xterm_titles, "Started emerge on: "+\
15832 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15835 myelogstr=" ".join(myopts)
15837 myelogstr+=" "+myaction
15839 myelogstr += " " + " ".join(oldargs)
15840 emergelog(xterm_titles, " *** emerge " + myelogstr)
def emergeexitsig(signum, frame):
	"""Signal handler for SIGINT/SIGTERM: ignore any further delivery of
	these signals, announce the shutdown, and exit with the conventional
	128-ish style status of 100 + signal number."""
	for _sig in (signal.SIGINT, signal.SIGTERM):
		signal.signal(_sig, signal.SIG_IGN)
	portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
	sys.exit(100+signum)
signal.signal(signal.SIGINT, emergeexitsig)
signal.signal(signal.SIGTERM, emergeexitsig)
15852 """This gets out final log message in before we quit."""
15853 if "--pretend" not in myopts:
15854 emergelog(xterm_titles, " *** terminating.")
15855 if "notitles" not in settings.features:
15857 portage.atexit_register(emergeexit)
15859 if myaction in ("config", "metadata", "regen", "sync"):
15860 if "--pretend" in myopts:
15861 sys.stderr.write(("emerge: The '%s' action does " + \
15862 "not support '--pretend'.\n") % myaction)
15865 if "sync" == myaction:
15866 return action_sync(settings, trees, mtimedb, myopts, myaction)
15867 elif "metadata" == myaction:
15868 action_metadata(settings, portdb, myopts)
15869 elif myaction=="regen":
15870 validate_ebuild_environment(trees)
15871 return action_regen(settings, portdb, myopts.get("--jobs"),
15872 myopts.get("--load-average"))
15874 elif "config"==myaction:
15875 validate_ebuild_environment(trees)
15876 action_config(settings, trees, myopts, myfiles)
15879 elif "search"==myaction:
15880 validate_ebuild_environment(trees)
15881 action_search(trees[settings["ROOT"]]["root_config"],
15882 myopts, myfiles, spinner)
15884 elif myaction in ('clean', 'depclean', 'prune', 'unmerge'):
15885 validate_ebuild_environment(trees)
15886 rval = action_uninstall(settings, trees, mtimedb["ldpath"],
15887 myopts, myaction, myfiles, spinner)
15888 if not (buildpkgonly or fetchonly or pretend):
15889 post_emerge(root_config, myopts, mtimedb, rval)
15892 elif myaction == 'info':
15894 # Ensure atoms are valid before calling unmerge().
15895 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15898 if is_valid_package_atom(x):
15900 valid_atoms.append(
15901 portage.dep_expand(x, mydb=vardb, settings=settings))
15902 except portage.exception.AmbiguousPackageName, e:
15903 msg = "The short ebuild name \"" + x + \
15904 "\" is ambiguous. Please specify " + \
15905 "one of the following " + \
15906 "fully-qualified ebuild names instead:"
15907 for line in textwrap.wrap(msg, 70):
15908 writemsg_level("!!! %s\n" % (line,),
15909 level=logging.ERROR, noiselevel=-1)
15911 writemsg_level(" %s\n" % colorize("INFORM", i),
15912 level=logging.ERROR, noiselevel=-1)
15913 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15917 msg.append("'%s' is not a valid package atom." % (x,))
15918 msg.append("Please check ebuild(5) for full details.")
15919 writemsg_level("".join("!!! %s\n" % line for line in msg),
15920 level=logging.ERROR, noiselevel=-1)
15923 return action_info(settings, trees, myopts, valid_atoms)
15925 # "update", "system", or just process files:
15927 validate_ebuild_environment(trees)
15930 if x.startswith(SETPREFIX) or \
15931 is_valid_package_atom(x):
15933 if x[:1] == os.sep:
15941 msg.append("'%s' is not a valid package atom." % (x,))
15942 msg.append("Please check ebuild(5) for full details.")
15943 writemsg_level("".join("!!! %s\n" % line for line in msg),
15944 level=logging.ERROR, noiselevel=-1)
15947 if "--pretend" not in myopts:
15948 display_news_notification(root_config, myopts)
15949 retval = action_build(settings, trees, mtimedb,
15950 myopts, myaction, myfiles, spinner)
15951 root_config = trees[settings["ROOT"]]["root_config"]
15952 post_emerge(root_config, myopts, mtimedb, retval)