2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
8 from collections import deque
28 from os import path as osp
29 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
32 from portage import digraph
33 from portage.const import NEWS_LIB_PATH
36 import portage.xpak, commands, errno, re, socket, time
37 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
38 nc_len, red, teal, turquoise, xtermTitle, \
39 xtermTitleReset, yellow
40 from portage.output import create_color_func
41 good = create_color_func("GOOD")
42 bad = create_color_func("BAD")
43 # white looks bad on terminals with white background
44 from portage.output import bold as white
48 portage.dep._dep_check_strict = True
51 import portage.exception
52 from portage.data import secpass
53 from portage.elog.messages import eerror
54 from portage.util import normalize_path as normpath
55 from portage.util import cmp_sort_key, writemsg, writemsg_level
56 from portage.sets import load_default_config, SETPREFIX
57 from portage.sets.base import InternalPackageSet
59 from itertools import chain, izip
62 import cPickle as pickle
67 from cStringIO import StringIO
69 from StringIO import StringIO
class stdout_spinner(object):
	# Animated progress indicator written to stdout while emerge works.
	# NOTE(review): several lines of this class are not visible in this view
	# (the ``scroll_msgs = ["`` list opening, the __init__ def line, and some
	# conditional bodies); code is reproduced as-is.

	# Messages cycled through by update_scroll() (list opening not visible).
		"Gentoo Rocks ("+platform.system()+")",
		"Thank you for using Gentoo. :)",
		"Are you actually trying to read this?",
		"How many times have you stared at this?",
		"We are generating the cache right now",
		"You are paying too much attention.",
		"A theory is better than its explanation.",
		"Phasers locked on target, Captain.",
		"Thrashing is just virtual crashing.",
		"To be is to program.",
		"Real Users hate Real Programmers.",
		"When all else fails, read the instructions.",
		"Functionality breeds Contempt.",
		"The future lies ahead.",
		"3.1415926535897932384626433832795028841971694",
		"Sometimes insanity is the only alternative.",
		"Inaccuracy saves a world of explanation.",

	# Characters cycled through by update_twirl().
	twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"

	# Remainder of __init__ (its def line is not visible here): default to
	# the twirl style and seed the scroll message from the current clock.
		self.update = self.update_twirl
		self.scroll_sequence = self.scroll_msgs[
			int(time.time() * 100) % len(self.scroll_msgs)]
		self.min_display_latency = 0.05  # minimum seconds between tty writes

	def _return_early(self):
		"""
		Flushing output to the tty too frequently wastes cpu time. Therefore,
		each update* method should return without doing any output when this
		method returns True.
		"""
		cur_time = time.time()
		if cur_time - self.last_update < self.min_display_latency:
		self.last_update = cur_time

	def update_basic(self):
		# Emit a dot roughly every 100th call; restart the line every 500.
		self.spinpos = (self.spinpos + 1) % 500
		if self._return_early():
		if (self.spinpos % 100) == 0:
			if self.spinpos == 0:
				sys.stdout.write(". ")
				sys.stdout.write(".")

	def update_scroll(self):
		# Scroll the chosen message back and forth using backspace chars.
		if self._return_early():
		if(self.spinpos >= len(self.scroll_sequence)):
			sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
				len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
			sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
		self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))

	def update_twirl(self):
		# Advance the spinner one character through twirl_sequence.
		self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
		if self._return_early():
		sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])

	def update_quiet(self):
def userquery(prompt, responses=None, colours=None):
	"""Displays a prompt and a set of responses, then waits for a response
	which is checked against the responses and the first to match is
	returned. An empty response will match the first value in responses. The
	input buffer is *not* cleared prior to the prompt!

	responses: a List of Strings.
	colours: a List of Functions taking and returning a String, used to
	process the responses for display. Typically these will be functions
	like red() but could be e.g. lambda x: "DisplayString".
	If responses is omitted, defaults to ["Yes", "No"], [green, red].
	If only colours is omitted, defaults to [bold, ...].

	Returns a member of the List responses. (If called without optional
	arguments, returns "Yes" or "No".)
	KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
	printed.
	"""
	# NOTE(review): several lines are not visible in this view (the
	# assignment collecting the default colour functions, the try:, the
	# return of the matched key, and the interrupt handler body).
	if responses is None:
		responses = ["Yes", "No"]
			create_color_func("PROMPT_CHOICE_DEFAULT"),
			create_color_func("PROMPT_CHOICE_OTHER")
	elif colours is None:
	# Repeat the colour functions so there is one per response.
	colours=(colours*len(responses))[:len(responses)]
	response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
	for key in responses:
		# An empty response will match the first value in responses.
		if response.upper()==key[:len(response)].upper():
	print "Sorry, response '%s' not understood." % response,
	except (EOFError, KeyboardInterrupt):
# Valid emerge action words (non-option positional arguments).
actions = frozenset([
	"clean", "config", "depclean",
	"info", "list-sets", "metadata",
	"prune", "regen", "search",
	"sync", "unmerge", "version",
# Long options accepted by emerge (the list's opening line and closing
# bracket are not visible in this view).
	"--ask", "--alphabetical",
	"--buildpkg", "--buildpkgonly",
	"--changelog", "--columns",
	"--fetchonly", "--fetch-all-uri",
	"--getbinpkg", "--getbinpkgonly",
	"--help", "--ignore-default-opts",
	"--newuse", "--nocolor",
	"--nodeps", "--noreplace",
	"--nospinner", "--oneshot",
	"--onlydeps", "--pretend",
	"--quiet", "--resume",
	"--searchdesc", "--selective",
	"--usepkg", "--usepkgonly",
# Map of single-letter short options to their long forms (the dict's
# opening line and closing brace are not visible in this view).
	"b":"--buildpkg", "B":"--buildpkgonly",
	"c":"--clean", "C":"--unmerge",
	"d":"--debug", "D":"--deep",
	"f":"--fetchonly", "F":"--fetch-all-uri",
	"g":"--getbinpkg", "G":"--getbinpkgonly",
	"k":"--usepkg", "K":"--usepkgonly",
	"n":"--noreplace", "N":"--newuse",
	"o":"--onlydeps", "O":"--nodeps",
	"p":"--pretend", "P":"--prune",
	"s":"--search", "S":"--searchdesc",
	"v":"--verbose", "V":"--version"
def emergelog(xterm_titles, mystr, short_msg=None):
	"""Append a timestamped entry to /var/log/emerge.log, optionally
	mirroring short_msg into the xterm title bar first."""
	if xterm_titles and short_msg:
		if "HOSTNAME" in os.environ:
			short_msg = os.environ["HOSTNAME"]+": "+short_msg
		xtermTitle(short_msg)
	# NOTE(review): the enclosing try:, the keyword arguments that complete
	# the apply_secpass_permissions call, and the seek/close lines are not
	# visible in this view; code is reproduced as-is.
	file_path = "/var/log/emerge.log"
	mylogfile = open(file_path, "a")
	# Keep the log owned by the portage user/group regardless of caller.
	portage.util.apply_secpass_permissions(file_path,
		uid=portage.portage_uid, gid=portage.portage_gid,
	mylock = portage.locks.lockfile(mylogfile)
	# seek because we may have gotten held up by the lock.
	# if so, we may not be positioned at the end of the file.
	mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
	portage.locks.unlockfile(mylock)
	except (IOError,OSError,portage.exception.PortageException), e:
		print >> sys.stderr, "emergelog():",e
def countdown(secs=5, doing="Starting"):
	"""Print a countdown warning before proceeding; Control-C aborts."""
	print ">>> Waiting",secs,"seconds before starting..."
	print ">>> (Control-C to abort)...\n"+doing+" in: ",
	# NOTE(review): the loop that binds ``sec`` (and its sleep) is not
	# visible in this view.
	sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
# formats a size given in bytes nicely
def format_size(mysize):
	# NOTE(review): several lines are not visible in this view (the
	# basestring early-return body, the computation of ``mycount``, and the
	# final return); code is reproduced as-is.
	if isinstance(mysize, basestring):
	if 0 != mysize % 1024:
		# Always round up to the next kB so that it doesn't show 0 kB when
		# some small file still needs to be fetched.
		mysize += 1024 - mysize % 1024
	mystr=str(mysize/1024)
	# Insert a comma thousands separator into the kB string.
	mystr=mystr[:mycount]+","+mystr[mycount:]
def getgccversion(chost):
	"""
	return: the current in-use gcc version
	"""
	gcc_ver_command = 'gcc -dumpversion'
	gcc_ver_prefix = 'gcc-'

	# Error shown only when every probe below fails (closing paren of this
	# call is not visible in this view).
	gcc_not_found_error = red(
	"!!! No gcc found. You probably need to 'source /etc/profile'\n" +
	"!!! to update the environment of this terminal and possibly\n" +
	"!!! other terminals also.\n"

	# First choice: ask gcc-config which profile is active for this CHOST.
	mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
	if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
		return myoutput.replace(chost + "-", gcc_ver_prefix, 1)

	# Second choice: the CHOST-prefixed compiler's own -dumpversion.
	mystatus, myoutput = commands.getstatusoutput(
		chost + "-" + gcc_ver_command)
	if mystatus == os.EX_OK:
		return gcc_ver_prefix + myoutput

	# Last resort: plain "gcc" from PATH.
	mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
	if mystatus == os.EX_OK:
		return gcc_ver_prefix + myoutput

	portage.writemsg(gcc_not_found_error, noiselevel=-1)
	return "[unavailable]"
def getportageversion(portdir, target_root, profile, chost, vardb):
	"""Build the version banner string: Portage version plus profile, gcc,
	libc and uname information."""
	profilever = "unavailable"
	# NOTE(review): the guard around the profile handling and the libc
	# loop headers/try blocks are not visible in this view.
	realpath = os.path.realpath(profile)
	basepath = os.path.realpath(os.path.join(portdir, "profiles"))
	if realpath.startswith(basepath):
		# Profile lives under $PORTDIR/profiles: report the relative path.
		profilever = realpath[1 + len(basepath):]
		# Otherwise report the raw symlink target, marked with "!".
		profilever = "!" + os.readlink(profile)
	del realpath, basepath

	# Determine the installed libc version(s) from the virtuals.
	libclist = vardb.match("virtual/libc")
	libclist += vardb.match("virtual/glibc")
	libclist = portage.util.unique_array(libclist)
	xs=portage.catpkgsplit(x)
	libcver+=","+"-".join(xs[1:])
	libcver="-".join(xs[1:])
	libcver="unavailable"

	gccver = getgccversion(chost)
	unameout=platform.release()+" "+platform.machine()

	return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
def create_depgraph_params(myopts, myaction):
	#configure emerge engine parameters
	#
	# self: include _this_ package regardless of if it is merged.
	# selective: exclude the package if it is merged
	# recurse: go into the dependencies
	# deep: go into the dependencies of already merged packages
	# empty: pretend nothing is merged
	# complete: completely account for all known dependencies
	# remove: build graph for use in removing packages
	myparams = set(["recurse"])

	if myaction == "remove":
		myparams.add("remove")
		myparams.add("complete")
		# NOTE(review): lines following here (likely an early return) are
		# not visible in this view.

	if "--update" in myopts or \
		"--newuse" in myopts or \
		"--reinstall" in myopts or \
		"--noreplace" in myopts:
		myparams.add("selective")
	if "--emptytree" in myopts:
		myparams.add("empty")
		myparams.discard("selective")
	if "--nodeps" in myopts:
		myparams.discard("recurse")
	# NOTE(review): the --deep branch body and the final return are not
	# visible in this view.
	if "--deep" in myopts:
	if "--complete-graph" in myopts:
		myparams.add("complete")
# search functionality
class search(object):
	# NOTE(review): many lines of this class are missing from this view
	# (method def lines such as _cp_all and output, loop headers, try
	# blocks, returns); code is reproduced as-is.

	def __init__(self, root_config, spinner, searchdesc,
		verbose, usepkg, usepkgonly):
		"""Searches the available and installed packages for the supplied search key.
		The list of available and installed packages is created at object instantiation.
		This makes successive searches faster."""
		self.settings = root_config.settings
		self.vartree = root_config.trees["vartree"]
		self.spinner = spinner
		self.verbose = verbose
		self.searchdesc = searchdesc
		self.root_config = root_config
		self.setconfig = root_config.setconfig
		self.matches = {"pkg" : []}
		# Expose a fake portdb whose db-access methods are redirected to
		# this object's _-prefixed multiplexing implementations.
		self.portdb = fake_portdb
		for attrib in ("aux_get", "cp_all",
			"xmatch", "findname", "getFetchMap"):
			setattr(fake_portdb, attrib, getattr(self, "_"+attrib))

		portdb = root_config.trees["porttree"].dbapi
		bindb = root_config.trees["bintree"].dbapi
		vardb = root_config.trees["vartree"].dbapi
		# Select which dbs to search based on --usepkg/--usepkgonly.
		if not usepkgonly and portdb._have_root_eclass_dir:
			self._dbs.append(portdb)
		if (usepkg or usepkgonly) and bindb.cp_all():
			self._dbs.append(bindb)
		self._dbs.append(vardb)
		self._portdb = portdb

	# _cp_all: union of cp_all() across all searched dbs (def line not
	# visible here).
			cp_all.update(db.cp_all())
		return list(sorted(cp_all))

	def _aux_get(self, *args, **kwargs):
			return db.aux_get(*args, **kwargs)

	def _findname(self, *args, **kwargs):
			if db is not self._portdb:
				# We don't want findname to return anything
				# unless it's an ebuild in a portage tree.
				# Otherwise, it's already built and we don't
			func = getattr(db, "findname", None)
				value = func(*args, **kwargs)

	def _getFetchMap(self, *args, **kwargs):
			func = getattr(db, "getFetchMap", None)
				value = func(*args, **kwargs)

	def _visible(self, db, cpv, metadata):
		# Wrap cpv+metadata in a Package and delegate to the global
		# visible() check.
		installed = db is self.vartree.dbapi
		built = installed or db is not self._portdb
			pkg_type = "installed"
		return visible(self.settings,
			Package(type_name=pkg_type, root_config=self.root_config,
			cpv=cpv, built=built, installed=installed, metadata=metadata))

	def _xmatch(self, level, atom):
		"""
		This method does not expand old-style virtuals because it
		is restricted to returning matches for a single ${CATEGORY}/${PN}
		and old-style virtual matches are unreliable for that when querying
		multiple package databases. If necessary, old-style virtuals
		can be performed on atoms prior to calling this method.
		"""
		cp = portage.dep_getkey(atom)
		if level == "match-all":
				if hasattr(db, "xmatch"):
					matches.update(db.xmatch(level, atom))
					matches.update(db.match(atom))
			result = list(x for x in matches if portage.cpv_getkey(x) == cp)
			db._cpv_sort_ascending(result)
		elif level == "match-visible":
				if hasattr(db, "xmatch"):
					matches.update(db.xmatch(level, atom))
					db_keys = list(db._aux_cache_keys)
					for cpv in db.match(atom):
						metadata = izip(db_keys,
							db.aux_get(cpv, db_keys))
						if not self._visible(db, cpv, metadata):
			result = list(x for x in matches if portage.cpv_getkey(x) == cp)
			db._cpv_sort_ascending(result)
		elif level == "bestmatch-visible":
				if hasattr(db, "xmatch"):
					cpv = db.xmatch("bestmatch-visible", atom)
					if not cpv or portage.cpv_getkey(cpv) != cp:
					if not result or cpv == portage.best([cpv, result]):
					db_keys = Package.metadata_keys
					# break out of this loop with highest visible
					# match, checked in descending order
					for cpv in reversed(db.match(atom)):
						if portage.cpv_getkey(cpv) != cp:
						metadata = izip(db_keys,
							db.aux_get(cpv, db_keys))
						if not self._visible(db, cpv, metadata):
						if not result or cpv == portage.best([cpv, result]):
			raise NotImplementedError(level)

	def execute(self,searchkey):
		"""Performs the search for the supplied search key"""
		self.searchkey=searchkey
		self.packagematches = []
			self.matches = {"pkg":[], "desc":[], "set":[]}
			self.matches = {"pkg":[], "set":[]}
		print "Searching... ",

		# Leading '%' selects regex search; leading '@' restricts the
		# match to the category/name portion.
		if self.searchkey.startswith('%'):
			self.searchkey = self.searchkey[1:]
		if self.searchkey.startswith('@'):
			self.searchkey = self.searchkey[1:]
			self.searchre=re.compile(self.searchkey,re.I)
			self.searchre=re.compile(re.escape(self.searchkey), re.I)
		for package in self.portdb.cp_all():
			self.spinner.update()

				match_string = package[:]
				match_string = package.split("/")[-1]

			if self.searchre.search(match_string):
				if not self.portdb.xmatch("match-visible", package):
				self.matches["pkg"].append([package,masked])
			elif self.searchdesc: # DESCRIPTION searching
				full_package = self.portdb.xmatch("bestmatch-visible", package)
					#no match found; we don't want to query description
					full_package = portage.best(
						self.portdb.xmatch("match-all", package))
					full_desc = self.portdb.aux_get(
						full_package, ["DESCRIPTION"])[0]
					print "emerge: search: aux_get() failed, skipping"
				if self.searchre.search(full_desc):
					self.matches["desc"].append([full_package,masked])

		# Also search package sets by name and (optionally) description.
		self.sdict = self.setconfig.getSets()
		for setname in self.sdict:
			self.spinner.update()
				match_string = setname
				match_string = setname.split("/")[-1]

			if self.searchre.search(match_string):
				self.matches["set"].append([setname, False])
			elif self.searchdesc:
				if self.searchre.search(
					self.sdict[setname].getMetadata("DESCRIPTION")):
					self.matches["set"].append([setname, False])

		for mtype in self.matches:
			self.matches[mtype].sort()
			self.mlen += len(self.matches[mtype])

	# addCP-style helper: record a single cp match (def line not visible).
		if not self.portdb.xmatch("match-all", cp):
		if not self.portdb.xmatch("bestmatch-visible", cp):
		self.matches["pkg"].append([cp, masked])

	# output() — its def line is not visible here.
		"""Outputs the results of the search."""
		print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
		print "[ Applications found : "+white(str(self.mlen))+" ]"
		vardb = self.vartree.dbapi
		for mtype in self.matches:
			for match,masked in self.matches[mtype]:
					full_package = self.portdb.xmatch(
						"bestmatch-visible", match)
						#no match found; we don't want to query description
						full_package = portage.best(
							self.portdb.xmatch("match-all",match))
				elif mtype == "desc":
					match = portage.cpv_getkey(match)
					print green("*")+"  "+white(match)
					print " ", darkgreen("Description:")+"  ", self.sdict[match].getMetadata("DESCRIPTION")
						desc, homepage, license = self.portdb.aux_get(
							full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
						print "emerge: search: aux_get() failed, skipping"
						print green("*")+"  "+white(match)+" "+red("[ Masked ]")
						print green("*")+"  "+white(match)
					myversion = self.getVersion(full_package, search.VERSION_RELEASE)

						mycat = match.split("/")[0]
						mypkg = match.split("/")[1]
						mycpv = match + "-" + myversion
						myebuild = self.portdb.findname(mycpv)
							pkgdir = os.path.dirname(myebuild)
							from portage import manifest
							mf = manifest.Manifest(
								pkgdir, self.settings["DISTDIR"])
							uri_map = self.portdb.getFetchMap(mycpv)
						except portage.exception.InvalidDependString, e:
							file_size_str = "Unknown (%s)" % (e,)
								mysum[0] = mf.getDistfilesSize(uri_map)
								file_size_str = "Unknown (missing " + \
									"digest for %s)" % (e,)

							# Fall back to the built package's own size.
							if db is not vardb and \
								db.cpv_exists(mycpv):
								if not myebuild and hasattr(db, "bintree"):
									myebuild = db.bintree.getname(mycpv)
										mysum[0] = os.stat(myebuild).st_size

						if myebuild and file_size_str is None:
							mystr = str(mysum[0] / 1024)
								mystr = mystr[:mycount] + "," + mystr[mycount:]
							file_size_str = mystr + " kB"

						print "      ", darkgreen("Latest version available:"),myversion
						print "      ", self.getInstallationStatus(mycat+'/'+mypkg)
								(darkgreen("Size of files:"), file_size_str)
						print "      ", darkgreen("Homepage:")+"     ",homepage
						print "      ", darkgreen("Description:")+"  ",desc
						print "      ", darkgreen("License:")+"      ",license

	def getInstallationStatus(self,package):
		installed_package = self.vartree.dep_bestmatch(package)
			version = self.getVersion(installed_package,search.VERSION_RELEASE)
				result = darkgreen("Latest version installed:")+" "+version
			result = darkgreen("Latest version installed:")+" [ Not Installed ]"

	def getVersion(self,full_package,detail):
		if len(full_package) > 1:
			package_parts = portage.catpkgsplit(full_package)
			# Include the revision suffix only when it is a real revision.
			if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
				result = package_parts[2]+ "-" + package_parts[3]
				result = package_parts[2]
class RootConfig(object):
	"""This is used internally by depgraph to track information about a
	particular $ROOT instance (settings, trees, package sets)."""

	# Maps package type name onto the tree that provides it (dict opening
	# line is not visible in this view).
		"ebuild" : "porttree",
		"binary" : "bintree",
		"installed" : "vartree"

	# Build the inverse (tree -> pkg type) mapping.
	for k, v in pkg_tree_map.iteritems():

	def __init__(self, settings, trees, setconfig):
		self.settings = settings
		self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
		self.root = self.settings["ROOT"]
		self.setconfig = setconfig
		if setconfig is None:
		self.sets = self.setconfig.getSets()
		self.visible_pkgs = PackageVirtualDbapi(self.settings)
def create_world_atom(pkg, args_set, root_config):
	"""Create a new atom for the world file if one does not exist. If the
	argument atom is precise enough to identify a specific slot then a slot
	atom will be returned. Atoms that are in the system set may also be stored
	in world since system atoms can only match one slot while world atoms can
	be greedy with respect to slots. Unslotted system packages will not be
	stored in world.
	"""
	# NOTE(review): several lines are not visible in this view (early
	# returns, the loop headers binding mydb/cpv, and the default
	# new_world_atom assignment); code is reproduced as-is.
	arg_atom = args_set.findAtomForPackage(pkg)
	cp = portage.dep_getkey(arg_atom)
	sets = root_config.sets
	portdb = root_config.trees["porttree"].dbapi
	vardb = root_config.trees["vartree"].dbapi
	# Determine whether the package's cp is slotted in the portage tree.
	available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
		for cpv in portdb.match(cp))
	slotted = len(available_slots) > 1 or \
		(len(available_slots) == 1 and "0" not in available_slots)
		# check the vdb in case this is multislot
		available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
			for cpv in vardb.match(cp))
		slotted = len(available_slots) > 1 or \
			(len(available_slots) == 1 and "0" not in available_slots)
	if slotted and arg_atom != cp:
		# If the user gave a specific atom, store it as a
		# slot atom in the world file.
		slot_atom = pkg.slot_atom

		# For USE=multislot, there are a couple of cases to
		#
		# 1) SLOT="0", but the real SLOT spontaneously changed to some
		#    unknown value, so just record an unslotted atom.
		#
		# 2) SLOT comes from an installed package and there is no
		#    matching SLOT in the portage tree.
		#
		# Make sure that the slot atom is available in either the
		# portdb or the vardb, since otherwise the user certainly
		# doesn't want the SLOT atom recorded in the world file
		# (case 1 above).  If it's only available in the vardb,
		# the user may be trying to prevent a USE=multislot
		# package from being removed by --depclean (case 2 above).

		if not portdb.match(slot_atom):
			# SLOT seems to come from an installed multislot package
			# If there is no installed package matching the SLOT atom,
			# it probably changed SLOT spontaneously due to USE=multislot,
			# so just record an unslotted atom.
			if vardb.match(slot_atom):
				# Now verify that the argument is precise
				# enough to identify a specific slot.
				matches = mydb.match(arg_atom)
				matched_slots = set()
					matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
				if len(matched_slots) == 1:
					new_world_atom = slot_atom

	if new_world_atom == sets["world"].findAtomForPackage(pkg):
		# Both atoms would be identical, so there's nothing to add.

	# Unlike world atoms, system atoms are not greedy for slots, so they
	# can't be safely excluded from world if they are slotted.
	system_atom = sets["system"].findAtomForPackage(pkg)
		if not portage.dep_getkey(system_atom).startswith("virtual/"):
		# System virtuals aren't safe to exclude from world since they can
		# match multiple old-style virtuals but only one of them will be
		# pulled in by update or depclean.
		providers = portdb.mysettings.getvirtuals().get(
			portage.dep_getkey(system_atom))
		if providers and len(providers) == 1 and providers[0] == cp:
	return new_world_atom
def filter_iuse_defaults(iuse):
	# NOTE(review): the loop/yield lines of this generator are not visible
	# in this view; the visible test distinguishes IUSE flags that carry a
	# leading "+"/"-" default marker.
	if flag.startswith("+") or flag.startswith("-"):
class SlotObject(object):
	# Base class whose subclasses declare their attributes via __slots__;
	# __init__ accepts them as keyword arguments.
	__slots__ = ("__weakref__",)

	def __init__(self, **kwargs):
		# Walk this class and its bases so that slots declared anywhere in
		# the hierarchy are initialized (loop headers not visible here).
		classes = [self.__class__]
			classes.extend(c.__bases__)
			slots = getattr(c, "__slots__", None)
				myvalue = kwargs.get(myattr, None)
				setattr(self, myattr, myvalue)

	# copy() — its def line is not visible in this view.
		"""
		Create a new instance and copy all attributes
		defined from __slots__ (including those from
		inherited classes).
		"""
		obj = self.__class__()

		classes = [self.__class__]
			classes.extend(c.__bases__)
			slots = getattr(c, "__slots__", None)
				setattr(obj, myattr, getattr(self, myattr))
class AbstractDepPriority(SlotObject):
	# Rich comparisons delegate to __int__() so priority objects compare
	# like integers.
	__slots__ = ("buildtime", "runtime", "runtime_post")

	def __lt__(self, other):
		return self.__int__() < other

	def __le__(self, other):
		return self.__int__() <= other

	def __eq__(self, other):
		return self.__int__() == other

	def __ne__(self, other):
		return self.__int__() != other

	def __gt__(self, other):
		return self.__int__() > other

	def __ge__(self, other):
		return self.__int__() >= other

	# copy() — its def line is not visible in this view.
		return copy.copy(self)
class DepPriority(AbstractDepPriority):

	__slots__ = ("satisfied", "optional", "rebuild")

	# NOTE(review): __int__ and most of __str__ are not visible in this
	# view; only the runtime_post branch of the string form remains.
		if self.runtime_post:
			return "runtime_post"
class BlockerDepPriority(DepPriority):
	# NOTE(review): the class body is not visible in this view.
# Shared singleton instance used for blocker dependencies.
BlockerDepPriority.instance = BlockerDepPriority()
class UnmergeDepPriority(AbstractDepPriority):
	__slots__ = ("optional", "satisfied",)
	"""
	Combination of properties      Priority  Category
	(none of the above)            -2        SOFT
	"""

	# NOTE(review): __int__/__str__ bodies are only partially visible in
	# this view; code is reproduced as-is.
		if self.runtime_post:
		myvalue = self.__int__()
		if myvalue > self.SOFT:
class DepPriorityNormalRange(object):
	"""
	DepPriority properties  Index  Category
	runtime_post            2      MEDIUM_SOFT
	(none of the above)     0      NONE
	"""

	# NOTE(review): the @classmethod decorators and some early-return
	# lines are not visible in this view.
	def _ignore_optional(cls, priority):
		if priority.__class__ is not DepPriority:
		return bool(priority.optional)

	def _ignore_runtime_post(cls, priority):
		if priority.__class__ is not DepPriority:
		return bool(priority.optional or priority.runtime_post)

	def _ignore_runtime(cls, priority):
		if priority.__class__ is not DepPriority:
		return not priority.buildtime

	ignore_medium = _ignore_runtime
	ignore_medium_soft = _ignore_runtime_post
	ignore_soft = _ignore_optional

# Filters indexed by strictness; None-entry lines and closing paren are
# not visible in this view.
DepPriorityNormalRange.ignore_priority = (
	DepPriorityNormalRange._ignore_optional,
	DepPriorityNormalRange._ignore_runtime_post,
	DepPriorityNormalRange._ignore_runtime
class DepPrioritySatisfiedRange(object):
	"""
	DepPriority                         Index  Category
	not satisfied and buildtime                HARD
	not satisfied and runtime           7      MEDIUM
	not satisfied and runtime_post      6      MEDIUM_SOFT
	satisfied and buildtime and rebuild 5      SOFT
	satisfied and buildtime             4      SOFT
	satisfied and runtime               3      SOFT
	satisfied and runtime_post          2      SOFT
	(none of the above)                 0      NONE
	"""

	# NOTE(review): the @classmethod decorators and some early-return
	# lines are not visible in this view.
	def _ignore_optional(cls, priority):
		if priority.__class__ is not DepPriority:
		return bool(priority.optional)

	def _ignore_satisfied_runtime_post(cls, priority):
		if priority.__class__ is not DepPriority:
		if priority.optional:
		if not priority.satisfied:
		return bool(priority.runtime_post)

	def _ignore_satisfied_runtime(cls, priority):
		if priority.__class__ is not DepPriority:
		if priority.optional:
		if not priority.satisfied:
		return not priority.buildtime

	def _ignore_satisfied_buildtime(cls, priority):
		if priority.__class__ is not DepPriority:
		if priority.optional:
		if not priority.satisfied:
		if priority.buildtime:
			return not priority.rebuild

	def _ignore_satisfied_buildtime_rebuild(cls, priority):
		if priority.__class__ is not DepPriority:
		if priority.optional:
		return bool(priority.satisfied)

	def _ignore_runtime_post(cls, priority):
		if priority.__class__ is not DepPriority:
		return bool(priority.optional or \
			priority.satisfied or \
			priority.runtime_post)

	def _ignore_runtime(cls, priority):
		if priority.__class__ is not DepPriority:
		return bool(priority.satisfied or \
			not priority.buildtime)

	ignore_medium = _ignore_runtime
	ignore_medium_soft = _ignore_runtime_post
	ignore_soft = _ignore_satisfied_buildtime_rebuild

# Filters ordered by increasing strictness; the None entry and closing
# paren are not visible in this view.
DepPrioritySatisfiedRange.ignore_priority = (
	DepPrioritySatisfiedRange._ignore_optional,
	DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
	DepPrioritySatisfiedRange._ignore_satisfied_runtime,
	DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
	DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
	DepPrioritySatisfiedRange._ignore_runtime_post,
	DepPrioritySatisfiedRange._ignore_runtime
def _find_deep_system_runtime_deps(graph):
	"""Return the set of system-set packages plus everything reachable from
	them through runtime dependency edges of the given graph."""
	deep_system_deps = set()
	# NOTE(review): node_stack initialization, the seeding loop header, and
	# the traversal while-loop header are not visible in this view.
		if not isinstance(node, Package) or \
			node.operation == 'uninstall':
		if node.root_config.sets['system'].findAtomForPackage(node):
			node_stack.append(node)

	def ignore_priority(priority):
		"""
		Ignore non-runtime priorities.
		"""
		if isinstance(priority, DepPriority) and \
			(priority.runtime or priority.runtime_post):

		node = node_stack.pop()
		if node in deep_system_deps:
		deep_system_deps.add(node)
		for child in graph.child_nodes(node, ignore_priority=ignore_priority):
			if not isinstance(child, Package) or \
				child.operation == 'uninstall':
			node_stack.append(child)

	return deep_system_deps
1162 class FakeVartree(portage.vartree):
1163 """This is implements an in-memory copy of a vartree instance that provides
1164 all the interfaces required for use by the depgraph. The vardb is locked
1165 during the constructor call just long enough to read a copy of the
1166 installed package information. This allows the depgraph to do it's
1167 dependency calculations without holding a lock on the vardb. It also
1168 allows things like vardb global updates to be done in memory so that the
1169 user doesn't necessarily need write access to the vardb in cases where
1170 global updates are necessary (updates are performed when necessary if there
1171 is not a matching ebuild in the tree)."""
1172 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1173 self._root_config = root_config
1174 if pkg_cache is None:
1176 real_vartree = root_config.trees["vartree"]
1177 portdb = root_config.trees["porttree"].dbapi
1178 self.root = real_vartree.root
1179 self.settings = real_vartree.settings
1180 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1181 if "_mtime_" not in mykeys:
1182 mykeys.append("_mtime_")
1183 self._db_keys = mykeys
1184 self._pkg_cache = pkg_cache
1185 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1186 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1188 # At least the parent needs to exist for the lock file.
1189 portage.util.ensure_dirs(vdb_path)
1190 except portage.exception.PortageException:
1194 if acquire_lock and os.access(vdb_path, os.W_OK):
1195 vdb_lock = portage.locks.lockdir(vdb_path)
1196 real_dbapi = real_vartree.dbapi
1198 for cpv in real_dbapi.cpv_all():
1199 cache_key = ("installed", self.root, cpv, "nomerge")
1200 pkg = self._pkg_cache.get(cache_key)
1202 metadata = pkg.metadata
1204 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1205 myslot = metadata["SLOT"]
1206 mycp = portage.dep_getkey(cpv)
1207 myslot_atom = "%s:%s" % (mycp, myslot)
1209 mycounter = long(metadata["COUNTER"])
1212 metadata["COUNTER"] = str(mycounter)
1213 other_counter = slot_counters.get(myslot_atom, None)
1214 if other_counter is not None:
1215 if other_counter > mycounter:
1217 slot_counters[myslot_atom] = mycounter
1219 pkg = Package(built=True, cpv=cpv,
1220 installed=True, metadata=metadata,
1221 root_config=root_config, type_name="installed")
1222 self._pkg_cache[pkg] = pkg
1223 self.dbapi.cpv_inject(pkg)
1224 real_dbapi.flush_cache()
1227 portage.locks.unlockdir(vdb_lock)
1228 # Populate the old-style virtuals using the cached values.
1229 if not self.settings.treeVirtuals:
1230 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1231 portage.getCPFromCPV, self.get_all_provides())
1233 # Intialize variables needed for lazy cache pulls of the live ebuild
1234 # metadata. This ensures that the vardb lock is released ASAP, without
1235 # being delayed in case cache generation is triggered.
1236 self._aux_get = self.dbapi.aux_get
1237 self.dbapi.aux_get = self._aux_get_wrapper
1238 self._match = self.dbapi.match
1239 self.dbapi.match = self._match_wrapper
1240 self._aux_get_history = set()
1241 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1242 self._portdb = portdb
1243 self._global_updates = None
1245 def _match_wrapper(self, cpv, use_cache=1):
1247 Make sure the metadata in Package instances gets updated for any
1248 cpv that is returned from a match() call, since the metadata can
1249 be accessed directly from the Package instance instead of via
1252 matches = self._match(cpv, use_cache=use_cache)
1254 if cpv in self._aux_get_history:
1256 self._aux_get_wrapper(cpv, [])
1259 def _aux_get_wrapper(self, pkg, wants):
1260 if pkg in self._aux_get_history:
1261 return self._aux_get(pkg, wants)
1262 self._aux_get_history.add(pkg)
1264 # Use the live ebuild metadata if possible.
1265 live_metadata = dict(izip(self._portdb_keys,
1266 self._portdb.aux_get(pkg, self._portdb_keys)))
1267 if not portage.eapi_is_supported(live_metadata["EAPI"]):
1269 self.dbapi.aux_update(pkg, live_metadata)
1270 except (KeyError, portage.exception.PortageException):
1271 if self._global_updates is None:
1272 self._global_updates = \
1273 grab_global_updates(self._portdb.porttree_root)
1274 perform_global_updates(
1275 pkg, self.dbapi, self._global_updates)
1276 return self._aux_get(pkg, wants)
# NOTE(review): heavily gapped dump (try/except/finally scaffolding and
# several statements are missing); kept byte-identical with comments only.
# Purpose: re-synchronize this fake vartree's package cache with the real
# installed-package database after merges/unmerges may have occurred.
1278 def sync(self, acquire_lock=1):
1280 Call this method to synchronize state with the real vardb
1281 after one or more packages may have been installed or
1284 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1286 # At least the parent needs to exist for the lock file.
1287 portage.util.ensure_dirs(vdb_path)
1288 except portage.exception.PortageException:
# Lock only when writable; presumably the lock is released in a
# finally: block (unlockdir below) — dump omits that scaffolding.
1292 if acquire_lock and os.access(vdb_path, os.W_OK):
1293 vdb_lock = portage.locks.lockdir(vdb_path)
1297 portage.locks.unlockdir(vdb_lock)
1301 real_vardb = self._root_config.trees["vartree"].dbapi
1302 current_cpv_set = frozenset(real_vardb.cpv_all())
1303 pkg_vardb = self.dbapi
1304 aux_get_history = self._aux_get_history
1306 # Remove any packages that have been uninstalled.
1307 for pkg in list(pkg_vardb):
1308 if pkg.cpv not in current_cpv_set:
1309 pkg_vardb.cpv_remove(pkg)
1310 aux_get_history.discard(pkg.cpv)
1312 # Validate counters and timestamps.
1315 validation_keys = ["COUNTER", "_mtime_"]
1316 for cpv in current_cpv_set:
1318 pkg_hash_key = ("installed", root, cpv, "nomerge")
1319 pkg = pkg_vardb.get(pkg_hash_key)
1321 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1323 counter = long(counter)
# Stale cache entry: counter or mtime changed → drop and rebuild.
1327 if counter != pkg.counter or \
1329 pkg_vardb.cpv_remove(pkg)
1330 aux_get_history.discard(pkg.cpv)
1334 pkg = self._pkg(cpv)
# Track the highest counter per slot atom (duplicate-slot handling;
# surrounding logic partially missing from this dump).
1336 other_counter = slot_counters.get(pkg.slot_atom)
1337 if other_counter is not None:
1338 if other_counter > pkg.counter:
1341 slot_counters[pkg.slot_atom] = pkg.counter
1342 pkg_vardb.cpv_inject(pkg)
1344 real_vardb.flush_cache()
# NOTE(review): the try/except around the COUNTER conversion and the final
# return are among the lines this dump dropped.
# Purpose: construct an installed-type Package for cpv from the real
# vardb's metadata, normalizing its COUNTER value.
1346 def _pkg(self, cpv):
1347 root_config = self._root_config
1348 real_vardb = root_config.trees["vartree"].dbapi
1349 pkg = Package(cpv=cpv, installed=True,
1350 metadata=izip(self._db_keys,
1351 real_vardb.aux_get(cpv, self._db_keys)),
1352 root_config=root_config,
1353 type_name="installed")
1356 mycounter = long(pkg.metadata["COUNTER"])
# Write the normalized counter back as a string (metadata values are str).
1359 pkg.metadata["COUNTER"] = str(mycounter)
def grab_global_updates(portdir):
	"""
	Parse the global update commands from $PORTDIR/profiles/updates.

	@param portdir: path to the root of the portage tree
	@returns: a flat list of parsed update commands; empty when the
		updates directory does not exist

	NOTE(review): reconstructed from a corrupted line-numbered dump; the
	try/except scaffolding, accumulator initialization and return were
	restored from the visible structure. The original local name
	"commands" shadowed the module-level "commands" import, so it has
	been renamed to "parsed_commands".
	"""
	from portage.update import grab_updates, parse_updates
	updpath = os.path.join(portdir, "profiles", "updates")
	try:
		rawupdates = grab_updates(updpath)
	except portage.exception.DirectoryNotFound:
		# No updates directory: treat as "no updates" rather than failing.
		rawupdates = []
	upd_commands = []
	for mykey, mystat, mycontent in rawupdates:
		parsed_commands, errors = parse_updates(mycontent)
		upd_commands.extend(parsed_commands)
	return upd_commands
def perform_global_updates(mycpv, mydb, mycommands):
	"""
	Apply global package-move/slot-move update commands to the
	dependency metadata of a single package in the given database.

	@param mycpv: the cpv whose metadata should be updated
	@param mydb: a dbapi-like object supporting aux_get()/aux_update()
	@param mycommands: update commands, e.g. from grab_global_updates()

	NOTE(review): reconstructed from a corrupted dump; the "if updates:"
	guard (only write back when something changed) was restored from the
	visible structure — confirm against the original source.
	"""
	from portage.update import update_dbentries
	aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
	aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
	updates = update_dbentries(mycommands, aux_dict)
	if updates:
		mydb.aux_update(mycpv, updates)
# NOTE(review): gapped dump — each masking check presumably ends in a
# "return False" that the dump dropped, with a final "return True";
# confirm against the original source. Text kept byte-identical.
# Purpose: visibility check combining SLOT validity, CHOST acceptance,
# EAPI support, keywords, package.mask, profile masks and licenses.
1384 def visible(pkgsettings, pkg):
1386 Check if a package is visible. This can raise an InvalidDependString
1387 exception if LICENSE is invalid.
1388 TODO: optionally generate a list of masking reasons
1390 @returns: True if the package is visible, False otherwise.
1392 if not pkg.metadata["SLOT"]:
# CHOST acceptance only matters for packages not yet installed.
1394 if not pkg.installed:
1395 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1397 eapi = pkg.metadata["EAPI"]
1398 if not portage.eapi_is_supported(eapi):
1400 if not pkg.installed:
1401 if portage._eapi_is_deprecated(eapi):
1403 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1405 if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1407 if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1410 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
# An invalid LICENSE string is treated as a visibility failure here.
1412 except portage.exception.InvalidDependString:
# NOTE(review): gapped dump (the surrounding try/except and the return of
# mreasons are missing). Purpose: collect human-readable masking reasons
# for pkg, appending CHOST and SLOT problems to portage's standard list.
1416 def get_masking_status(pkg, pkgsettings, root_config):
1418 mreasons = portage.getmaskingstatus(
1419 pkg, settings=pkgsettings,
1420 portdb=root_config.trees["porttree"].dbapi)
1422 if not pkg.installed:
1423 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1424 mreasons.append("CHOST: %s" % \
1425 pkg.metadata["CHOST"])
1427 if not pkg.metadata["SLOT"]:
1428 mreasons.append("invalid: SLOT is undefined")
# NOTE(review): gapped dump — the try/except around aux_get (which
# presumably sets metadata to None on failure) is missing.
# Purpose: fetch metadata for cpv from db and compute masking reasons;
# returns (metadata, mreasons) where metadata may be None on corruption.
1432 def get_mask_info(root_config, cpv, pkgsettings,
1433 db, pkg_type, built, installed, db_keys):
1436 metadata = dict(izip(db_keys,
1437 db.aux_get(cpv, db_keys)))
# For unbuilt ebuilds, compute USE/CHOST from the current config.
1440 if metadata and not built:
1441 pkgsettings.setcpv(cpv, mydb=metadata)
1442 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1443 metadata['CHOST'] = pkgsettings.get('CHOST', '')
1444 if metadata is None:
1445 mreasons = ["corruption"]
1447 eapi = metadata['EAPI']
1450 if not portage.eapi_is_supported(eapi):
1451 mreasons = ['EAPI %s' % eapi]
1453 pkg = Package(type_name=pkg_type, root_config=root_config,
1454 cpv=cpv, built=built, installed=installed, metadata=metadata)
1455 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1456 return metadata, mreasons
# NOTE(review): gapped dump (shown_cpvs initialization, several continue
# statements and print calls are missing). Purpose: print each masked
# package with its masking reasons, de-duplicating cpvs, package.mask
# comments and license notices; returns whether any EAPI mask was seen.
1458 def show_masked_packages(masked_packages):
1459 shown_licenses = set()
1460 shown_comments = set()
1461 # Maybe there is both an ebuild and a binary. Only
1462 # show one of them to avoid redundant appearance.
1464 have_eapi_mask = False
1465 for (root_config, pkgsettings, cpv,
1466 metadata, mreasons) in masked_packages:
1467 if cpv in shown_cpvs:
1470 comment, filename = None, None
1471 if "package.mask" in mreasons:
# Look up the comment block from the package.mask file that masks cpv.
1472 comment, filename = \
1473 portage.getmaskingreason(
1474 cpv, metadata=metadata,
1475 settings=pkgsettings,
1476 portdb=root_config.trees["porttree"].dbapi,
1477 return_location=True)
1478 missing_licenses = []
1480 if not portage.eapi_is_supported(metadata["EAPI"]):
1481 have_eapi_mask = True
1483 missing_licenses = \
1484 pkgsettings._getMissingLicenses(
1486 except portage.exception.InvalidDependString:
1487 # This will have already been reported
1488 # above via mreasons.
1491 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1492 if comment and comment not in shown_comments:
1495 shown_comments.add(comment)
1496 portdb = root_config.trees["porttree"].dbapi
1497 for l in missing_licenses:
1498 l_path = portdb.findLicensePath(l)
1499 if l in shown_licenses:
1501 msg = ("A copy of the '%s' license" + \
1502 " is located at '%s'.") % (l, l_path)
1505 shown_licenses.add(l)
1506 return have_eapi_mask
class Task(SlotObject):
	"""
	Base class for hashable task objects. A subclass provides a
	_hash_key tuple, which drives equality, hashing, length,
	indexing, iteration, containment and string conversion, so a
	Task can be used interchangeably with its key tuple.

	NOTE(review): reconstructed from a line-numbered dump with
	dropped lines; the restored statements (the dunder method
	headers and the "return hash_key" line) follow directly from
	the visible structure — confirm against the original source.
	"""
	__slots__ = ("_hash_key", "_hash_value")

	def _get_hash_key(self):
		# Subclasses are expected to set self._hash_key lazily;
		# reaching here without one is a programming error.
		hash_key = getattr(self, "_hash_key", None)
		if hash_key is None:
			raise NotImplementedError(self)
		return hash_key

	def __eq__(self, other):
		return self._get_hash_key() == other

	def __ne__(self, other):
		return self._get_hash_key() != other

	def __hash__(self):
		# Cache the hash; the key tuple is immutable once created.
		hash_value = getattr(self, "_hash_value", None)
		if hash_value is None:
			self._hash_value = hash(self._get_hash_key())
		return self._hash_value

	def __len__(self):
		return len(self._get_hash_key())

	def __getitem__(self, key):
		return self._get_hash_key()[key]

	def __iter__(self):
		return iter(self._get_hash_key())

	def __contains__(self, key):
		return key in self._get_hash_key()

	def __str__(self):
		return str(self._get_hash_key())
class Blocker(Task):
	"""
	Represents a blocker atom encountered for a particular root.
	The hash key ("blocks", root, atom, eapi) identifies it.

	NOTE(review): reconstructed from a corrupted dump; only the
	"self._hash_key = \\" assignment line was missing and has been
	restored (it mirrors Package._get_hash_key in this file).
	"""

	# Task.__hash__ must be restated because defining __eq__ in a
	# class suppresses inherited __hash__.
	__hash__ = Task.__hash__
	__slots__ = ("root", "atom", "cp", "eapi", "satisfied")

	def __init__(self, **kwargs):
		Task.__init__(self, **kwargs)
		# Cache the category/package key of the blocked atom.
		self.cp = portage.dep_getkey(self.atom)

	def _get_hash_key(self):
		hash_key = getattr(self, "_hash_key", None)
		if hash_key is None:
			self._hash_key = \
				("blocks", self.root, self.atom, self.eapi)
		return self._hash_key
# NOTE(review): gapped dump — metadata_keys header, the SLOT handling
# around slot_atom, the _use class header, parts of _iuse.__init__ and
# the return statements of the comparison methods are missing; text kept
# byte-identical. Purpose: Task subclass modeling one ebuild/binpkg/
# installed package, with parsed cpv fields, USE/IUSE helpers and
# version-based rich comparisons.
1560 class Package(Task):
1562 __hash__ = Task.__hash__
1563 __slots__ = ("built", "cpv", "depth",
1564 "installed", "metadata", "onlydeps", "operation",
1565 "root_config", "type_name",
1566 "category", "counter", "cp", "cpv_split",
1567 "inherited", "iuse", "mtime",
1568 "pf", "pv_split", "root", "slot", "slot_atom", "use")
# Metadata keys fetched for every Package (list header missing from dump).
1571 "CHOST", "COUNTER", "DEPEND", "EAPI",
1572 "INHERITED", "IUSE", "KEYWORDS",
1573 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1574 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1576 def __init__(self, **kwargs):
1577 Task.__init__(self, **kwargs)
1578 self.root = self.root_config.root
# Wrap metadata so writes sync back into Package attributes.
1579 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1580 self.cp = portage.cpv_getkey(self.cpv)
1583 # Avoid an InvalidAtom exception when creating slot_atom.
1584 # This package instance will be masked due to empty SLOT.
1586 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
1587 self.category, self.pf = portage.catsplit(self.cpv)
1588 self.cpv_split = portage.catpkgsplit(self.cpv)
1589 self.pv_split = self.cpv_split[1:]
# Nested helper class (header dropped by dump): immutable USE-flag set.
1593 __slots__ = ("__weakref__", "enabled")
1595 def __init__(self, use):
1596 self.enabled = frozenset(use)
1598 class _iuse(object):
1600 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1602 def __init__(self, tokens, iuse_implicit):
1603 self.tokens = tuple(tokens)
1604 self.iuse_implicit = iuse_implicit
# Tokens prefixed with +/- mark default-enabled/disabled flags.
1611 enabled.append(x[1:])
1613 disabled.append(x[1:])
1616 self.enabled = frozenset(enabled)
1617 self.disabled = frozenset(disabled)
1618 self.all = frozenset(chain(enabled, disabled, other))
# Lazily build and cache the IUSE-matching regex on first access.
1620 def __getattribute__(self, name):
1623 return object.__getattribute__(self, "regex")
1624 except AttributeError:
1625 all = object.__getattribute__(self, "all")
1626 iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1627 # Escape anything except ".*" which is supposed
1628 # to pass through from _get_implicit_iuse()
1629 regex = (re.escape(x) for x in chain(all, iuse_implicit))
1630 regex = "^(%s)$" % "|".join(regex)
1631 regex = regex.replace("\\.\\*", ".*")
1632 self.regex = re.compile(regex)
1633 return object.__getattribute__(self, name)
1635 def _get_hash_key(self):
1636 hash_key = getattr(self, "_hash_key", None)
1637 if hash_key is None:
1638 if self.operation is None:
1639 self.operation = "merge"
1640 if self.onlydeps or self.installed:
1641 self.operation = "nomerge"
1643 (self.type_name, self.root, self.cpv, self.operation)
1644 return self._hash_key
# Comparisons order same-cp packages by version; the return lines are
# missing from this dump.
1646 def __lt__(self, other):
1647 if other.cp != self.cp:
1649 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1653 def __le__(self, other):
1654 if other.cp != self.cp:
1656 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1660 def __gt__(self, other):
1661 if other.cp != self.cp:
1663 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1667 def __ge__(self, other):
1668 if other.cp != self.cp:
1670 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
# Module-level setup: the full key set used by the slot-dict metadata
# wrapper — all auxdb keys except UNUSED_*/CDEPEND, plus Package's keys.
1674 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1675 if not x.startswith("UNUSED_"))
1676 _all_metadata_keys.discard("CDEPEND")
1677 _all_metadata_keys.update(Package.metadata_keys)
1679 from portage.cache.mappings import slot_dict_class
1680 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
# NOTE(review): gapped dump — "self._pkg = pkg" in __init__, the body of
# _set_slot, and the try/except conversion bodies of _set_counter and
# _set__mtime_ are missing; text kept byte-identical.
1682 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1684 Detect metadata updates and synchronize Package attributes.
1687 __slots__ = ("_pkg",)
# Keys whose writes are mirrored into attributes of the owning Package.
1688 _wrapped_keys = frozenset(
1689 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1691 def __init__(self, pkg, metadata):
1692 _PackageMetadataWrapperBase.__init__(self)
1694 self.update(metadata)
1696 def __setitem__(self, k, v):
1697 _PackageMetadataWrapperBase.__setitem__(self, k, v)
# Dispatch to _set_<key>() for wrapped keys so pkg attrs stay in sync.
1698 if k in self._wrapped_keys:
1699 getattr(self, "_set_" + k.lower())(k, v)
1701 def _set_inherited(self, k, v):
1702 if isinstance(v, basestring):
1703 v = frozenset(v.split())
1704 self._pkg.inherited = v
1706 def _set_iuse(self, k, v):
1707 self._pkg.iuse = self._pkg._iuse(
1708 v.split(), self._pkg.root_config.iuse_implicit)
1710 def _set_slot(self, k, v):
1713 def _set_use(self, k, v):
1714 self._pkg.use = self._pkg._use(v.split())
1716 def _set_counter(self, k, v):
# String counters are presumably converted to long with a fallback —
# the conversion lines are missing from this dump.
1717 if isinstance(v, basestring):
1722 self._pkg.counter = v
1724 def _set__mtime_(self, k, v):
1725 if isinstance(v, basestring):
# NOTE(review): gapped dump — the execute() method header, try/finally
# scaffolding and several return statements are missing.
# Purpose: run the "fetch" ebuild phase for --fetchonly/--fetch-all-empty,
# using a private temp builddir when the package is RESTRICT=fetch.
1732 class EbuildFetchonly(SlotObject):
1734 __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1737 settings = self.settings
1739 portdb = pkg.root_config.trees["porttree"].dbapi
1740 ebuild_path = portdb.findname(pkg.cpv)
1741 settings.setcpv(pkg)
1742 debug = settings.get("PORTAGE_DEBUG") == "1"
1743 use_cache = 1 # always true
1744 portage.doebuild_environment(ebuild_path, "fetch",
1745 settings["ROOT"], settings, debug, use_cache, portdb)
1746 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
# RESTRICT=fetch needs a builddir so pkg_nofetch can run and log.
1749 rval = self._execute_with_builddir()
1751 rval = portage.doebuild(ebuild_path, "fetch",
1752 settings["ROOT"], settings, debug=debug,
1753 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1754 mydbapi=portdb, tree="porttree")
1756 if rval != os.EX_OK:
1757 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1758 eerror(msg, phase="unpack", key=pkg.cpv)
1762 def _execute_with_builddir(self):
1763 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1764 # ensuring sane $PWD (bug #239560) and storing elog
1765 # messages. Use a private temp directory, in order
1766 # to avoid locking the main one.
1767 settings = self.settings
1768 global_tmpdir = settings["PORTAGE_TMPDIR"]
1769 from tempfile import mkdtemp
1771 private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1773 if e.errno != portage.exception.PermissionDenied.errno:
1775 raise portage.exception.PermissionDenied(global_tmpdir)
# Temporarily point PORTAGE_TMPDIR at the private dir; restored below
# (presumably in a finally: block dropped by the dump).
1776 settings["PORTAGE_TMPDIR"] = private_tmpdir
1777 settings.backup_changes("PORTAGE_TMPDIR")
1779 retval = self._execute()
1781 settings["PORTAGE_TMPDIR"] = global_tmpdir
1782 settings.backup_changes("PORTAGE_TMPDIR")
1783 shutil.rmtree(private_tmpdir)
# _execute() (header missing): doebuild "fetch" inside the builddir,
# then process elog messages.
1787 settings = self.settings
1789 root_config = pkg.root_config
1790 portdb = root_config.trees["porttree"].dbapi
1791 ebuild_path = portdb.findname(pkg.cpv)
1792 debug = settings.get("PORTAGE_DEBUG") == "1"
1793 retval = portage.doebuild(ebuild_path, "fetch",
1794 self.settings["ROOT"], self.settings, debug=debug,
1795 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1796 mydbapi=portdb, tree="porttree")
1798 if retval != os.EX_OK:
1799 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1800 eerror(msg, phase="unpack", key=pkg.cpv)
1802 portage.elog.elog_process(self.pkg.cpv, self.settings)
class PollConstants(object):

	"""
	Provides POLL* constants that are equivalent to those from the
	select module, for use by PollSelectAdapter. On platforms where
	the select module lacks a given constant, a unique power-of-two
	fallback value is used instead.
	"""

	names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")

# Populate the constants outside the class body instead of writing to
# locals() inside it: mutating locals() in a class body is a
# CPython-specific hack that the language reference does not guarantee.
# Fallback values are 1, 2, 4, ... in declaration order, matching the
# doubling counter in the original code.
_v = 1
for _k in PollConstants.names:
	setattr(PollConstants, _k, getattr(select, _k, _v))
	_v *= 2
del _k, _v
# NOTE(review): gapped dump — the start()/isAlive()/poll()/wait()/cancel()
# method headers and several bodies are missing; text kept byte-identical.
# Purpose: base class for tasks driven by a scheduler poll loop, with
# start/exit listener registration and notification hooks.
1819 class AsynchronousTask(SlotObject):
1821 Subclasses override _wait() and _poll() so that calls
1822 to public methods can be wrapped for implementing
1823 hooks such as exit listener notification.
1825 Sublasses should call self.wait() to notify exit listeners after
1826 the task is complete and self.returncode has been set.
1829 __slots__ = ("background", "cancelled", "returncode") + \
1830 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1834 Start an asynchronous task and then return as soon as possible.
1840 raise NotImplementedError(self)
# A task is considered alive while returncode is unset.
1843 return self.returncode is None
1850 return self.returncode
1853 if self.returncode is None:
1856 return self.returncode
1859 return self.returncode
1862 self.cancelled = True
1865 def addStartListener(self, f):
1867 The function will be called with one argument, a reference to self.
1869 if self._start_listeners is None:
1870 self._start_listeners = []
1871 self._start_listeners.append(f)
1873 def removeStartListener(self, f):
1874 if self._start_listeners is None:
1876 self._start_listeners.remove(f)
1878 def _start_hook(self):
1879 if self._start_listeners is not None:
# Detach the list before calling, so listeners fire exactly once.
1880 start_listeners = self._start_listeners
1881 self._start_listeners = None
1883 for f in start_listeners:
1886 def addExitListener(self, f):
1888 The function will be called with one argument, a reference to self.
1890 if self._exit_listeners is None:
1891 self._exit_listeners = []
1892 self._exit_listeners.append(f)
1894 def removeExitListener(self, f):
1895 if self._exit_listeners is None:
1896 if self._exit_listener_stack is not None:
1897 self._exit_listener_stack.remove(f)
1899 self._exit_listeners.remove(f)
1901 def _wait_hook(self):
1903 Call this method after the task completes, just before returning
1904 the returncode from wait() or poll(). This hook is
1905 used to trigger exit listeners when the returncode first
1908 if self.returncode is not None and \
1909 self._exit_listeners is not None:
1911 # This prevents recursion, in case one of the
1912 # exit handlers triggers this method again by
1913 # calling wait(). Use a stack that gives
1914 # removeExitListener() an opportunity to consume
1915 # listeners from the stack, before they can get
1916 # called below. This is necessary because a call
1917 # to one exit listener may result in a call to
1918 # removeExitListener() for another listener on
1919 # the stack. That listener needs to be removed
1920 # from the stack since it would be inconsistent
1921 # to call it after it has been been passed into
1922 # removeExitListener().
1923 self._exit_listener_stack = self._exit_listeners
1924 self._exit_listeners = None
1926 self._exit_listener_stack.reverse()
1927 while self._exit_listener_stack:
1928 self._exit_listener_stack.pop()(self)
# NOTE(review): gapped dump — the rest of __slots__, _bufsize, and the
# _unregister() calls inside _unregister_if_appropriate are missing.
# Purpose: AsynchronousTask variant tied to a scheduler's poll loop.
1930 class AbstractPollTask(AsynchronousTask):
1932 __slots__ = ("scheduler",) + \
# Error/invalid events always terminate registration; POLLHUP means EOF.
1936 _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1937 _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1940 def _unregister(self):
1941 raise NotImplementedError(self)
1943 def _unregister_if_appropriate(self, event):
1944 if self._registered:
1945 if event & self._exceptional_events:
1948 elif event & PollConstants.POLLHUP:
# NOTE(review): gapped dump — _start/_registered/_cancel/_wait/getvalue/
# close method headers and some bodies are missing; text kept byte-identical.
# Purpose: poll-driven reader that drains one or more non-blocking files
# into memory for later retrieval via getvalue().
1952 class PipeReader(AbstractPollTask):
1955 Reads output from one or more files and saves it in memory,
1956 for retrieval via the getvalue() method. This is driven by
1957 the scheduler's poll() loop, so it runs entirely within the
1961 __slots__ = ("input_files",) + \
1962 ("_read_data", "_reg_ids")
1965 self._reg_ids = set()
1966 self._read_data = []
# Switch each input fd to O_NONBLOCK and register it with the scheduler.
1967 for k, f in self.input_files.iteritems():
1968 fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1969 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1970 self._reg_ids.add(self.scheduler.register(f.fileno(),
1971 self._registered_events, self._output_handler))
1972 self._registered = True
1975 return self._registered
1978 if self.returncode is None:
1980 self.cancelled = True
1984 if self.returncode is not None:
1985 return self.returncode
# Block in the scheduler until all registrations have been dropped.
1987 if self._registered:
1988 self.scheduler.schedule(self._reg_ids)
1991 self.returncode = os.EX_OK
1992 return self.returncode
1995 """Retrieve the entire contents"""
1996 if sys.hexversion >= 0x3000000:
1997 return bytes().join(self._read_data)
1998 return "".join(self._read_data)
2001 """Free the memory buffer."""
2002 self._read_data = None
2004 def _output_handler(self, fd, event):
2006 if event & PollConstants.POLLIN:
# Find which registered file corresponds to the ready fd.
2008 for f in self.input_files.itervalues():
2009 if fd == f.fileno():
2012 buf = array.array('B')
# fromfile raises EOFError on short read; handling lines are missing
# from this dump.
2014 buf.fromfile(f, self._bufsize)
2019 self._read_data.append(buf.tostring())
2024 self._unregister_if_appropriate(event)
2025 return self._registered
2027 def _unregister(self):
2029 Unregister from the scheduler and close open files.
2032 self._registered = False
2034 if self._reg_ids is not None:
2035 for reg_id in self._reg_ids:
2036 self.scheduler.unregister(reg_id)
2037 self._reg_ids = None
2039 if self.input_files is not None:
2040 for f in self.input_files.itervalues():
2042 self.input_files = None
# NOTE(review): gapped dump — _poll()/_wait() headers and their loop
# scaffolding are missing; text kept byte-identical.
# Purpose: a task composed of a chain of sub-tasks; tracks the current
# sub-task and propagates its returncode.
2044 class CompositeTask(AsynchronousTask):
2046 __slots__ = ("scheduler",) + ("_current_task",)
2049 return self._current_task is not None
2052 self.cancelled = True
2053 if self._current_task is not None:
2054 self._current_task.cancel()
2058 This does a loop calling self._current_task.poll()
2059 repeatedly as long as the value of self._current_task
2060 keeps changing. It calls poll() a maximum of one time
2061 for a given self._current_task instance. This is useful
2062 since calling poll() on a task can trigger advance to
2063 the next task could eventually lead to the returncode
2064 being set in cases when polling only a single task would
2065 not have the same effect.
2070 task = self._current_task
2071 if task is None or task is prev:
2072 # don't poll the same task more than once
2077 return self.returncode
2083 task = self._current_task
2085 # don't wait for the same task more than once
2088 # Before the task.wait() method returned, an exit
2089 # listener should have set self._current_task to either
2090 # a different task or None. Something is wrong.
2091 raise AssertionError("self._current_task has not " + \
2092 "changed since calling wait", self, task)
2096 return self.returncode
2098 def _assert_current(self, task):
2100 Raises an AssertionError if the given task is not the
2101 same one as self._current_task. This can be useful
2104 if task is not self._current_task:
2105 raise AssertionError("Unrecognized task: %s" % (task,))
2107 def _default_exit(self, task):
2109 Calls _assert_current() on the given task and then sets the
2110 composite returncode attribute if task.returncode != os.EX_OK.
2111 If the task failed then self._current_task will be set to None.
2112 Subclasses can use this as a generic task exit callback.
2115 @returns: The task.returncode attribute.
2117 self._assert_current(task)
2118 if task.returncode != os.EX_OK:
2119 self.returncode = task.returncode
2120 self._current_task = None
2121 return task.returncode
2123 def _final_exit(self, task):
2125 Assumes that task is the final task of this composite task.
2126 Calls _default_exit() and sets self.returncode to the task's
2127 returncode and sets self._current_task to None.
2129 self._default_exit(task)
2130 self._current_task = None
2131 self.returncode = task.returncode
2132 return self.returncode
2134 def _default_final_exit(self, task):
2136 This calls _final_exit() and then wait().
2138 Subclasses can use this as a generic final task exit callback.
2141 self._final_exit(task)
2144 def _start_task(self, task, exit_handler):
2146 Register exit handler for the given task, set it
2147 as self._current_task, and call task.start().
2149 Subclasses can use this as a generic way to start
2153 task.addExitListener(exit_handler)
2154 self._current_task = task
# NOTE(review): gapped dump — _start()/cancel() headers and the wait()
# call after _final_exit are missing; text kept byte-identical.
2157 class TaskSequence(CompositeTask):
2159 A collection of tasks that executes sequentially. Each task
2160 must have a addExitListener() method that can be used as
2161 a means to trigger movement from one task to the next.
2164 __slots__ = ("_task_queue",)
2166 def __init__(self, **kwargs):
2167 AsynchronousTask.__init__(self, **kwargs)
2168 self._task_queue = deque()
2170 def add(self, task):
2171 self._task_queue.append(task)
2174 self._start_next_task()
2177 self._task_queue.clear()
2178 CompositeTask.cancel(self)
2180 def _start_next_task(self):
2181 self._start_task(self._task_queue.popleft(),
2182 self._task_exit_handler)
# Advance through the queue; stop the whole sequence on first failure.
2184 def _task_exit_handler(self, task):
2185 if self._default_exit(task) != os.EX_OK:
2187 elif self._task_queue:
2188 self._start_next_task()
2190 self._final_exit(task)
# NOTE(review): gapped dump — poll()/cancel()/isAlive()/_wait() headers
# and try/except scaffolding around waitpid/kill are missing.
# Purpose: AbstractPollTask wrapping a forked child process, reaping it
# with waitpid and translating the exit status into returncode.
2193 class SubProcess(AbstractPollTask):
2195 __slots__ = ("pid",) + \
2196 ("_files", "_reg_id")
2198 # A file descriptor is required for the scheduler to monitor changes from
2199 # inside a poll() loop. When logging is not enabled, create a pipe just to
2200 # serve this purpose alone.
2204 if self.returncode is not None:
2205 return self.returncode
2206 if self.pid is None:
2207 return self.returncode
2208 if self._registered:
2209 return self.returncode
# Non-blocking reap; ECHILD means the child was already collected.
2212 retval = os.waitpid(self.pid, os.WNOHANG)
2214 if e.errno != errno.ECHILD:
2217 retval = (self.pid, 1)
2219 if retval == (0, 0):
2221 self._set_returncode(retval)
2222 return self.returncode
2227 os.kill(self.pid, signal.SIGTERM)
2229 if e.errno != errno.ESRCH:
2233 self.cancelled = True
2234 if self.pid is not None:
2236 return self.returncode
2239 return self.pid is not None and \
2240 self.returncode is None
2244 if self.returncode is not None:
2245 return self.returncode
2247 if self._registered:
2248 self.scheduler.schedule(self._reg_id)
2250 if self.returncode is not None:
2251 return self.returncode
# Blocking reap once the pipe registration is gone.
2254 wait_retval = os.waitpid(self.pid, 0)
2256 if e.errno != errno.ECHILD:
2259 self._set_returncode((self.pid, 1))
2261 self._set_returncode(wait_retval)
2263 return self.returncode
2265 def _unregister(self):
2267 Unregister from the scheduler and close open files.
2270 self._registered = False
2272 if self._reg_id is not None:
2273 self.scheduler.unregister(self._reg_id)
2276 if self._files is not None:
2277 for f in self._files.itervalues():
2281 def _set_returncode(self, wait_retval):
# Convert a waitpid status into a conventional exit code: signal
# deaths become (signum << 8), normal exits shift the status down.
2283 retval = wait_retval[1]
2285 if retval != os.EX_OK:
2287 retval = (retval & 0xff) << 8
2289 retval = retval >> 8
2291 self.returncode = retval
# NOTE(review): gapped dump — _start() header, logfile branches, several
# try/except blocks and the EOF handling in the handlers are missing;
# text kept byte-identical. Purpose: SubProcess that spawns via
# portage.process.spawn(), tees child output to a log file and/or
# stdout, and feeds the scheduler's poll loop.
2293 class SpawnProcess(SubProcess):
2296 Constructor keyword args are passed into portage.process.spawn().
2297 The required "args" keyword argument will be passed as the first
2301 _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2302 "uid", "gid", "groups", "umask", "logfile",
2303 "path_lookup", "pre_exec")
2305 __slots__ = ("args",) + \
2308 _file_names = ("log", "process", "stdout")
2309 _files_dict = slot_dict_class(_file_names, prefix="")
2316 if self.fd_pipes is None:
2318 fd_pipes = self.fd_pipes
# Default the standard streams to the parent's.
2319 fd_pipes.setdefault(0, sys.stdin.fileno())
2320 fd_pipes.setdefault(1, sys.stdout.fileno())
2321 fd_pipes.setdefault(2, sys.stderr.fileno())
2323 # flush any pending output
2324 for fd in fd_pipes.itervalues():
2325 if fd == sys.stdout.fileno():
2327 if fd == sys.stderr.fileno():
2330 logfile = self.logfile
2331 self._files = self._files_dict()
2334 master_fd, slave_fd = self._pipe(fd_pipes)
2335 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2336 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2339 fd_pipes_orig = fd_pipes.copy()
2341 # TODO: Use job control functions like tcsetpgrp() to control
2342 # access to stdin. Until then, use /dev/null so that any
2343 # attempts to read from stdin will immediately return EOF
2344 # instead of blocking indefinitely.
2345 null_input = open('/dev/null', 'rb')
2346 fd_pipes[0] = null_input.fileno()
2348 fd_pipes[0] = fd_pipes_orig[0]
2350 files.process = os.fdopen(master_fd, 'rb')
2351 if logfile is not None:
# Logging path: child's stdout/stderr go through the pipe slave,
# parent tees into the log file (and stdout when foregrounded).
2353 fd_pipes[1] = slave_fd
2354 fd_pipes[2] = slave_fd
2356 files.log = open(logfile, mode='ab')
2357 portage.util.apply_secpass_permissions(logfile,
2358 uid=portage.portage_uid, gid=portage.portage_gid,
2361 if not self.background:
2362 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2364 output_handler = self._output_handler
2368 # Create a dummy pipe so the scheduler can monitor
2369 # the process from inside a poll() loop.
2370 fd_pipes[self._dummy_pipe_fd] = slave_fd
2372 fd_pipes[1] = slave_fd
2373 fd_pipes[2] = slave_fd
2374 output_handler = self._dummy_handler
# Collect spawn() kwargs from matching instance attributes.
2377 for k in self._spawn_kwarg_names:
2378 v = getattr(self, k)
2382 kwargs["fd_pipes"] = fd_pipes
2383 kwargs["returnpid"] = True
2384 kwargs.pop("logfile", None)
2386 self._reg_id = self.scheduler.register(files.process.fileno(),
2387 self._registered_events, output_handler)
2388 self._registered = True
2390 retval = self._spawn(self.args, **kwargs)
2393 if null_input is not None:
# An int retval indicates spawn failure (missing handling lines).
2396 if isinstance(retval, int):
2399 self.returncode = retval
2403 self.pid = retval[0]
# This class reaps the child itself, so drop it from portage's
# global spawned-pids bookkeeping.
2404 portage.process.spawned_pids.remove(self.pid)
2406 def _pipe(self, fd_pipes):
2408 @type fd_pipes: dict
2409 @param fd_pipes: pipes from which to copy terminal size if desired.
2413 def _spawn(self, args, **kwargs):
2414 return portage.process.spawn(args, **kwargs)
2416 def _output_handler(self, fd, event):
2418 if event & PollConstants.POLLIN:
2421 buf = array.array('B')
2423 buf.fromfile(files.process, self._bufsize)
2428 if not self.background:
2429 write_successful = False
2433 if not write_successful:
2434 buf.tofile(files.stdout)
2435 write_successful = True
2436 files.stdout.flush()
2439 if e.errno != errno.EAGAIN:
2444 # Avoid a potentially infinite loop. In
2445 # most cases, the failure count is zero
2446 # and it's unlikely to exceed 1.
2449 # This means that a subprocess has put an inherited
2450 # stdio file descriptor (typically stdin) into
2451 # O_NONBLOCK mode. This is not acceptable (see bug
2452 # #264435), so revert it. We need to use a loop
2453 # here since there's a race condition due to
2454 # parallel processes being able to change the
2455 # flags on the inherited file descriptor.
2456 # TODO: When possible, avoid having child processes
2457 # inherit stdio file descriptors from portage
2458 # (maybe it can't be avoided with
2459 # PROPERTIES=interactive).
2460 fcntl.fcntl(files.stdout.fileno(), fcntl.F_SETFL,
2461 fcntl.fcntl(files.stdout.fileno(),
2462 fcntl.F_GETFL) ^ os.O_NONBLOCK)
2464 buf.tofile(files.log)
2470 self._unregister_if_appropriate(event)
2471 return self._registered
2473 def _dummy_handler(self, fd, event):
2475 This method is mainly interested in detecting EOF, since
2476 the only purpose of the pipe is to allow the scheduler to
2477 monitor the process from inside a poll() loop.
2480 if event & PollConstants.POLLIN:
2482 buf = array.array('B')
2484 buf.fromfile(self._files.process, self._bufsize)
2494 self._unregister_if_appropriate(event)
2495 return self._registered
# NOTE(review): gapped dump — the _start() header is missing.
# Purpose: SpawnProcess that runs misc-functions.sh phase commands in an
# existing ebuild environment, then checks the phase's exit-status file.
2497 class MiscFunctionsProcess(SpawnProcess):
2499 Spawns misc-functions.sh with an existing ebuild environment.
2502 __slots__ = ("commands", "phase", "pkg", "settings")
2505 settings = self.settings
2506 settings.pop("EBUILD_PHASE", None)
2507 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2508 misc_sh_binary = os.path.join(portage_bin_path,
2509 os.path.basename(portage.const.MISC_SH_BINARY))
2511 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2512 self.logfile = settings.get("PORTAGE_LOG_FILE")
# Remove any stale exit-status file before spawning.
2514 portage._doebuild_exit_status_unlink(
2515 settings.get("EBUILD_EXIT_STATUS_FILE"))
2517 SpawnProcess._start(self)
2519 def _spawn(self, args, **kwargs):
2520 settings = self.settings
2521 debug = settings.get("PORTAGE_DEBUG") == "1"
2522 return portage.spawn(" ".join(args), settings,
2523 debug=debug, **kwargs)
2525 def _set_returncode(self, wait_retval):
2526 SpawnProcess._set_returncode(self, wait_retval)
# Cross-check the shell's exit-status file against the wait status.
2527 self.returncode = portage._doebuild_exit_status_check_and_log(
2528 self.settings, self.phase, self.returncode)
# NOTE(review): gapped dump — the _start() header, the phase selection,
# and several conditionals are missing; text kept byte-identical.
# Purpose: SpawnProcess that runs the ebuild "fetch" phase (optionally as
# a background prefetcher), with pty-based progress-bar support and elog
# collection on failure.
2530 class EbuildFetcher(SpawnProcess):
2532 __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2537 root_config = self.pkg.root_config
2538 portdb = root_config.trees["porttree"].dbapi
2539 ebuild_path = portdb.findname(self.pkg.cpv)
2540 settings = self.config_pool.allocate()
2541 settings.setcpv(self.pkg)
2543 # In prefetch mode, logging goes to emerge-fetch.log and the builddir
2544 # should not be touched since otherwise it could interfere with
2545 # another instance of the same cpv concurrently being built for a
2546 # different $ROOT (currently, builds only cooperate with prefetchers
2547 # that are spawned for the same $ROOT).
2548 if not self.prefetch:
2549 self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2550 self._build_dir.lock()
2551 self._build_dir.clean_log()
2552 portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2553 if self.logfile is None:
2554 self.logfile = settings.get("PORTAGE_LOG_FILE")
2560 # If any incremental variables have been overridden
2561 # via the environment, those values need to be passed
2562 # along here so that they are correctly considered by
2563 # the config instance in the subproccess.
2564 fetch_env = os.environ.copy()
2566 nocolor = settings.get("NOCOLOR")
2567 if nocolor is not None:
2568 fetch_env["NOCOLOR"] = nocolor
# Renice is handled by the parent; neutralize it in the child.
2570 fetch_env["PORTAGE_NICENESS"] = "0"
2572 fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2574 ebuild_binary = os.path.join(
2575 settings["PORTAGE_BIN_PATH"], "ebuild")
2577 fetch_args = [ebuild_binary, ebuild_path, phase]
2578 debug = settings.get("PORTAGE_DEBUG") == "1"
2580 fetch_args.append("--debug")
2582 self.args = fetch_args
2583 self.env = fetch_env
2584 SpawnProcess._start(self)
2586 def _pipe(self, fd_pipes):
2587 """When appropriate, use a pty so that fetcher progress bars,
2588 like wget has, will work properly."""
2589 if self.background or not sys.stdout.isatty():
2590 # When the output only goes to a log file,
2591 # there's no point in creating a pty.
2593 stdout_pipe = fd_pipes.get(1)
2594 got_pty, master_fd, slave_fd = \
2595 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2596 return (master_fd, slave_fd)
2598 def _set_returncode(self, wait_retval):
2599 SpawnProcess._set_returncode(self, wait_retval)
2600 # Collect elog messages that might have been
2601 # created by the pkg_nofetch phase.
2602 if self._build_dir is not None:
2603 # Skip elog messages for prefetch, in order to avoid duplicates.
2604 if not self.prefetch and self.returncode != os.EX_OK:
2606 if self.logfile is not None:
2608 elog_out = open(self.logfile, 'a')
2609 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2610 if self.logfile is not None:
2611 msg += ", Log file:"
2612 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2613 if self.logfile is not None:
2614 eerror(" '%s'" % (self.logfile,),
2615 phase="unpack", key=self.pkg.cpv, out=elog_out)
2616 if elog_out is not None:
2618 if not self.prefetch:
2619 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2620 features = self._build_dir.settings.features
# On success, drop the (empty) fetch log; then release the builddir
# and return the settings instance to the pool.
2621 if self.returncode == os.EX_OK:
2622 self._build_dir.clean_log()
2623 self._build_dir.unlock()
2624 self.config_pool.deallocate(self._build_dir.settings)
2625 self._build_dir = None
# EbuildBuildDir: manages the lock on a package's $PORTAGE_BUILDDIR, plus
# lazy computation of that path and cleanup of the build log.
# NOTE(review): interior lines appear elided in this view.
2627 class EbuildBuildDir(SlotObject):
2629 	__slots__ = ("dir_path", "pkg", "settings",
2630 		"locked", "_catdir", "_lock_obj")
2632 	def __init__(self, **kwargs):
2633 		SlotObject.__init__(self, **kwargs)
# Docstring fragment of lock() (the def line is not visible here):
2638 		This raises an AlreadyLocked exception if lock() is called
2639 		while a lock is already held. In order to avoid this, call
2640 		unlock() or check whether the "locked" attribute is True
2641 		or False before calling lock().
2643 		if self._lock_obj is not None:
2644 			raise self.AlreadyLocked((self._lock_obj,))
2646 		dir_path = self.dir_path
2647 		if dir_path is None:
# dir_path not given explicitly: derive PORTAGE_BUILDDIR by running
# doebuild_environment() for the "setup" phase against this package.
2648 			root_config = self.pkg.root_config
2649 			portdb = root_config.trees["porttree"].dbapi
2650 			ebuild_path = portdb.findname(self.pkg.cpv)
2651 			settings = self.settings
2652 			settings.setcpv(self.pkg)
2653 			debug = settings.get("PORTAGE_DEBUG") == "1"
2654 			use_cache = 1 # always true
2655 			portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2656 				self.settings, debug, use_cache, portdb)
2657 			dir_path = self.settings["PORTAGE_BUILDDIR"]
2659 		catdir = os.path.dirname(dir_path)
2660 		self._catdir = catdir
2662 		portage.util.ensure_dirs(os.path.dirname(catdir),
2663 			gid=portage.portage_gid,
# Lock the category directory while creating it and taking the real
# build-dir lock, then drop the category lock again below.
2667 			catdir_lock = portage.locks.lockdir(catdir)
2668 			portage.util.ensure_dirs(catdir,
2669 				gid=portage.portage_gid,
2671 			self._lock_obj = portage.locks.lockdir(dir_path)
2673 			self.locked = self._lock_obj is not None
2674 			if catdir_lock is not None:
2675 				portage.locks.unlockdir(catdir_lock)
2677 	def clean_log(self):
2678 		"""Discard existing log."""
2679 		settings = self.settings
2681 		for x in ('.logid', 'temp/build.log'):
2683 				os.unlink(os.path.join(settings["PORTAGE_BUILDDIR"], x))
# unlock() body (its def line is not visible): release the build-dir lock
# and try to remove the now-possibly-empty category dir under its own lock.
2688 		if self._lock_obj is None:
2691 		portage.locks.unlockdir(self._lock_obj)
2692 		self._lock_obj = None
2695 		catdir = self._catdir
2698 		catdir_lock = portage.locks.lockdir(catdir)
# Removal errors for a missing/non-empty/existing dir are expected and
# tolerated; anything else propagates.
2704 				if e.errno not in (errno.ENOENT,
2705 					errno.ENOTEMPTY, errno.EEXIST):
2708 			portage.locks.unlockdir(catdir_lock)
2710 	class AlreadyLocked(portage.exception.PortageException):
# EbuildBuild: composite task driving a full source build — wait for/spawn a
# fetcher, run the build phases via EbuildExecuter, optionally package the
# result (EbuildBinpkg), then merge. NOTE(review): many interior lines are
# elided in this view; comments cover only visible statements.
2713 class EbuildBuild(CompositeTask):
2715 	__slots__ = ("args_set", "config_pool", "find_blockers",
2716 		"ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2717 		"prefetcher", "settings", "world_atom") + \
2718 		("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
# Start hook body (def line elided): bind settings to this package and
# record where its ebuild lives.
2722 		logger = self.logger
2725 		settings = self.settings
2726 		world_atom = self.world_atom
2727 		root_config = pkg.root_config
2730 		portdb = root_config.trees[tree].dbapi
2731 		settings.setcpv(pkg)
2732 		settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2733 		ebuild_path = portdb.findname(self.pkg.cpv)
2734 		self._ebuild_path = ebuild_path
# If a background prefetcher for this package is still running, wait for it
# (it holds locks on the files being fetched); otherwise proceed directly.
2736 		prefetcher = self.prefetcher
2737 		if prefetcher is None:
2739 		elif not prefetcher.isAlive():
2741 		elif prefetcher.poll() is None:
2743 			waiting_msg = "Fetching files " + \
2744 				"in the background. " + \
2745 				"To view fetch progress, run `tail -f " + \
2746 				"/var/log/emerge-fetch.log` in another " + \
2748 			msg_prefix = colorize("GOOD", " * ")
2749 			from textwrap import wrap
2750 			waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2751 				for line in wrap(waiting_msg, 65))
2752 			if not self.background:
2753 				writemsg(waiting_msg, noiselevel=-1)
2755 			self._current_task = prefetcher
2756 			prefetcher.addExitListener(self._prefetch_exit)
2759 		self._prefetch_exit(prefetcher)
2761 	def _prefetch_exit(self, prefetcher):
# After prefetch: either run a synchronous pretend/fetchonly fetch
# (EbuildFetchonly) or start an async EbuildFetcher task.
2765 		settings = self.settings
2768 			fetcher = EbuildFetchonly(
2769 				fetch_all=opts.fetch_all_uri,
2770 				pkg=pkg, pretend=opts.pretend,
2772 			retval = fetcher.execute()
2773 			self.returncode = retval
2777 		fetcher = EbuildFetcher(config_pool=self.config_pool,
2778 			fetchall=opts.fetch_all_uri,
2779 			fetchonly=opts.fetchonly,
2780 			background=self.background,
2781 			pkg=pkg, scheduler=self.scheduler)
2783 		self._start_task(fetcher, self._fetch_exit)
2785 	def _fetch_exit(self, fetcher):
2789 		fetch_failed = False
2791 			fetch_failed = self._final_exit(fetcher) != os.EX_OK
2793 			fetch_failed = self._default_exit(fetcher) != os.EX_OK
# Point PORTAGE_LOG_FILE at the fetch log on failure so later reporting can
# find it; on success the fetch log is no longer needed.
2795 		if fetch_failed and fetcher.logfile is not None and \
2796 			os.path.exists(fetcher.logfile):
2797 			self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2799 		if not fetch_failed and fetcher.logfile is not None:
2800 			# Fetch was successful, so remove the fetch log.
2802 				os.unlink(fetcher.logfile)
2806 		if fetch_failed or opts.fetchonly:
# Lock the build dir and log the "Cleaning" step before starting the
# EbuildExecuter build sequence.
2810 		logger = self.logger
2812 		pkg_count = self.pkg_count
2813 		scheduler = self.scheduler
2814 		settings = self.settings
2815 		features = settings.features
2816 		ebuild_path = self._ebuild_path
2817 		system_set = pkg.root_config.sets["system"]
2819 		self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2820 		self._build_dir.lock()
2822 		# Cleaning is triggered before the setup
2823 		# phase, in portage.doebuild().
2824 		msg = " === (%s of %s) Cleaning (%s::%s)" % \
2825 			(pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2826 		short_msg = "emerge: (%s of %s) %s Clean" % \
2827 			(pkg_count.curval, pkg_count.maxval, pkg.cpv)
2828 		logger.log(msg, short_msg=short_msg)
2830 		#buildsyspkg: Check if we need to _force_ binary package creation
2831 		self._issyspkg = "buildsyspkg" in features and \
2832 			system_set.findAtomForPackage(pkg) and \
2835 		if opts.buildpkg or self._issyspkg:
2837 			self._buildpkg = True
2839 			msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2840 				(pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2841 			short_msg = "emerge: (%s of %s) %s Compile" % \
2842 				(pkg_count.curval, pkg_count.maxval, pkg.cpv)
2843 			logger.log(msg, short_msg=short_msg)
2846 			msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2847 				(pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2848 			short_msg = "emerge: (%s of %s) %s Compile" % \
2849 				(pkg_count.curval, pkg_count.maxval, pkg.cpv)
2850 			logger.log(msg, short_msg=short_msg)
2852 		build = EbuildExecuter(background=self.background, pkg=pkg,
2853 			scheduler=scheduler, settings=settings)
2854 		self._start_task(build, self._build_exit)
2856 	def _unlock_builddir(self):
# Flush queued elog messages before releasing the build-dir lock.
2857 		portage.elog.elog_process(self.pkg.cpv, self.settings)
2858 		self._build_dir.unlock()
2860 	def _build_exit(self, build):
2861 		if self._default_exit(build) != os.EX_OK:
2862 			self._unlock_builddir()
2867 		buildpkg = self._buildpkg
2870 			self._final_exit(build)
# buildsyspkg path: announce the rescue-tarball packaging step (to the log
# file when backgrounded, to stdout otherwise).
2875 			msg = ">>> This is a system package, " + \
2876 				"let's pack a rescue tarball.\n"
2878 			log_path = self.settings.get("PORTAGE_LOG_FILE")
2879 			if log_path is not None:
2880 				log_file = open(log_path, 'a')
2886 			if not self.background:
2887 				portage.writemsg_stdout(msg, noiselevel=-1)
2889 		packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2890 			scheduler=self.scheduler, settings=self.settings)
2892 		self._start_task(packager, self._buildpkg_exit)
2894 	def _buildpkg_exit(self, packager):
2896 		Released build dir lock when there is a failure or
2897 		when in buildpkgonly mode. Otherwise, the lock will
2898 		be released when merge() is called.
2901 		if self._default_exit(packager) != os.EX_OK:
2902 			self._unlock_builddir()
2906 		if self.opts.buildpkgonly:
2907 			# Need to call "clean" phase for buildpkgonly mode
2908 			portage.elog.elog_process(self.pkg.cpv, self.settings)
2910 			clean_phase = EbuildPhase(background=self.background,
2911 				pkg=self.pkg, phase=phase,
2912 				scheduler=self.scheduler, settings=self.settings,
2914 			self._start_task(clean_phase, self._clean_exit)
2917 		# Continue holding the builddir lock until
2918 		# after the package has been installed.
2919 		self._current_task = None
2920 		self.returncode = packager.returncode
2923 	def _clean_exit(self, clean_phase):
2924 		if self._final_exit(clean_phase) != os.EX_OK or \
2925 			self.opts.buildpkgonly:
2926 			self._unlock_builddir()
# Docstring fragment of the install/merge method (its def line is elided):
2931 		Install the package and then clean up and release locks.
2932 		Only call this after the build has completed successfully
2933 		and neither fetchonly nor buildpkgonly mode are enabled.
2936 		find_blockers = self.find_blockers
2937 		ldpath_mtimes = self.ldpath_mtimes
2938 		logger = self.logger
2940 		pkg_count = self.pkg_count
2941 		settings = self.settings
2942 		world_atom = self.world_atom
2943 		ebuild_path = self._ebuild_path
2946 		merge = EbuildMerge(find_blockers=self.find_blockers,
2947 			ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2948 			pkg_count=pkg_count, pkg_path=ebuild_path,
2949 			scheduler=self.scheduler,
2950 			settings=settings, tree=tree, world_atom=world_atom)
2952 		msg = " === (%s of %s) Merging (%s::%s)" % \
2953 			(pkg_count.curval, pkg_count.maxval,
2954 			pkg.cpv, ebuild_path)
2955 		short_msg = "emerge: (%s of %s) %s Merge" % \
2956 			(pkg_count.curval, pkg_count.maxval, pkg.cpv)
2957 		logger.log(msg, short_msg=short_msg)
2960 			rval = merge.execute()
2962 			self._unlock_builddir()
# EbuildExecuter: runs the ordered ebuild phases (clean → setup → unpack →
# prepare/configure/compile/test/install) as chained async tasks.
# NOTE(review): interior lines appear elided in this view.
2966 class EbuildExecuter(CompositeTask):
2968 	__slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2970 	_phases = ("prepare", "configure", "compile", "test", "install")
2972 	_live_eclasses = frozenset([
# Start hook body (def line elided): kick off with a "clean" phase.
2982 		self._tree = "porttree"
2985 		clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2986 			scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2987 		self._start_task(clean_phase, self._clean_phase_exit)
2989 	def _clean_phase_exit(self, clean_phase):
2991 		if self._default_exit(clean_phase) != os.EX_OK:
2996 		scheduler = self.scheduler
2997 		settings = self.settings
3000 		# This initializes PORTAGE_LOG_FILE.
3001 		portage.prepare_build_dirs(pkg.root, settings, cleanup)
# The setup phase goes through the scheduler's dedicated setup queue rather
# than _start_task.
3003 		setup_phase = EbuildPhase(background=self.background,
3004 			pkg=pkg, phase="setup", scheduler=scheduler,
3005 			settings=settings, tree=self._tree)
3007 		setup_phase.addExitListener(self._setup_exit)
3008 		self._current_task = setup_phase
3009 		self.scheduler.scheduleSetup(setup_phase)
3011 	def _setup_exit(self, setup_phase):
3013 		if self._default_exit(setup_phase) != os.EX_OK:
3017 		unpack_phase = EbuildPhase(background=self.background,
3018 			pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
3019 			settings=self.settings, tree=self._tree)
3021 		if self._live_eclasses.intersection(self.pkg.inherited):
3022 			# Serialize $DISTDIR access for live ebuilds since
3023 			# otherwise they can interfere with eachother.
3025 			unpack_phase.addExitListener(self._unpack_exit)
3026 			self._current_task = unpack_phase
3027 			self.scheduler.scheduleUnpack(unpack_phase)
3030 		self._start_task(unpack_phase, self._unpack_exit)
3032 	def _unpack_exit(self, unpack_phase):
3034 		if self._default_exit(unpack_phase) != os.EX_OK:
# Queue the remaining phases as one TaskSequence; EAPI 0/1 ebuilds have no
# src_prepare/src_configure, so those are skipped for them.
3038 		ebuild_phases = TaskSequence(scheduler=self.scheduler)
3041 		phases = self._phases
3042 		eapi = pkg.metadata["EAPI"]
3043 		if eapi in ("0", "1"):
3044 			# skip src_prepare and src_configure
3047 		for phase in phases:
3048 			ebuild_phases.add(EbuildPhase(background=self.background,
3049 				pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3050 				settings=self.settings, tree=self._tree))
3052 		self._start_task(ebuild_phases, self._default_final_exit)
3054 class EbuildMetadataPhase(SubProcess):
# Class docstring fragment (opening quotes elided in this view):
3057 	Asynchronous interface for the ebuild "depend" phase which is
3058 	used to extract metadata from the ebuild.
3061 	__slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3062 		"ebuild_mtime", "metadata", "portdb", "repo_path", "settings") + \
3065 	_file_names = ("ebuild",)
3066 	_files_dict = slot_dict_class(_file_names, prefix="")
# Start hook body (def line elided): determine EAPI up front when possible
# (GLEP 55 file-name suffix, or parsing the ebuild head) so unsupported
# EAPIs can be short-circuited without spawning bash.
3070 		settings = self.settings
3071 		settings.setcpv(self.cpv)
3072 		ebuild_path = self.ebuild_path
3075 		if 'parse-eapi-glep-55' in settings.features:
3076 			pf, eapi = portage._split_ebuild_name_glep55(
3077 				os.path.basename(ebuild_path))
3078 		if eapi is None and \
3079 			'parse-eapi-ebuild-head' in settings.features:
3080 			eapi = portage._parse_eapi_ebuild_head(codecs.open(ebuild_path,
3081 				mode='r', encoding='utf_8', errors='replace'))
3083 		if eapi is not None:
3084 			if not portage.eapi_is_supported(eapi):
# Unsupported EAPI: report just {'EAPI': eapi} via the callback and finish
# successfully without running the depend phase.
3085 				self.metadata_callback(self.cpv, self.ebuild_path,
3086 					self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
3087 				self.returncode = os.EX_OK
3091 			settings.configdict['pkg']['EAPI'] = eapi
3093 		debug = settings.get("PORTAGE_DEBUG") == "1"
3097 		if self.fd_pipes is not None:
3098 			fd_pipes = self.fd_pipes.copy()
3102 		fd_pipes.setdefault(0, sys.stdin.fileno())
3103 		fd_pipes.setdefault(1, sys.stdout.fileno())
3104 		fd_pipes.setdefault(2, sys.stderr.fileno())
3106 		# flush any pending output
3107 		for fd in fd_pipes.itervalues():
3108 			if fd == sys.stdout.fileno():
3110 			if fd == sys.stderr.fileno():
3113 		fd_pipes_orig = fd_pipes.copy()
3114 		self._files = self._files_dict()
# Non-blocking pipe: the child writes raw metadata into slave_fd while this
# process reads master_fd from the scheduler's event loop.
3117 		master_fd, slave_fd = os.pipe()
3118 		fcntl.fcntl(master_fd, fcntl.F_SETFL,
3119 			fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3121 		fd_pipes[self._metadata_fd] = slave_fd
3123 		self._raw_metadata = []
3124 		files.ebuild = os.fdopen(master_fd, 'r')
3125 		self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3126 			self._registered_events, self._output_handler)
3127 		self._registered = True
3129 		retval = portage.doebuild(ebuild_path, "depend",
3130 			settings["ROOT"], settings, debug,
3131 			mydbapi=self.portdb, tree="porttree",
3132 			fd_pipes=fd_pipes, returnpid=True)
3136 		if isinstance(retval, int):
3137 			# doebuild failed before spawning
3139 			self.returncode = retval
# returnpid=True yields a pid list on success; this class tracks the pid
# itself, so remove it from portage's global spawned_pids bookkeeping.
3143 		self.pid = retval[0]
3144 		portage.process.spawned_pids.remove(self.pid)
3146 	def _output_handler(self, fd, event):
# Scheduler callback: accumulate metadata chunks; an empty read means EOF.
3148 		if event & PollConstants.POLLIN:
3149 			self._raw_metadata.append(self._files.ebuild.read())
3150 			if not self._raw_metadata[-1]:
3154 		self._unregister_if_appropriate(event)
3155 		return self._registered
3157 	def _set_returncode(self, wait_retval):
3158 		SubProcess._set_returncode(self, wait_retval)
3159 		if self.returncode == os.EX_OK:
3160 			metadata_lines = "".join(self._raw_metadata).splitlines()
3161 			if len(portage.auxdbkeys) != len(metadata_lines):
3162 				# Don't trust bash's returncode if the
3163 				# number of lines is incorrect.
# Pair each auxdb key with its output line and hand the result to the
# metadata callback (which caches it).
3166 				metadata = izip(portage.auxdbkeys, metadata_lines)
3167 				self.metadata = self.metadata_callback(self.cpv,
3168 					self.ebuild_path, self.repo_path, metadata,
# EbuildProcess: runs a single ebuild phase via portage.doebuild() in a
# spawned subprocess. NOTE(review): some lines appear elided in this view.
3171 class EbuildProcess(SpawnProcess):
3173 	__slots__ = ("phase", "pkg", "settings", "tree")
# Start hook body (def line elided):
3176 		# Don't open the log file during the clean phase since the
3177 		# open file can result in an nfs lock on $T/build.log which
3178 		# prevents the clean phase from removing $T.
3179 		if self.phase not in ("clean", "cleanrm"):
3180 			self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3181 		SpawnProcess._start(self)
3183 	def _pipe(self, fd_pipes):
# Use a pty when possible so child output behaves like a terminal.
3184 		stdout_pipe = fd_pipes.get(1)
3185 		got_pty, master_fd, slave_fd = \
3186 			portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3187 		return (master_fd, slave_fd)
3189 	def _spawn(self, args, **kwargs):
# Delegate the actual phase execution to portage.doebuild().
3191 		root_config = self.pkg.root_config
3193 		mydbapi = root_config.trees[tree].dbapi
3194 		settings = self.settings
3195 		ebuild_path = settings["EBUILD"]
3196 		debug = settings.get("PORTAGE_DEBUG") == "1"
3198 		rval = portage.doebuild(ebuild_path, self.phase,
3199 			root_config.root, settings, debug,
3200 			mydbapi=mydbapi, tree=tree, **kwargs)
3204 	def _set_returncode(self, wait_retval):
3205 		SpawnProcess._set_returncode(self, wait_retval)
3207 		if self.phase not in ("clean", "cleanrm"):
3208 			self.returncode = portage._doebuild_exit_status_check_and_log(
3209 				self.settings, self.phase, self.returncode)
# FEATURES=test-fail-continue turns a failed test phase into success.
3211 		if self.phase == "test" and self.returncode != os.EX_OK and \
3212 			"test-fail-continue" in self.settings.features:
3213 			self.returncode = os.EX_OK
3215 		portage._post_phase_userpriv_perms(self.settings)
# EbuildPhase: composite task wrapping one EbuildProcess plus the phase's
# post-phase misc commands. NOTE(review): some lines appear elided here.
3217 class EbuildPhase(CompositeTask):
3219 	__slots__ = ("background", "pkg", "phase",
3220 		"scheduler", "settings", "tree")
3222 	_post_phase_cmds = portage._post_phase_cmds
# Start hook body (def line elided): spawn the phase subprocess.
3226 		ebuild_process = EbuildProcess(background=self.background,
3227 			pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3228 			settings=self.settings, tree=self.tree)
3230 		self._start_task(ebuild_process, self._ebuild_exit)
3232 	def _ebuild_exit(self, ebuild_process):
# After install, scan the build log for QA problems (output to the log file
# when backgrounded).
3234 		if self.phase == "install":
3236 			log_path = self.settings.get("PORTAGE_LOG_FILE")
3238 			if self.background and log_path is not None:
3239 				log_file = open(log_path, 'a')
3242 			portage._check_build_log(self.settings, out=out)
3244 			if log_file is not None:
3247 		if self._default_exit(ebuild_process) != os.EX_OK:
3251 		settings = self.settings
3253 		if self.phase == "install":
3254 			portage._post_src_install_chost_fix(settings)
3255 			portage._post_src_install_uid_fix(settings)
# Run any misc post-phase commands registered for this phase.
3257 		post_phase_cmds = self._post_phase_cmds.get(self.phase)
3258 		if post_phase_cmds is not None:
3259 			post_phase = MiscFunctionsProcess(background=self.background,
3260 				commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3261 				scheduler=self.scheduler, settings=settings)
3262 			self._start_task(post_phase, self._post_phase_exit)
3265 		self.returncode = ebuild_process.returncode
3266 		self._current_task = None
3269 	def _post_phase_exit(self, post_phase):
3270 		if self._final_exit(post_phase) != os.EX_OK:
3271 			writemsg("!!! post %s failed; exiting.\n" % self.phase,
3273 		self._current_task = None
# EbuildBinpkg: runs the "package" phase to create a .tbz2 binary package
# from the installed image, then injects it into the binary tree.
3277 class EbuildBinpkg(EbuildProcess):
# Class docstring fragment (opening quotes elided in this view):
3279 	This assumes that src_install() has successfully completed.
3281 	__slots__ = ("_binpkg_tmpfile",)
# Start hook body (def line elided): force phase/tree and stage the package
# into a pid-suffixed temp file inside the bintree's pkgdir.
3284 		self.phase = "package"
3285 		self.tree = "porttree"
3287 		root_config = pkg.root_config
3288 		portdb = root_config.trees["porttree"].dbapi
3289 		bintree = root_config.trees["bintree"]
3290 		ebuild_path = portdb.findname(self.pkg.cpv)
3291 		settings = self.settings
3292 		debug = settings.get("PORTAGE_DEBUG") == "1"
3294 		bintree.prevent_collision(pkg.cpv)
3295 		binpkg_tmpfile = os.path.join(bintree.pkgdir,
3296 			pkg.cpv + ".tbz2." + str(os.getpid()))
3297 		self._binpkg_tmpfile = binpkg_tmpfile
3298 		settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3299 		settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
# Remove the temp-file setting again once the process has been started.
3302 			EbuildProcess._start(self)
3304 			settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3306 	def _set_returncode(self, wait_retval):
3307 		EbuildProcess._set_returncode(self, wait_retval)
3310 		bintree = pkg.root_config.trees["bintree"]
3311 		binpkg_tmpfile = self._binpkg_tmpfile
3312 		if self.returncode == os.EX_OK:
# Success: register the finished package file with the binary tree.
3313 			bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
# EbuildMerge: synchronous wrapper around portage.merge() that installs the
# built image into the live filesystem and updates the world file on success.
3315 class EbuildMerge(SlotObject):
3317 	__slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3318 		"pkg", "pkg_count", "pkg_path", "pretend",
3319 		"scheduler", "settings", "tree", "world_atom")
# execute() body (def line elided in this view): merge build-info + image
# from PORTAGE_BUILDDIR into root via the vartree.
3322 		root_config = self.pkg.root_config
3323 		settings = self.settings
3324 		retval = portage.merge(settings["CATEGORY"],
3325 			settings["PF"], settings["D"],
3326 			os.path.join(settings["PORTAGE_BUILDDIR"],
3327 			"build-info"), root_config.root, settings,
3328 			myebuild=settings["EBUILD"],
3329 			mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3330 			vartree=root_config.trees["vartree"],
3331 			prev_mtimes=self.ldpath_mtimes,
3332 			scheduler=self.scheduler,
3333 			blockers=self.find_blockers)
3335 		if retval == os.EX_OK:
# world_atom is a callback supplied by the caller to record the package in
# the world set.
3336 			self.world_atom(self.pkg)
3341 	def _log_success(self):
3343 		pkg_count = self.pkg_count
3344 		pkg_path = self.pkg_path
3345 		logger = self.logger
3346 		if "noclean" not in self.settings.features:
3347 			short_msg = "emerge: (%s of %s) %s Clean Post" % \
3348 				(pkg_count.curval, pkg_count.maxval, pkg.cpv)
3349 			logger.log((" === (%s of %s) " + \
3350 				"Post-Build Cleaning (%s::%s)") % \
3351 				(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3352 				short_msg=short_msg)
3353 		logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3354 			(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# PackageUninstall: wraps the unmerge() helper as an AsynchronousTask,
# translating UninstallFailure into a returncode.
3356 class PackageUninstall(AsynchronousTask):
3358 	__slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
# Start body (def/try lines elided): run unmerge for this single cpv with
# world-cleaning and delays disabled; errors are raised, not printed.
3362 			unmerge(self.pkg.root_config, self.opts, "unmerge",
3363 				[self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3364 				clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3365 				writemsg_level=self._writemsg_level)
3366 		except UninstallFailure, e:
3367 			self.returncode = e.status
3369 		self.returncode = os.EX_OK
3372 	def _writemsg_level(self, msg, level=0, noiselevel=0):
# Route unmerge output: without a log file, suppress sub-WARNING messages
# when backgrounded; with a log file, also append the message to it.
3374 		log_path = self.settings.get("PORTAGE_LOG_FILE")
3375 		background = self.background
3377 		if log_path is None:
3378 			if not (background and level < logging.WARNING):
3379 				portage.util.writemsg_level(msg,
3380 					level=level, noiselevel=noiselevel)
3383 				portage.util.writemsg_level(msg,
3384 					level=level, noiselevel=noiselevel)
3386 			f = open(log_path, 'a')
# Binpkg: composite task that installs a binary package — fetch (optionally
# via a background prefetcher), verify digest, clean/setup the build dir,
# extract the image, then merge. NOTE(review): many interior lines are
# elided in this view; comments cover only visible statements.
3392 class Binpkg(CompositeTask):
3394 	__slots__ = ("find_blockers",
3395 		"ldpath_mtimes", "logger", "opts",
3396 		"pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3397 		("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3398 		"_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3400 	def _writemsg_level(self, msg, level=0, noiselevel=0):
# Echo to the console when foregrounded, and always append to the log file
# when one exists.
3402 		if not self.background:
3403 			portage.util.writemsg_level(msg,
3404 				level=level, noiselevel=noiselevel)
3406 		log_path = self.settings.get("PORTAGE_LOG_FILE")
3407 		if log_path is not None:
3408 			f = open(log_path, 'a')
# Start hook body (def line elided): compute the build-dir layout under
# PORTAGE_TMPDIR and point settings["EBUILD"] at the ebuild that will be
# unpacked from the package's build-info.
3417 		settings = self.settings
3418 		settings.setcpv(pkg)
3419 		self._tree = "bintree"
3420 		self._bintree = self.pkg.root_config.trees[self._tree]
3421 		self._verify = not self.opts.pretend
3423 		dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3424 			"portage", pkg.category, pkg.pf)
3425 		self._build_dir = EbuildBuildDir(dir_path=dir_path,
3426 			pkg=pkg, settings=settings)
3427 		self._image_dir = os.path.join(dir_path, "image")
3428 		self._infloc = os.path.join(dir_path, "build-info")
3429 		self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3430 		settings["EBUILD"] = self._ebuild_path
3431 		debug = settings.get("PORTAGE_DEBUG") == "1"
3432 		portage.doebuild_environment(self._ebuild_path, "setup",
3433 			settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3434 		settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3436 		# The prefetcher has already completed or it
3437 		# could be running now. If it's running now,
3438 		# wait for it to complete since it holds
3439 		# a lock on the file being fetched. The
3440 		# portage.locks functions are only designed
3441 		# to work between separate processes. Since
3442 		# the lock is held by the current process,
3443 		# use the scheduler and fetcher methods to
3444 		# synchronize with the fetcher.
3445 		prefetcher = self.prefetcher
3446 		if prefetcher is None:
3448 		elif not prefetcher.isAlive():
3450 		elif prefetcher.poll() is None:
3452 			waiting_msg = ("Fetching '%s' " + \
3453 				"in the background. " + \
3454 				"To view fetch progress, run `tail -f " + \
3455 				"/var/log/emerge-fetch.log` in another " + \
3456 				"terminal.") % prefetcher.pkg_path
3457 			msg_prefix = colorize("GOOD", " * ")
3458 			from textwrap import wrap
3459 			waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3460 				for line in wrap(waiting_msg, 65))
3461 			if not self.background:
3462 				writemsg(waiting_msg, noiselevel=-1)
3464 			self._current_task = prefetcher
3465 			prefetcher.addExitListener(self._prefetch_exit)
3468 		self._prefetch_exit(prefetcher)
3470 	def _prefetch_exit(self, prefetcher):
3473 		pkg_count = self.pkg_count
3474 		if not (self.opts.pretend or self.opts.fetchonly):
3475 			self._build_dir.lock()
3476 			# If necessary, discard old log so that we don't
3478 				self._build_dir.clean_log()
3479 			# Initialze PORTAGE_LOG_FILE.
3480 			portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3481 		fetcher = BinpkgFetcher(background=self.background,
3482 			logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3483 			pretend=self.opts.pretend, scheduler=self.scheduler)
3484 		pkg_path = fetcher.pkg_path
3485 		self._pkg_path = pkg_path
# Only fetch when --getbinpkg is enabled and the package lives on the
# remote binhost; otherwise fall straight through to _fetcher_exit.
3487 		if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3489 			msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3490 				(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3491 			short_msg = "emerge: (%s of %s) %s Fetch" % \
3492 				(pkg_count.curval, pkg_count.maxval, pkg.cpv)
3493 			self.logger.log(msg, short_msg=short_msg)
3494 			self._start_task(fetcher, self._fetcher_exit)
3497 		self._fetcher_exit(fetcher)
3499 	def _fetcher_exit(self, fetcher):
3501 		# The fetcher only has a returncode when
3502 		# --getbinpkg is enabled.
3503 		if fetcher.returncode is not None:
3504 			self._fetched_pkg = True
3505 			if self._default_exit(fetcher) != os.EX_OK:
3506 				self._unlock_builddir()
3510 		if self.opts.pretend:
3511 			self._current_task = None
3512 			self.returncode = os.EX_OK
# Verify the downloaded package's digest before using it.
3520 			logfile = self.settings.get("PORTAGE_LOG_FILE")
3521 			verifier = BinpkgVerifier(background=self.background,
3522 				logfile=logfile, pkg=self.pkg)
3523 			self._start_task(verifier, self._verifier_exit)
3526 		self._verifier_exit(verifier)
3528 	def _verifier_exit(self, verifier):
3529 		if verifier is not None and \
3530 			self._default_exit(verifier) != os.EX_OK:
3531 			self._unlock_builddir()
3535 		logger = self.logger
3537 		pkg_count = self.pkg_count
3538 		pkg_path = self._pkg_path
3540 		if self._fetched_pkg:
# A freshly fetched package is injected into the local binary tree.
3541 			self._bintree.inject(pkg.cpv, filename=pkg_path)
3543 		if self.opts.fetchonly:
3544 			self._current_task = None
3545 			self.returncode = os.EX_OK
3549 		msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3550 			(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3551 		short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3552 			(pkg_count.curval, pkg_count.maxval, pkg.cpv)
3553 		logger.log(msg, short_msg=short_msg)
3556 		settings = self.settings
3557 		ebuild_phase = EbuildPhase(background=self.background,
3558 			pkg=pkg, phase=phase, scheduler=self.scheduler,
3559 			settings=settings, tree=self._tree)
3561 		self._start_task(ebuild_phase, self._clean_exit)
3563 	def _clean_exit(self, clean_phase):
3564 		if self._default_exit(clean_phase) != os.EX_OK:
3565 			self._unlock_builddir()
# After clean: recreate the build/image/build-info dirs, unpack the xpak
# metadata from the .tbz2 and record the package md5 in the vdb info dir.
3569 		dir_path = self._build_dir.dir_path
3571 		infloc = self._infloc
3573 		pkg_path = self._pkg_path
3576 		for mydir in (dir_path, self._image_dir, infloc):
3577 			portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3578 				gid=portage.data.portage_gid, mode=dir_mode)
3580 		# This initializes PORTAGE_LOG_FILE.
3581 		portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3582 		self._writemsg_level(">>> Extracting info\n")
3584 		pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3585 		check_missing_metadata = ("CATEGORY", "PF")
3586 		missing_metadata = set()
3587 		for k in check_missing_metadata:
3588 			v = pkg_xpak.getfile(k)
3590 				missing_metadata.add(k)
3592 		pkg_xpak.unpackinfo(infloc)
3593 		for k in missing_metadata:
3601 			f = open(os.path.join(infloc, k), 'wb')
3607 		# Store the md5sum in the vdb.
3608 		f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3610 			f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3614 		# This gives bashrc users an opportunity to do various things
3615 		# such as remove binary packages after they're installed.
3616 		settings = self.settings
3617 		settings.setcpv(self.pkg)
3618 		settings["PORTAGE_BINPKG_FILE"] = pkg_path
3619 		settings.backup_changes("PORTAGE_BINPKG_FILE")
# Setup phase goes through the scheduler's dedicated setup queue.
3622 		setup_phase = EbuildPhase(background=self.background,
3623 			pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3624 			settings=settings, tree=self._tree)
3626 		setup_phase.addExitListener(self._setup_exit)
3627 		self._current_task = setup_phase
3628 		self.scheduler.scheduleSetup(setup_phase)
3630 	def _setup_exit(self, setup_phase):
3631 		if self._default_exit(setup_phase) != os.EX_OK:
3632 			self._unlock_builddir()
3636 		extractor = BinpkgExtractorAsync(background=self.background,
3637 			image_dir=self._image_dir,
3638 			pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3639 		self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3640 		self._start_task(extractor, self._extractor_exit)
3642 	def _extractor_exit(self, extractor):
3643 		if self._final_exit(extractor) != os.EX_OK:
3644 			self._unlock_builddir()
3645 			writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3649 	def _unlock_builddir(self):
# pretend/fetchonly never took the lock (see _prefetch_exit), so nothing
# needs releasing in those modes.
3650 		if self.opts.pretend or self.opts.fetchonly:
3652 		portage.elog.elog_process(self.pkg.cpv, self.settings)
3653 		self._build_dir.unlock()
# Merge body (def line elided): install the extracted image, exposing
# PORTAGE_BINPKG_FILE to bashrc hooks for the duration of the merge.
3657 		# This gives bashrc users an opportunity to do various things
3658 		# such as remove binary packages after they're installed.
3659 		settings = self.settings
3660 		settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3661 		settings.backup_changes("PORTAGE_BINPKG_FILE")
3663 		merge = EbuildMerge(find_blockers=self.find_blockers,
3664 			ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3665 			pkg=self.pkg, pkg_count=self.pkg_count,
3666 			pkg_path=self._pkg_path, scheduler=self.scheduler,
3667 			settings=settings, tree=self._tree, world_atom=self.world_atom)
3670 			retval = merge.execute()
3672 			settings.pop("PORTAGE_BINPKG_FILE", None)
3673 			self._unlock_builddir()
# BinpkgFetcher: downloads a .tbz2 from the configured binhost using the
# user's FETCHCOMMAND/RESUMECOMMAND, with optional file locking.
# NOTE(review): some lines appear elided in this view.
3676 class BinpkgFetcher(SpawnProcess):
3678 	__slots__ = ("pkg", "pretend",
3679 		"locked", "pkg_path", "_lock_obj")
3681 	def __init__(self, **kwargs):
3682 		SpawnProcess.__init__(self, **kwargs)
# Local destination path inside the bintree's package directory.
3684 		self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
# Start hook body (def line elided):
3692 		pretend = self.pretend
3693 		bintree = pkg.root_config.trees["bintree"]
3694 		settings = bintree.settings
3695 		use_locks = "distlocks" in settings.features
3696 		pkg_path = self.pkg_path
3699 			portage.util.ensure_dirs(os.path.dirname(pkg_path))
# Resume only when a partial file is already present and known-invalid.
3702 		exists = os.path.exists(pkg_path)
3703 		resume = exists and os.path.basename(pkg_path) in bintree.invalids
3704 		if not (pretend or resume):
3705 			# Remove existing file or broken symlink.
3711 		# urljoin doesn't work correctly with
3712 		# unrecognized protocols like sftp
3713 		if bintree._remote_has_index:
3714 			rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3716 				rel_uri = pkg.cpv + ".tbz2"
3717 			uri = bintree._remote_base_uri.rstrip("/") + \
3718 				"/" + rel_uri.lstrip("/")
3720 			uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3721 				"/" + pkg.pf + ".tbz2"
# Pretend mode just prints the URI and succeeds.
3724 			portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3725 			self.returncode = os.EX_OK
# Build the fetch command: prefer a protocol-specific
# FETCHCOMMAND_<PROTO>/RESUMECOMMAND_<PROTO>, falling back to the generic.
3729 		protocol = urlparse.urlparse(uri)[0]
3730 		fcmd_prefix = "FETCHCOMMAND"
3732 			fcmd_prefix = "RESUMECOMMAND"
3733 		fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3735 			fcmd = settings.get(fcmd_prefix)
3738 			"DISTDIR" : os.path.dirname(pkg_path),
3740 			"FILE" : os.path.basename(pkg_path)
3743 		fetch_env = dict(settings.iteritems())
3744 		fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3745 			for x in shlex.split(fcmd)]
3747 		if self.fd_pipes is None:
3749 			fd_pipes = self.fd_pipes
3751 		# Redirect all output to stdout since some fetchers like
3752 		# wget pollute stderr (if portage detects a problem then it
3753 		# can send it's own message to stderr).
3754 		fd_pipes.setdefault(0, sys.stdin.fileno())
3755 		fd_pipes.setdefault(1, sys.stdout.fileno())
3756 		fd_pipes.setdefault(2, sys.stdout.fileno())
3758 		self.args = fetch_args
3759 		self.env = fetch_env
3760 		SpawnProcess._start(self)
3762 	def _set_returncode(self, wait_retval):
3763 		SpawnProcess._set_returncode(self, wait_retval)
3764 		if self.returncode == os.EX_OK:
3765 			# If possible, update the mtime to match the remote package if
3766 			# the fetcher didn't already do it automatically.
3767 			bintree = self.pkg.root_config.trees["bintree"]
3768 			if bintree._remote_has_index:
3769 				remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3770 				if remote_mtime is not None:
3772 						remote_mtime = long(remote_mtime)
3777 						local_mtime = long(os.stat(self.pkg_path).st_mtime)
3781 					if remote_mtime != local_mtime:
3783 							os.utime(self.pkg_path,
3784 								(remote_mtime, remote_mtime))
# lock() docstring fragment (def line elided):
3793 		This raises an AlreadyLocked exception if lock() is called
3794 		while a lock is already held. In order to avoid this, call
3795 		unlock() or check whether the "locked" attribute is True
3796 		or False before calling lock().
3798 		if self._lock_obj is not None:
3799 			raise self.AlreadyLocked((self._lock_obj,))
3801 		self._lock_obj = portage.locks.lockfile(
3802 			self.pkg_path, wantnewlockfile=1)
3805 	class AlreadyLocked(portage.exception.PortageException):
# unlock() body (def line elided): no-op when no lock is held.
3809 		if self._lock_obj is None:
3811 		portage.locks.unlockfile(self._lock_obj)
3812 		self._lock_obj = None
3815 class BinpkgVerifier(AsynchronousTask):
3816 __slots__ = ("logfile", "pkg",)
3820 Note: Unlike a normal AsynchronousTask.start() method,
3821 this one does all work is synchronously. The returncode
3822 attribute will be set before it returns.
3826 root_config = pkg.root_config
3827 bintree = root_config.trees["bintree"]
3829 stdout_orig = sys.stdout
3830 stderr_orig = sys.stderr
3832 if self.background and self.logfile is not None:
3833 log_file = open(self.logfile, 'a')
3835 if log_file is not None:
3836 sys.stdout = log_file
3837 sys.stderr = log_file
3839 bintree.digestCheck(pkg)
3840 except portage.exception.FileNotFound:
3841 writemsg("!!! Fetching Binary failed " + \
3842 "for '%s'\n" % pkg.cpv, noiselevel=-1)
3844 except portage.exception.DigestException, e:
3845 writemsg("\n!!! Digest verification failed:\n",
3847 writemsg("!!! %s\n" % e.value[0],
3849 writemsg("!!! Reason: %s\n" % e.value[1],
3851 writemsg("!!! Got: %s\n" % e.value[2],
3853 writemsg("!!! Expected: %s\n" % e.value[3],
3856 if rval != os.EX_OK:
3857 pkg_path = bintree.getname(pkg.cpv)
3858 head, tail = os.path.split(pkg_path)
3859 temp_filename = portage._checksum_failure_temp_file(head, tail)
3860 writemsg("File renamed to '%s'\n" % (temp_filename,),
3863 sys.stdout = stdout_orig
3864 sys.stderr = stderr_orig
3865 if log_file is not None:
3868 self.returncode = rval
3871 class BinpkgPrefetcher(CompositeTask):
3873 __slots__ = ("pkg",) + \
3874 ("pkg_path", "_bintree",)
3877 self._bintree = self.pkg.root_config.trees["bintree"]
3878 fetcher = BinpkgFetcher(background=self.background,
3879 logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3880 scheduler=self.scheduler)
3881 self.pkg_path = fetcher.pkg_path
3882 self._start_task(fetcher, self._fetcher_exit)
3884 def _fetcher_exit(self, fetcher):
3886 if self._default_exit(fetcher) != os.EX_OK:
3890 verifier = BinpkgVerifier(background=self.background,
3891 logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3892 self._start_task(verifier, self._verifier_exit)
3894 def _verifier_exit(self, verifier):
3895 if self._default_exit(verifier) != os.EX_OK:
3899 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3901 self._current_task = None
3902 self.returncode = os.EX_OK
3905 class BinpkgExtractorAsync(SpawnProcess):
3907 __slots__ = ("image_dir", "pkg", "pkg_path")
3909 _shell_binary = portage.const.BASH_BINARY
3912 self.args = [self._shell_binary, "-c",
3913 "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3914 (portage._shell_quote(self.pkg_path),
3915 portage._shell_quote(self.image_dir))]
3917 self.env = self.pkg.root_config.settings.environ()
3918 SpawnProcess._start(self)
3920 class MergeListItem(CompositeTask):
3923 TODO: For parallel scheduling, everything here needs asynchronous
3924 execution support (start, poll, and wait methods).
3927 __slots__ = ("args_set",
3928 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3929 "find_blockers", "logger", "mtimedb", "pkg",
3930 "pkg_count", "pkg_to_replace", "prefetcher",
3931 "settings", "statusMessage", "world_atom") + \
3937 build_opts = self.build_opts
3940 # uninstall, executed by self.merge()
3941 self.returncode = os.EX_OK
3945 args_set = self.args_set
3946 find_blockers = self.find_blockers
3947 logger = self.logger
3948 mtimedb = self.mtimedb
3949 pkg_count = self.pkg_count
3950 scheduler = self.scheduler
3951 settings = self.settings
3952 world_atom = self.world_atom
3953 ldpath_mtimes = mtimedb["ldpath"]
3955 action_desc = "Emerging"
3957 if pkg.type_name == "binary":
3958 action_desc += " binary"
3960 if build_opts.fetchonly:
3961 action_desc = "Fetching"
3963 msg = "%s (%s of %s) %s" % \
3965 colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3966 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3967 colorize("GOOD", pkg.cpv))
3969 portdb = pkg.root_config.trees["porttree"].dbapi
3970 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3971 if portdir_repo_name:
3972 pkg_repo_name = pkg.metadata.get("repository")
3973 if pkg_repo_name != portdir_repo_name:
3974 if not pkg_repo_name:
3975 pkg_repo_name = "unknown repo"
3976 msg += " from %s" % pkg_repo_name
3979 msg += " %s %s" % (preposition, pkg.root)
3981 if not build_opts.pretend:
3982 self.statusMessage(msg)
3983 logger.log(" >>> emerge (%s of %s) %s to %s" % \
3984 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3986 if pkg.type_name == "ebuild":
3988 build = EbuildBuild(args_set=args_set,
3989 background=self.background,
3990 config_pool=self.config_pool,
3991 find_blockers=find_blockers,
3992 ldpath_mtimes=ldpath_mtimes, logger=logger,
3993 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3994 prefetcher=self.prefetcher, scheduler=scheduler,
3995 settings=settings, world_atom=world_atom)
3997 self._install_task = build
3998 self._start_task(build, self._default_final_exit)
4001 elif pkg.type_name == "binary":
4003 binpkg = Binpkg(background=self.background,
4004 find_blockers=find_blockers,
4005 ldpath_mtimes=ldpath_mtimes, logger=logger,
4006 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
4007 prefetcher=self.prefetcher, settings=settings,
4008 scheduler=scheduler, world_atom=world_atom)
4010 self._install_task = binpkg
4011 self._start_task(binpkg, self._default_final_exit)
4015 self._install_task.poll()
4016 return self.returncode
4019 self._install_task.wait()
4020 return self.returncode
4025 build_opts = self.build_opts
4026 find_blockers = self.find_blockers
4027 logger = self.logger
4028 mtimedb = self.mtimedb
4029 pkg_count = self.pkg_count
4030 prefetcher = self.prefetcher
4031 scheduler = self.scheduler
4032 settings = self.settings
4033 world_atom = self.world_atom
4034 ldpath_mtimes = mtimedb["ldpath"]
4037 if not (build_opts.buildpkgonly or \
4038 build_opts.fetchonly or build_opts.pretend):
4040 uninstall = PackageUninstall(background=self.background,
4041 ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
4042 pkg=pkg, scheduler=scheduler, settings=settings)
4045 retval = uninstall.wait()
4046 if retval != os.EX_OK:
4050 if build_opts.fetchonly or \
4051 build_opts.buildpkgonly:
4052 return self.returncode
4054 retval = self._install_task.install()
4057 class PackageMerge(AsynchronousTask):
4059 TODO: Implement asynchronous merge so that the scheduler can
4060 run while a merge is executing.
4063 __slots__ = ("merge",)
4067 pkg = self.merge.pkg
4068 pkg_count = self.merge.pkg_count
4071 action_desc = "Uninstalling"
4072 preposition = "from"
4075 action_desc = "Installing"
4077 counter_str = "(%s of %s) " % \
4078 (colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
4079 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)))
4084 colorize("GOOD", pkg.cpv))
4087 msg += " %s %s" % (preposition, pkg.root)
4089 if not self.merge.build_opts.fetchonly and \
4090 not self.merge.build_opts.pretend and \
4091 not self.merge.build_opts.buildpkgonly:
4092 self.merge.statusMessage(msg)
4094 self.returncode = self.merge.merge()
4097 class DependencyArg(object):
4098 def __init__(self, arg=None, root_config=None):
4100 self.root_config = root_config
4103 return str(self.arg)
4105 class AtomArg(DependencyArg):
4106 def __init__(self, atom=None, **kwargs):
4107 DependencyArg.__init__(self, **kwargs)
4109 if not isinstance(self.atom, portage.dep.Atom):
4110 self.atom = portage.dep.Atom(self.atom)
4111 self.set = (self.atom, )
class PackageArg(DependencyArg):
	"""A dependency argument that wraps a specific Package instance.

	The atom is an exact-version match ("=" + cpv), so only the given
	package instance can satisfy this argument.
	"""
	def __init__(self, package=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		self.package = package
		# Exact-version atom: matches only this package's cpv.
		self.atom = portage.dep.Atom("=" + package.cpv)
		self.set = (self.atom, )
4120 class SetArg(DependencyArg):
4121 def __init__(self, set=None, **kwargs):
4122 DependencyArg.__init__(self, **kwargs)
4124 self.name = self.arg[len(SETPREFIX):]
4126 class Dependency(SlotObject):
4127 __slots__ = ("atom", "blocker", "depth",
4128 "parent", "onlydeps", "priority", "root")
4129 def __init__(self, **kwargs):
4130 SlotObject.__init__(self, **kwargs)
4131 if self.priority is None:
4132 self.priority = DepPriority()
4133 if self.depth is None:
4136 class BlockerCache(portage.cache.mappings.MutableMapping):
4137 """This caches blockers of installed packages so that dep_check does not
4138 have to be done for every single installed package on every invocation of
4139 emerge. The cache is invalidated whenever it is detected that something
4140 has changed that might alter the results of dep_check() calls:
4141 1) the set of installed packages (including COUNTER) has changed
4142 2) the old-style virtuals have changed
4145 # Number of uncached packages to trigger cache update, since
4146 # it's wasteful to update it for every vdb change.
4147 _cache_threshold = 5
4149 class BlockerData(object):
4151 __slots__ = ("__weakref__", "atoms", "counter")
4153 def __init__(self, counter, atoms):
4154 self.counter = counter
4157 def __init__(self, myroot, vardb):
4159 self._virtuals = vardb.settings.getvirtuals()
4160 self._cache_filename = os.path.join(myroot,
4161 portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4162 self._cache_version = "1"
4163 self._cache_data = None
4164 self._modified = set()
4169 f = open(self._cache_filename, mode='rb')
4170 mypickle = pickle.Unpickler(f)
4172 mypickle.find_global = None
4173 except AttributeError:
4174 # TODO: If py3k, override Unpickler.find_class().
4176 self._cache_data = mypickle.load()
4179 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4180 if isinstance(e, pickle.UnpicklingError):
4181 writemsg("!!! Error loading '%s': %s\n" % \
4182 (self._cache_filename, str(e)), noiselevel=-1)
4185 cache_valid = self._cache_data and \
4186 isinstance(self._cache_data, dict) and \
4187 self._cache_data.get("version") == self._cache_version and \
4188 isinstance(self._cache_data.get("blockers"), dict)
4190 # Validate all the atoms and counters so that
4191 # corruption is detected as soon as possible.
4192 invalid_items = set()
4193 for k, v in self._cache_data["blockers"].iteritems():
4194 if not isinstance(k, basestring):
4195 invalid_items.add(k)
4198 if portage.catpkgsplit(k) is None:
4199 invalid_items.add(k)
4201 except portage.exception.InvalidData:
4202 invalid_items.add(k)
4204 if not isinstance(v, tuple) or \
4206 invalid_items.add(k)
4209 if not isinstance(counter, (int, long)):
4210 invalid_items.add(k)
4212 if not isinstance(atoms, (list, tuple)):
4213 invalid_items.add(k)
4215 invalid_atom = False
4217 if not isinstance(atom, basestring):
4220 if atom[:1] != "!" or \
4221 not portage.isvalidatom(
4222 atom, allow_blockers=True):
4226 invalid_items.add(k)
4229 for k in invalid_items:
4230 del self._cache_data["blockers"][k]
4231 if not self._cache_data["blockers"]:
4235 self._cache_data = {"version":self._cache_version}
4236 self._cache_data["blockers"] = {}
4237 self._cache_data["virtuals"] = self._virtuals
4238 self._modified.clear()
4241 """If the current user has permission and the internal blocker cache
4242 been updated, save it to disk and mark it unmodified. This is called
4243 by emerge after it has proccessed blockers for all installed packages.
4244 Currently, the cache is only written if the user has superuser
4245 privileges (since that's required to obtain a lock), but all users
4246 have read access and benefit from faster blocker lookups (as long as
4247 the entire cache is still valid). The cache is stored as a pickled
4248 dict object with the following format:
4252 "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4253 "virtuals" : vardb.settings.getvirtuals()
4256 if len(self._modified) >= self._cache_threshold and \
4259 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4260 pickle.dump(self._cache_data, f, protocol=2)
4262 portage.util.apply_secpass_permissions(
4263 self._cache_filename, gid=portage.portage_gid, mode=0644)
4264 except (IOError, OSError), e:
4266 self._modified.clear()
4268 def __setitem__(self, cpv, blocker_data):
4270 Update the cache and mark it as modified for a future call to
4273 @param cpv: Package for which to cache blockers.
4275 @param blocker_data: An object with counter and atoms attributes.
4276 @type blocker_data: BlockerData
4278 self._cache_data["blockers"][cpv] = \
4279 (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4280 self._modified.add(cpv)
4283 if self._cache_data is None:
4284 # triggered by python-trace
4286 return iter(self._cache_data["blockers"])
4288 def __delitem__(self, cpv):
4289 del self._cache_data["blockers"][cpv]
4291 def __getitem__(self, cpv):
4294 @returns: An object with counter and atoms attributes.
4296 return self.BlockerData(*self._cache_data["blockers"][cpv])
4298 class BlockerDB(object):
4300 def __init__(self, root_config):
4301 self._root_config = root_config
4302 self._vartree = root_config.trees["vartree"]
4303 self._portdb = root_config.trees["porttree"].dbapi
4305 self._dep_check_trees = None
4306 self._fake_vartree = None
4308 def _get_fake_vartree(self, acquire_lock=0):
4309 fake_vartree = self._fake_vartree
4310 if fake_vartree is None:
4311 fake_vartree = FakeVartree(self._root_config,
4312 acquire_lock=acquire_lock)
4313 self._fake_vartree = fake_vartree
4314 self._dep_check_trees = { self._vartree.root : {
4315 "porttree" : fake_vartree,
4316 "vartree" : fake_vartree,
4319 fake_vartree.sync(acquire_lock=acquire_lock)
4322 def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4323 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4324 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4325 settings = self._vartree.settings
4326 stale_cache = set(blocker_cache)
4327 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4328 dep_check_trees = self._dep_check_trees
4329 vardb = fake_vartree.dbapi
4330 installed_pkgs = list(vardb)
4332 for inst_pkg in installed_pkgs:
4333 stale_cache.discard(inst_pkg.cpv)
4334 cached_blockers = blocker_cache.get(inst_pkg.cpv)
4335 if cached_blockers is not None and \
4336 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4337 cached_blockers = None
4338 if cached_blockers is not None:
4339 blocker_atoms = cached_blockers.atoms
4341 # Use aux_get() to trigger FakeVartree global
4342 # updates on *DEPEND when appropriate.
4343 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4345 portage.dep._dep_check_strict = False
4346 success, atoms = portage.dep_check(depstr,
4347 vardb, settings, myuse=inst_pkg.use.enabled,
4348 trees=dep_check_trees, myroot=inst_pkg.root)
4350 portage.dep._dep_check_strict = True
4352 pkg_location = os.path.join(inst_pkg.root,
4353 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4354 portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4355 (pkg_location, atoms), noiselevel=-1)
4358 blocker_atoms = [atom for atom in atoms \
4359 if atom.startswith("!")]
4360 blocker_atoms.sort()
4361 counter = long(inst_pkg.metadata["COUNTER"])
4362 blocker_cache[inst_pkg.cpv] = \
4363 blocker_cache.BlockerData(counter, blocker_atoms)
4364 for cpv in stale_cache:
4365 del blocker_cache[cpv]
4366 blocker_cache.flush()
4368 blocker_parents = digraph()
4370 for pkg in installed_pkgs:
4371 for blocker_atom in blocker_cache[pkg.cpv].atoms:
4372 blocker_atom = blocker_atom.lstrip("!")
4373 blocker_atoms.append(blocker_atom)
4374 blocker_parents.add(blocker_atom, pkg)
4376 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4377 blocking_pkgs = set()
4378 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4379 blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4381 # Check for blockers in the other direction.
4382 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4384 portage.dep._dep_check_strict = False
4385 success, atoms = portage.dep_check(depstr,
4386 vardb, settings, myuse=new_pkg.use.enabled,
4387 trees=dep_check_trees, myroot=new_pkg.root)
4389 portage.dep._dep_check_strict = True
4391 # We should never get this far with invalid deps.
4392 show_invalid_depstring_notice(new_pkg, depstr, atoms)
4395 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4398 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4399 for inst_pkg in installed_pkgs:
4401 blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4402 except (portage.exception.InvalidDependString, StopIteration):
4404 blocking_pkgs.add(inst_pkg)
4406 return blocking_pkgs
4408 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4410 msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4411 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4412 p_type, p_root, p_key, p_status = parent_node
4414 if p_status == "nomerge":
4415 category, pf = portage.catsplit(p_key)
4416 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4417 msg.append("Portage is unable to process the dependencies of the ")
4418 msg.append("'%s' package. " % p_key)
4419 msg.append("In order to correct this problem, the package ")
4420 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4421 msg.append("As a temporary workaround, the --nodeps option can ")
4422 msg.append("be used to ignore all dependencies. For reference, ")
4423 msg.append("the problematic dependencies can be found in the ")
4424 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4426 msg.append("This package can not be installed. ")
4427 msg.append("Please notify the '%s' package maintainer " % p_key)
4428 msg.append("about this problem.")
4430 msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4431 writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4433 class PackageVirtualDbapi(portage.dbapi):
4435 A dbapi-like interface class that represents the state of the installed
4436 package database as new packages are installed, replacing any packages
4437 that previously existed in the same slot. The main difference between
4438 this class and fakedbapi is that this one uses Package instances
4439 internally (passed in via cpv_inject() and cpv_remove() calls).
4441 def __init__(self, settings):
4442 portage.dbapi.__init__(self)
4443 self.settings = settings
4444 self._match_cache = {}
4450 Remove all packages.
4454 self._cp_map.clear()
4455 self._cpv_map.clear()
4458 obj = PackageVirtualDbapi(self.settings)
4459 obj._match_cache = self._match_cache.copy()
4460 obj._cp_map = self._cp_map.copy()
4461 for k, v in obj._cp_map.iteritems():
4462 obj._cp_map[k] = v[:]
4463 obj._cpv_map = self._cpv_map.copy()
4467 return self._cpv_map.itervalues()
4469 def __contains__(self, item):
4470 existing = self._cpv_map.get(item.cpv)
4471 if existing is not None and \
4476 def get(self, item, default=None):
4477 cpv = getattr(item, "cpv", None)
4481 type_name, root, cpv, operation = item
4483 existing = self._cpv_map.get(cpv)
4484 if existing is not None and \
def match_pkgs(self, atom):
	"""Return the Package instances matching atom.

	Maps the cpv strings produced by self.match() back to the
	corresponding Package objects stored in self._cpv_map.
	"""
	return [self._cpv_map[cpv] for cpv in self.match(atom)]
4492 def _clear_cache(self):
4493 if self._categories is not None:
4494 self._categories = None
4495 if self._match_cache:
4496 self._match_cache = {}
4498 def match(self, origdep, use_cache=1):
4499 result = self._match_cache.get(origdep)
4500 if result is not None:
4502 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4503 self._match_cache[origdep] = result
def cpv_exists(self, cpv):
	"""Return True if a package with the given cpv is in the database."""
	return cpv in self._cpv_map
4509 def cp_list(self, mycp, use_cache=1):
4510 cachelist = self._match_cache.get(mycp)
4511 # cp_list() doesn't expand old-style virtuals
4512 if cachelist and cachelist[0].startswith(mycp):
4514 cpv_list = self._cp_map.get(mycp)
4515 if cpv_list is None:
4518 cpv_list = [pkg.cpv for pkg in cpv_list]
4519 self._cpv_sort_ascending(cpv_list)
4520 if not (not cpv_list and mycp.startswith("virtual/")):
4521 self._match_cache[mycp] = cpv_list
4525 return list(self._cp_map)
4528 return list(self._cpv_map)
4530 def cpv_inject(self, pkg):
4531 cp_list = self._cp_map.get(pkg.cp)
4534 self._cp_map[pkg.cp] = cp_list
4535 e_pkg = self._cpv_map.get(pkg.cpv)
4536 if e_pkg is not None:
4539 self.cpv_remove(e_pkg)
4540 for e_pkg in cp_list:
4541 if e_pkg.slot_atom == pkg.slot_atom:
4544 self.cpv_remove(e_pkg)
4547 self._cpv_map[pkg.cpv] = pkg
4550 def cpv_remove(self, pkg):
4551 old_pkg = self._cpv_map.get(pkg.cpv)
4554 self._cp_map[pkg.cp].remove(pkg)
4555 del self._cpv_map[pkg.cpv]
def aux_get(self, cpv, wants):
	"""Return metadata values for cpv, one per key in wants.

	Missing keys yield the empty string. Raises KeyError if cpv is
	not present in the database.
	"""
	metadata = self._cpv_map[cpv].metadata
	return [metadata.get(x, "") for x in wants]
def aux_update(self, cpv, values):
	"""Merge the given metadata values into cpv's existing metadata.

	Raises KeyError if cpv is not present in the database.
	"""
	self._cpv_map[cpv].metadata.update(values)
4566 class depgraph(object):
4568 pkg_tree_map = RootConfig.pkg_tree_map
4570 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4572 def __init__(self, settings, trees, myopts, myparams, spinner):
4573 self.settings = settings
4574 self.target_root = settings["ROOT"]
4575 self.myopts = myopts
4576 self.myparams = myparams
4578 if settings.get("PORTAGE_DEBUG", "") == "1":
4580 self.spinner = spinner
4581 self._running_root = trees["/"]["root_config"]
4582 self._opts_no_restart = Scheduler._opts_no_restart
4583 self.pkgsettings = {}
4584 # Maps slot atom to package for each Package added to the graph.
4585 self._slot_pkg_map = {}
4586 # Maps nodes to the reasons they were selected for reinstallation.
4587 self._reinstall_nodes = {}
4590 self._trees_orig = trees
4592 # Contains a filtered view of preferred packages that are selected
4593 # from available repositories.
4594 self._filtered_trees = {}
4595 # Contains installed packages and new packages that have been added
4597 self._graph_trees = {}
4598 # All Package instances
4599 self._pkg_cache = {}
4600 for myroot in trees:
4601 self.trees[myroot] = {}
4602 # Create a RootConfig instance that references
4603 # the FakeVartree instead of the real one.
4604 self.roots[myroot] = RootConfig(
4605 trees[myroot]["vartree"].settings,
4607 trees[myroot]["root_config"].setconfig)
4608 for tree in ("porttree", "bintree"):
4609 self.trees[myroot][tree] = trees[myroot][tree]
4610 self.trees[myroot]["vartree"] = \
4611 FakeVartree(trees[myroot]["root_config"],
4612 pkg_cache=self._pkg_cache)
4613 self.pkgsettings[myroot] = portage.config(
4614 clone=self.trees[myroot]["vartree"].settings)
4615 self._slot_pkg_map[myroot] = {}
4616 vardb = self.trees[myroot]["vartree"].dbapi
4617 preload_installed_pkgs = "--nodeps" not in self.myopts and \
4618 "--buildpkgonly" not in self.myopts
4619 # This fakedbapi instance will model the state that the vdb will
4620 # have after new packages have been installed.
4621 fakedb = PackageVirtualDbapi(vardb.settings)
4622 if preload_installed_pkgs:
4624 self.spinner.update()
4625 # This triggers metadata updates via FakeVartree.
4626 vardb.aux_get(pkg.cpv, [])
4627 fakedb.cpv_inject(pkg)
4629 # Now that the vardb state is cached in our FakeVartree,
4630 # we won't be needing the real vartree cache for awhile.
4631 # To make some room on the heap, clear the vardbapi
4633 trees[myroot]["vartree"].dbapi._clear_cache()
4636 self.mydbapi[myroot] = fakedb
4639 graph_tree.dbapi = fakedb
4640 self._graph_trees[myroot] = {}
4641 self._filtered_trees[myroot] = {}
4642 # Substitute the graph tree for the vartree in dep_check() since we
4643 # want atom selections to be consistent with package selections
4644 # have already been made.
4645 self._graph_trees[myroot]["porttree"] = graph_tree
4646 self._graph_trees[myroot]["vartree"] = graph_tree
4647 def filtered_tree():
4649 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4650 self._filtered_trees[myroot]["porttree"] = filtered_tree
4652 # Passing in graph_tree as the vartree here could lead to better
4653 # atom selections in some cases by causing atoms for packages that
4654 # have been added to the graph to be preferred over other choices.
4655 # However, it can trigger atom selections that result in
4656 # unresolvable direct circular dependencies. For example, this
4657 # happens with gwydion-dylan which depends on either itself or
4658 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4659 # gwydion-dylan-bin needs to be selected in order to avoid a
4660 # an unresolvable direct circular dependency.
4662 # To solve the problem described above, pass in "graph_db" so that
4663 # packages that have been added to the graph are distinguishable
4664 # from other available packages and installed packages. Also, pass
4665 # the parent package into self._select_atoms() calls so that
4666 # unresolvable direct circular dependencies can be detected and
4667 # avoided when possible.
4668 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4669 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4672 portdb = self.trees[myroot]["porttree"].dbapi
4673 bindb = self.trees[myroot]["bintree"].dbapi
4674 vardb = self.trees[myroot]["vartree"].dbapi
4675 # (db, pkg_type, built, installed, db_keys)
4676 if "--usepkgonly" not in self.myopts:
4677 db_keys = list(portdb._aux_cache_keys)
4678 dbs.append((portdb, "ebuild", False, False, db_keys))
4679 if "--usepkg" in self.myopts:
4680 db_keys = list(bindb._aux_cache_keys)
4681 dbs.append((bindb, "binary", True, False, db_keys))
4682 db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4683 dbs.append((vardb, "installed", True, True, db_keys))
4684 self._filtered_trees[myroot]["dbs"] = dbs
4685 if "--usepkg" in self.myopts:
4686 self.trees[myroot]["bintree"].populate(
4687 "--getbinpkg" in self.myopts,
4688 "--getbinpkgonly" in self.myopts)
4691 self.digraph=portage.digraph()
4692 # contains all sets added to the graph
4694 # contains atoms given as arguments
4695 self._sets["args"] = InternalPackageSet()
4696 # contains all atoms from all sets added to the graph, including
4697 # atoms given as arguments
4698 self._set_atoms = InternalPackageSet()
4699 self._atom_arg_map = {}
4700 # contains all nodes pulled in by self._set_atoms
4701 self._set_nodes = set()
4702 # Contains only Blocker -> Uninstall edges
4703 self._blocker_uninstalls = digraph()
4704 # Contains only Package -> Blocker edges
4705 self._blocker_parents = digraph()
4706 # Contains only irrelevant Package -> Blocker edges
4707 self._irrelevant_blockers = digraph()
4708 # Contains only unsolvable Package -> Blocker edges
4709 self._unsolvable_blockers = digraph()
4710 # Contains all Blocker -> Blocked Package edges
4711 self._blocked_pkgs = digraph()
4712 # Contains world packages that have been protected from
4713 # uninstallation but may not have been added to the graph
4714 # if the graph is not complete yet.
4715 self._blocked_world_pkgs = {}
4716 self._slot_collision_info = {}
4717 # Slot collision nodes are not allowed to block other packages since
4718 # blocker validation is only able to account for one package per slot.
4719 self._slot_collision_nodes = set()
4720 self._parent_atoms = {}
4721 self._slot_conflict_parent_atoms = set()
4722 self._serialized_tasks_cache = None
4723 self._scheduler_graph = None
4724 self._displayed_list = None
4725 self._pprovided_args = []
4726 self._missing_args = []
4727 self._masked_installed = set()
4728 self._unsatisfied_deps_for_display = []
4729 self._unsatisfied_blockers_for_display = None
4730 self._circular_deps_for_display = None
4731 self._dep_stack = []
4732 self._unsatisfied_deps = []
4733 self._initially_unsatisfied_deps = []
4734 self._ignored_deps = []
4735 self._required_set_names = set(["system", "world"])
4736 self._select_atoms = self._select_atoms_highest_available
4737 self._select_package = self._select_pkg_highest_available
4738 self._highest_pkg_cache = {}
4740 def _show_slot_collision_notice(self):
4741 """Show an informational message advising the user to mask one of the
4742 the packages. In some cases it may be possible to resolve this
4743 automatically, but support for backtracking (removal nodes that have
4744 already been selected) will be required in order to handle all possible
4748 if not self._slot_collision_info:
4751 self._show_merge_list()
4754 msg.append("\n!!! Multiple package instances within a single " + \
4755 "package slot have been pulled\n")
4756 msg.append("!!! into the dependency graph, resulting" + \
4757 " in a slot conflict:\n\n")
4759 # Max number of parents shown, to avoid flooding the display.
4761 explanation_columns = 70
4763 for (slot_atom, root), slot_nodes \
4764 in self._slot_collision_info.iteritems():
4765 msg.append(str(slot_atom))
4768 for node in slot_nodes:
4770 msg.append(str(node))
4771 parent_atoms = self._parent_atoms.get(node)
4774 # Prefer conflict atoms over others.
4775 for parent_atom in parent_atoms:
4776 if len(pruned_list) >= max_parents:
4778 if parent_atom in self._slot_conflict_parent_atoms:
4779 pruned_list.add(parent_atom)
4781 # If this package was pulled in by conflict atoms then
4782 # show those alone since those are the most interesting.
4784 # When generating the pruned list, prefer instances
4785 # of DependencyArg over instances of Package.
4786 for parent_atom in parent_atoms:
4787 if len(pruned_list) >= max_parents:
4789 parent, atom = parent_atom
4790 if isinstance(parent, DependencyArg):
4791 pruned_list.add(parent_atom)
4792 # Prefer Packages instances that themselves have been
4793 # pulled into collision slots.
4794 for parent_atom in parent_atoms:
4795 if len(pruned_list) >= max_parents:
4797 parent, atom = parent_atom
4798 if isinstance(parent, Package) and \
4799 (parent.slot_atom, parent.root) \
4800 in self._slot_collision_info:
4801 pruned_list.add(parent_atom)
4802 for parent_atom in parent_atoms:
4803 if len(pruned_list) >= max_parents:
4805 pruned_list.add(parent_atom)
4806 omitted_parents = len(parent_atoms) - len(pruned_list)
4807 parent_atoms = pruned_list
4808 msg.append(" pulled in by\n")
4809 for parent_atom in parent_atoms:
4810 parent, atom = parent_atom
4811 msg.append(2*indent)
4812 if isinstance(parent,
4813 (PackageArg, AtomArg)):
4814 # For PackageArg and AtomArg types, it's
4815 # redundant to display the atom attribute.
4816 msg.append(str(parent))
4818 # Display the specific atom from SetArg or
4820 msg.append("%s required by %s" % (atom, parent))
4823 msg.append(2*indent)
4824 msg.append("(and %d more)\n" % omitted_parents)
4826 msg.append(" (no parents)\n")
4828 explanation = self._slot_conflict_explanation(slot_nodes)
4831 msg.append(indent + "Explanation:\n\n")
4832 for line in textwrap.wrap(explanation, explanation_columns):
4833 msg.append(2*indent + line + "\n")
4836 sys.stderr.write("".join(msg))
4839 explanations_for_all = explanations == len(self._slot_collision_info)
4841 if explanations_for_all or "--quiet" in self.myopts:
4845 msg.append("It may be possible to solve this problem ")
4846 msg.append("by using package.mask to prevent one of ")
4847 msg.append("those packages from being selected. ")
4848 msg.append("However, it is also possible that conflicting ")
4849 msg.append("dependencies exist such that they are impossible to ")
4850 msg.append("satisfy simultaneously. If such a conflict exists in ")
4851 msg.append("the dependencies of two different packages, then those ")
4852 msg.append("packages can not be installed simultaneously.")
4854 from formatter import AbstractFormatter, DumbWriter
4855 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4857 f.add_flowing_data(x)
4861 msg.append("For more information, see MASKED PACKAGES ")
4862 msg.append("section in the emerge man page or refer ")
4863 msg.append("to the Gentoo Handbook.")
4865 f.add_flowing_data(x)
4869 def _slot_conflict_explanation(self, slot_nodes):
4871 When a slot conflict occurs due to USE deps, there are a few
4872 different cases to consider:
4874 1) New USE are correctly set but --newuse wasn't requested so an
4875 installed package with incorrect USE happened to get pulled
4876 into graph before the new one.
4878 2) New USE are incorrectly set but an installed package has correct
4879 USE so it got pulled into the graph, and a new instance also got
4880 pulled in due to --newuse or an upgrade.
4882 3) Multiple USE deps exist that can't be satisfied simultaneously,
4883 and multiple package instances got pulled into the same slot to
4884 satisfy the conflicting deps.
4886 Currently, explanations and suggested courses of action are generated
4887 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4890 if len(slot_nodes) != 2:
4891 # Suggestions are only implemented for
4892 # conflicts between two packages.
4895 all_conflict_atoms = self._slot_conflict_parent_atoms
4897 matched_atoms = None
4898 unmatched_node = None
4899 for node in slot_nodes:
4900 parent_atoms = self._parent_atoms.get(node)
4901 if not parent_atoms:
4902 # Normally, there are always parent atoms. If there are
4903 # none then something unexpected is happening and there's
4904 # currently no suggestion for this case.
4906 conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4907 for parent_atom in conflict_atoms:
4908 parent, atom = parent_atom
4910 # Suggestions are currently only implemented for cases
4911 # in which all conflict atoms have USE deps.
4914 if matched_node is not None:
4915 # If conflict atoms match multiple nodes
4916 # then there's no suggestion.
4919 matched_atoms = conflict_atoms
4921 if unmatched_node is not None:
4922 # Neither node is matched by conflict atoms, and
4923 # there is no suggestion for this case.
4925 unmatched_node = node
4927 if matched_node is None or unmatched_node is None:
4928 # This shouldn't happen.
4931 if unmatched_node.installed and not matched_node.installed and \
4932 unmatched_node.cpv == matched_node.cpv:
4933 # If the conflicting packages are the same version then
4934 # --newuse should be all that's needed. If they are different
4935 # versions then there's some other problem.
4936 return "New USE are correctly set, but --newuse wasn't" + \
4937 " requested, so an installed package with incorrect USE " + \
4938 "happened to get pulled into the dependency graph. " + \
4939 "In order to solve " + \
4940 "this, either specify the --newuse option or explicitly " + \
4941 " reinstall '%s'." % matched_node.slot_atom
4943 if matched_node.installed and not unmatched_node.installed:
4944 atoms = sorted(set(atom for parent, atom in matched_atoms))
4945 explanation = ("New USE for '%s' are incorrectly set. " + \
4946 "In order to solve this, adjust USE to satisfy '%s'") % \
4947 (matched_node.slot_atom, atoms[0])
4949 for atom in atoms[1:-1]:
4950 explanation += ", '%s'" % (atom,)
4953 explanation += " and '%s'" % (atoms[-1],)
def _process_slot_conflicts(self):
	"""
	Process slot conflict data to identify specific atoms which
	lead to conflict. These atoms only match a subset of the
	packages that have been pulled into a given slot.
	"""
	for (slot_atom, root), slot_nodes \
		in self._slot_collision_info.iteritems():

		# Collect every (parent, atom) pair that pulled any of the
		# conflicting packages into this slot.
		all_parent_atoms = set()
		for pkg in slot_nodes:
			parent_atoms = self._parent_atoms.get(pkg)
			if not parent_atoms:
				continue
			all_parent_atoms.update(parent_atoms)

		for pkg in slot_nodes:
			parent_atoms = self._parent_atoms.get(pkg)
			if parent_atoms is None:
				parent_atoms = set()
				self._parent_atoms[pkg] = parent_atoms
			for parent_atom in all_parent_atoms:
				if parent_atom in parent_atoms:
					continue
				# Use package set for matching since it will match via
				# PROVIDE when necessary, while match_from_list does not.
				parent, atom = parent_atom
				atom_set = InternalPackageSet(
					initial_atoms=(atom,))
				if atom_set.findAtomForPackage(pkg):
					parent_atoms.add(parent_atom)
				else:
					# This atom fails to match at least one of the
					# packages in the slot, so it helps to explain
					# the conflict.
					self._slot_conflict_parent_atoms.add(parent_atom)
4993 def _reinstall_for_flags(self, forced_flags,
4994 orig_use, orig_iuse, cur_use, cur_iuse):
4995 """Return a set of flags that trigger reinstallation, or None if there
4996 are no such flags."""
4997 if "--newuse" in self.myopts:
4998 flags = set(orig_iuse.symmetric_difference(
4999 cur_iuse).difference(forced_flags))
5000 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
5001 cur_iuse.intersection(cur_use)))
5004 elif "changed-use" == self.myopts.get("--reinstall"):
5005 flags = orig_iuse.intersection(orig_use).symmetric_difference(
5006 cur_iuse.intersection(cur_use))
5011 def _create_graph(self, allow_unsatisfied=False):
5012 dep_stack = self._dep_stack
5014 self.spinner.update()
5015 dep = dep_stack.pop()
5016 if isinstance(dep, Package):
5017 if not self._add_pkg_deps(dep,
5018 allow_unsatisfied=allow_unsatisfied):
5021 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
# NOTE(review): this block is corrupted by extraction -- original file line
# numbers are fused into each line, indentation is lost, and several lines
# were dropped (gaps at 5032, 5034, 5039, 5046, 5049, 5053, 5056, 5059,
# 5068, 5070, 5073, 5078-5079, 5081-5082, 5084-5085, presumably a
# "if dep.blocker:" guard, try/return statements, etc.). Restore from VCS
# before editing.
#
# Purpose (from visible code): resolve a single queued Dependency -- either
# register it as a Blocker on the parent, or select a matching package and
# add it to the graph via self._add_pkg().
5025 def _add_dep(self, dep, allow_unsatisfied=False):
# Snapshot the relevant option/parameter flags once.
5026 debug = "--debug" in self.myopts
5027 buildpkgonly = "--buildpkgonly" in self.myopts
5028 nodeps = "--nodeps" in self.myopts
5029 empty = "empty" in self.myparams
5030 deep = "deep" in self.myparams
5031 update = "--update" in self.myopts and dep.depth <= 1
# Blocker handling: record a Blocker node keyed to the parent's root,
# unless the parent is an --onlydeps node (then blockers are ignorable).
5033 if not buildpkgonly and \
5035 dep.parent not in self._slot_collision_nodes:
5036 if dep.parent.onlydeps:
5037 # It's safe to ignore blockers if the
5038 # parent is an --onlydeps node.
5040 # The blocker applies to the root where
5041 # the parent is or will be installed.
5042 blocker = Blocker(atom=dep.atom,
5043 eapi=dep.parent.metadata["EAPI"],
5044 root=dep.parent.root)
5045 self._blocker_parents.add(blocker, dep.parent)
# Non-blocker: pick the best matching package for this atom.
5047 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
5048 onlydeps=dep.onlydeps)
# Unsatisfied dep: optional deps are silently dropped; otherwise the dep
# is queued for a retry (allow_unsatisfied) or queued for display.
5050 if dep.priority.optional:
5051 # This could be an unecessary build-time dep
5052 # pulled in by --with-bdeps=y.
5054 if allow_unsatisfied:
5055 self._unsatisfied_deps.append(dep)
5057 self._unsatisfied_deps_for_display.append(
5058 ((dep.root, dep.atom), {"myparent":dep.parent}))
5060 # In some cases, dep_check will return deps that shouldn't
5061 # be proccessed any further, so they are identified and
5062 # discarded here. Try to discard as few as possible since
5063 # discarded dependencies reduce the amount of information
5064 # available for optimization of merge order.
5065 if dep.priority.satisfied and \
5066 not dep_pkg.installed and \
5067 not (existing_node or empty or deep or update):
5069 if dep.root == self.target_root:
# Probe whether the selected package is matched by a command-line
# argument; StopIteration means it is not.
5071 myarg = self._iter_atoms_for_pkg(dep_pkg).next()
5072 except StopIteration:
5074 except portage.exception.InvalidDependString:
5075 if not dep_pkg.installed:
5076 # This shouldn't happen since the package
5077 # should have been masked.
# Satisfied, non-argument dep: remember it for later instead of
# expanding it now.
5080 self._ignored_deps.append(dep)
# Finally pull the selected package into the graph.
5083 if not self._add_pkg(dep_pkg, dep):
# NOTE(review): corrupted by extraction -- fused line numbers, lost
# indentation, and dropped lines (gaps at 5088-5093, 5096, 5099, 5105-5106,
# 5109, 5113, 5117-5120, 5126-5128, 5142, 5145, 5157, 5161, 5166, 5170,
# 5174-5176, 5182-5183, 5190, 5194, 5199, 5203-5204, 5208, 5210, 5214-5217,
# 5219, 5226-5227, 5232, 5238, 5241, 5245, 5247-5250, 5253). The docstring
# delimiters around 5100-5104 are among the dropped lines. Restore from VCS
# before editing.
#
# Purpose (from visible code): add one Package node to self.digraph,
# reusing an existing slot occupant when it matches, recording slot
# collisions otherwise, and finally queue the package for dependency
# expansion on self._dep_stack.
5087 def _add_pkg(self, pkg, dep):
5094 myparent = dep.parent
5095 priority = dep.priority
5097 if priority is None:
5098 priority = DepPriority()
5100 Fills the digraph with nodes comprised of packages to merge.
5101 mybigkey is the package spec of the package to merge.
5102 myparent is the package depending on mybigkey ( or None )
5103 addme = Should we add this package to the digraph or are we just looking at it's deps?
5104 Think --onlydeps, we need to ignore packages in that case.
5107 #IUSE-aware emerge -> USE DEP aware depgraph
5108 #"no downgrade" emerge
5110 # Ensure that the dependencies of the same package
5111 # are never processed more than once.
5112 previously_added = pkg in self.digraph
5114 # select the correct /var database that we'll be checking against
5115 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5116 pkgsettings = self.pkgsettings[pkg.root]
# Find which command-line arguments (if any) match this package.
5121 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5122 except portage.exception.InvalidDependString, e:
5123 if not pkg.installed:
5124 show_invalid_depstring_notice(
5125 pkg, pkg.metadata["PROVIDE"], str(e))
5129 if not pkg.onlydeps:
5130 if not pkg.installed and \
5131 "empty" not in self.myparams and \
5132 vardbapi.match(pkg.slot_atom):
5133 # Increase the priority of dependencies on packages that
5134 # are being rebuilt. This optimizes merge order so that
5135 # dependencies are rebuilt/updated as soon as possible,
5136 # which is needed especially when emerge is called by
5137 # revdep-rebuild since dependencies may be affected by ABI
5138 # breakage that has rendered them useless. Don't adjust
5139 # priority here when in "empty" mode since all packages
5140 # are being merged in that case.
5141 priority.rebuild = True
# Slot handling: check whether another package already occupies the
# same (slot_atom, root) slot.
5143 existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5144 slot_collision = False
5146 existing_node_matches = pkg.cpv == existing_node.cpv
5147 if existing_node_matches and \
5148 pkg != existing_node and \
5149 dep.atom is not None:
5150 # Use package set for matching since it will match via
5151 # PROVIDE when necessary, while match_from_list does not.
5152 atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5153 if not atom_set.findAtomForPackage(existing_node):
5154 existing_node_matches = False
5155 if existing_node_matches:
5156 # The existing node can be reused.
5158 for parent_atom in arg_atoms:
5159 parent, atom = parent_atom
5160 self.digraph.add(existing_node, parent,
5162 self._add_parent_atom(existing_node, parent_atom)
5163 # If a direct circular dependency is not an unsatisfied
5164 # buildtime dependency then drop it here since otherwise
5165 # it can skew the merge order calculation in an unwanted
5167 if existing_node != myparent or \
5168 (priority.buildtime and not priority.satisfied):
5169 self.digraph.addnode(existing_node, myparent,
5171 if dep.atom is not None and dep.parent is not None:
5172 self._add_parent_atom(existing_node,
5173 (dep.parent, dep.atom))
5177 # A slot collision has occurred. Sometimes this coincides
5178 # with unresolvable blockers, so the slot collision will be
5179 # shown later if there are no unresolvable blockers.
5180 self._add_slot_conflict(pkg)
5181 slot_collision = True
5184 # Now add this node to the graph so that self.display()
5185 # can show use flags and --tree portage.output. This node is
5186 # only being partially added to the graph. It must not be
5187 # allowed to interfere with the other nodes that have been
5188 # added. Do not overwrite data for existing nodes in
5189 # self.mydbapi since that data will be used for blocker
5191 # Even though the graph is now invalid, continue to process
5192 # dependencies so that things like --fetchonly can still
5193 # function despite collisions.
# No collision and first encounter: claim the slot and inject the
# package into the graph-side dbapi so later dep_check calls see it.
5195 elif not previously_added:
5196 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5197 self.mydbapi[pkg.root].cpv_inject(pkg)
5198 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5200 if not pkg.installed:
5201 # Allow this package to satisfy old-style virtuals in case it
5202 # doesn't already. Any pre-existing providers will be preferred
5205 pkgsettings.setinst(pkg.cpv, pkg.metadata)
5206 # For consistency, also update the global virtuals.
5207 settings = self.roots[pkg.root].settings
5209 settings.setinst(pkg.cpv, pkg.metadata)
5211 except portage.exception.InvalidDependString, e:
5212 show_invalid_depstring_notice(
5213 pkg, pkg.metadata["PROVIDE"], str(e))
5218 self._set_nodes.add(pkg)
5220 # Do this even when addme is False (--onlydeps) so that the
5221 # parent/child relationship is always known in case
5222 # self._show_slot_collision_notice() needs to be called later.
5223 self.digraph.add(pkg, myparent, priority=priority)
5224 if dep.atom is not None and dep.parent is not None:
5225 self._add_parent_atom(pkg, (dep.parent, dep.atom))
5228 for parent_atom in arg_atoms:
5229 parent, atom = parent_atom
5230 self.digraph.add(pkg, parent, priority=priority)
5231 self._add_parent_atom(pkg, parent_atom)
5233 """ This section determines whether we go deeper into dependencies or not.
5234 We want to go deeper on a few occasions:
5235 Installing package A, we need to make sure package A's deps are met.
5236 emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
5237 If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
5239 dep_stack = self._dep_stack
5240 if "recurse" not in self.myparams:
# Installed packages outside --deep get their deps deferred to
# self._ignored_deps instead of the live dep stack.
5242 elif pkg.installed and \
5243 "deep" not in self.myparams:
5244 dep_stack = self._ignored_deps
5246 self.spinner.update()
# Queue the package itself so _create_graph() later expands its deps
# exactly once.
5251 if not previously_added:
5252 dep_stack.append(pkg)
5255 def _add_parent_atom(self, pkg, parent_atom):
5256 parent_atoms = self._parent_atoms.get(pkg)
5257 if parent_atoms is None:
5258 parent_atoms = set()
5259 self._parent_atoms[pkg] = parent_atoms
5260 parent_atoms.add(parent_atom)
5262 def _add_slot_conflict(self, pkg):
5263 self._slot_collision_nodes.add(pkg)
5264 slot_key = (pkg.slot_atom, pkg.root)
5265 slot_nodes = self._slot_collision_info.get(slot_key)
5266 if slot_nodes is None:
5268 slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5269 self._slot_collision_info[slot_key] = slot_nodes
# NOTE(review): corrupted by extraction -- fused line numbers, lost
# indentation, dropped lines (gaps at 5273, 5275-5276, 5279, 5282-5283,
# 5285, 5287, 5295, 5304, 5306, 5309, 5312-5313, 5317, 5320-5321,
# 5327-5328, 5331, 5333-5336, 5341, 5347-5348, 5350, 5352-5353, 5355,
# 5359, 5364-5365, 5369, 5371-5373, 5376, 5379, 5383, 5393-5394). The
# missing lines evidently include the loop over depkeys, bdeps_root
# selection, try statements, and the return statements. Restore from VCS
# before editing.
#
# Purpose (from visible code): expand pkg's DEPEND/RDEPEND/PDEPEND
# strings into Dependency objects (with appropriate priorities) and feed
# each one to self._add_dep().
5272 def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5274 mytype = pkg.type_name
5277 metadata = pkg.metadata
5278 myuse = pkg.use.enabled
5280 depth = pkg.depth + 1
5281 removal_action = "remove" in self.myparams
# Copy the three dep strings out of metadata.
5284 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5286 edepend[k] = metadata[k]
# --buildpkgonly without deep/empty: runtime deps are irrelevant.
5288 if not pkg.built and \
5289 "--buildpkgonly" in self.myopts and \
5290 "deep" not in self.myparams and \
5291 "empty" not in self.myparams:
5292 edepend["RDEPEND"] = ""
5293 edepend["PDEPEND"] = ""
5294 bdeps_optional = False
5296 if pkg.built and not removal_action:
5297 if self.myopts.get("--with-bdeps", "n") == "y":
5298 # Pull in build time deps as requested, but marked them as
5299 # "optional" since they are not strictly required. This allows
5300 # more freedom in the merge order calculation for solving
5301 # circular dependencies. Don't convert to PDEPEND since that
5302 # could make --with-bdeps=y less effective if it is used to
5303 # adjust merge order to prevent built_with_use() calls from
5305 bdeps_optional = True
5307 # built packages do not have build time dependencies.
5308 edepend["DEPEND"] = ""
5310 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5311 edepend["DEPEND"] = ""
# --root-deps remaps where build-time deps are resolved.
5314 root_deps = self.myopts.get("--root-deps")
5315 if root_deps is not None:
5316 if root_deps is True:
5318 elif root_deps == "rdeps":
5319 edepend["DEPEND"] = ""
# Build the (root, depstring, priority) triples: build-time deps get
# buildtime (or optional) priority, RDEPEND runtime, PDEPEND
# runtime_post.
5322 (bdeps_root, edepend["DEPEND"],
5323 self._priority(buildtime=(not bdeps_optional),
5324 optional=bdeps_optional)),
5325 (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5326 (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5329 debug = "--debug" in self.myopts
5330 strict = mytype != "installed"
5332 for dep_root, dep_string, dep_priority in deps:
5337 print "Parent: ", jbigkey
5338 print "Depstring:", dep_string
5339 print "Priority:", dep_priority
5340 vardb = self.roots[dep_root].trees["vartree"].dbapi
5342 selected_atoms = self._select_atoms(dep_root,
5343 dep_string, myuse=myuse, parent=pkg, strict=strict,
5344 priority=dep_priority)
5345 except portage.exception.InvalidDependString, e:
5346 show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5349 print "Candidates:", selected_atoms
5351 for atom in selected_atoms:
5354 atom = portage.dep.Atom(atom)
5356 mypriority = dep_priority.copy()
# A non-blocker atom already satisfied by an installed package is
# marked satisfied on its priority.
5357 if not atom.blocker and vardb.match(atom):
5358 mypriority.satisfied = True
5360 if not self._add_dep(Dependency(atom=atom,
5361 blocker=atom.blocker, depth=depth, parent=pkg,
5362 priority=mypriority, root=dep_root),
5363 allow_unsatisfied=allow_unsatisfied):
5366 except portage.exception.InvalidAtom, e:
5367 show_invalid_depstring_notice(
5368 pkg, dep_string, str(e))
5369 if not pkg.installed:
5370 (dropped lines here in the original numbering)
5374 print "Exiting...", jbigkey
# Non-fully-qualified atom: report the ambiguity and where it came from.
5375 except portage.exception.AmbiguousPackageName, e:
5377 portage.writemsg("\n\n!!! An atom in the dependencies " + \
5378 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5380 portage.writemsg(" %s\n" % cpv, noiselevel=-1)
5381 portage.writemsg("\n", noiselevel=-1)
5382 if mytype == "binary":
5384 "!!! This binary package cannot be installed: '%s'\n" % \
5385 mykey, noiselevel=-1)
5386 elif mytype == "ebuild":
5387 portdb = self.roots[myroot].trees["porttree"].dbapi
5388 myebuild, mylocation = portdb.findname2(mykey)
5389 portage.writemsg("!!! This ebuild cannot be installed: " + \
5390 "'%s'\n" % myebuild, noiselevel=-1)
5391 portage.writemsg("!!! Please notify the package maintainer " + \
5392 "that atoms must be fully-qualified.\n", noiselevel=-1)
def _priority(self, **kwargs):
	"""Construct a dep priority from keyword flags, using
	UnmergeDepPriority when operating in removal ("remove") mode and
	DepPriority otherwise."""
	if "remove" in self.myparams:
		priority_constructor = UnmergeDepPriority
	else:
		priority_constructor = DepPriority
	return priority_constructor(**kwargs)
def _dep_expand(self, root_config, atom_without_category):
	"""
	@param root_config: a root config instance
	@type root_config: RootConfig
	@param atom_without_category: an atom without a category component
	@type atom_without_category: String
	@rtype: list
	@returns: a list of atoms containing categories (possibly empty)
	"""
	# Derive the package-name part by inserting a dummy category.
	null_cp = portage.dep_getkey(insert_category_into_atom(
		atom_without_category, "null"))
	cat, atom_pn = portage.catsplit(null_cp)

	# Collect every category, across all configured databases, that
	# contains a package with this name.
	dbs = self._filtered_trees[root_config.root]["dbs"]
	categories = set()
	for db, pkg_type, built, installed, db_keys in dbs:
		for cat in db.categories:
			if db.cp_list("%s/%s" % (cat, atom_pn)):
				categories.add(cat)

	deps = []
	for cat in categories:
		deps.append(insert_category_into_atom(
			atom_without_category, cat))
	return deps
5429 def _have_new_virt(self, root, atom_cp):
5431 for db, pkg_type, built, installed, db_keys in \
5432 self._filtered_trees[root]["dbs"]:
5433 if db.cp_list(atom_cp):
# NOTE(review): corrupted by extraction -- fused line numbers, lost
# indentation, dropped lines (gaps at 5441, 5448, 5451, 5454, 5458, 5461,
# 5463, 5466-5469; presumably a bare return, continue/break statements,
# higher_slot initialization, and the final yield). This is a generator
# used via .next() elsewhere in the file. Restore from VCS before editing.
#
# Purpose (from visible code): yield the command-line arguments (and
# their atoms) that match pkg, skipping atoms superseded by a new-style
# virtual and atoms for which a visible package in a different slot
# ranks higher than pkg.
5438 def _iter_atoms_for_pkg(self, pkg):
5439 # TODO: add multiple $ROOT support
5440 if pkg.root != self.target_root:
5442 atom_arg_map = self._atom_arg_map
5443 root_config = self.roots[pkg.root]
5444 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5445 atom_cp = portage.dep_getkey(atom)
# Skip old-style virtual atoms that now have a new-style provider.
5446 if atom_cp != pkg.cp and \
5447 self._have_new_virt(pkg.root, atom_cp):
5449 visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5450 visible_pkgs.reverse() # descending order
# Look for a visible package, newer than pkg, living in a different
# slot -- if found, this atom is better satisfied by that slot.
5452 for visible_pkg in visible_pkgs:
5453 if visible_pkg.cp != atom_cp:
5455 if pkg >= visible_pkg:
5456 # This is descending order, and we're not
5457 # interested in any versions <= pkg given.
5459 if pkg.slot_atom != visible_pkg.slot_atom:
5460 higher_slot = visible_pkg
5462 if higher_slot is not None:
# Map the surviving atom back to the argument objects that supplied it;
# PackageArg entries are presumably filtered to the exact package
# (dropped lines 5466-5469) -- TODO confirm against VCS.
5464 for arg in atom_arg_map[(atom, pkg.root)]:
5465 if isinstance(arg, PackageArg) and \
# NOTE(review): corrupted by extraction -- fused line numbers, lost
# indentation, and many dropped lines (gaps throughout the fused
# numbering: 5477, 5485, 5487-5488, 5490, 5492, 5498, 5512, 5525, 5529,
# 5531, 5542-5543, 5560, 5564, 5566, 5569, 5571-5572, 5581, 5584, 5587,
# 5595, 5598, 5609-5612, 5616, 5618, 5623, 5626, 5628, 5631-5633, 5637,
# 5642-5643, 5648-5650, 5653-5655, 5657, 5661, 5665, 5673, 5680,
# 5682-5683, 5686, 5688, 5691, 5693-5694, 5698, 5701, 5709, 5711, 5713,
# 5719, 5721, 5725-5727, 5733, 5738, 5745-5746, 5751, 5758, 5764, 5769,
# 5778, 5782, 5790-5791, 5795, 5815-5816, 5821-5822, 5826, 5828-5830,
# 5832-5834, 5837). Restore from VCS before editing.
#
# Purpose (from visible code): the depgraph entry point for command-line
# arguments. Classifies each argument (.tbz2 binary package, .ebuild
# file, absolute filesystem path, package set, or dependency atom),
# turns each into an *Arg object, applies greedy-slot expansion for
# --update, seeds the graph with the selected root packages, then runs
# _create_graph(). Returns a (success, favorites) pair.
5470 def select_files(self, myfiles):
5471 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
5472 appropriate depgraph and return a favorite list."""
5473 debug = "--debug" in self.myopts
5474 root_config = self.roots[self.target_root]
5475 sets = root_config.sets
5476 getSetAtoms = root_config.setconfig.getSetAtoms
5478 myroot = self.target_root
5479 dbs = self._filtered_trees[myroot]["dbs"]
5480 vardb = self.trees[myroot]["vartree"].dbapi
5481 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5482 portdb = self.trees[myroot]["porttree"].dbapi
5483 bindb = self.trees[myroot]["bintree"].dbapi
5484 pkgsettings = self.pkgsettings[myroot]
5486 onlydeps = "--onlydeps" in self.myopts
# --- Argument classification loop (per x in myfiles) ---
# Case 1: .tbz2 binary package path; fall back to $PKGDIR locations.
5489 ext = os.path.splitext(x)[1]
5491 if not os.path.exists(x):
5493 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5494 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5495 elif os.path.exists(
5496 os.path.join(pkgsettings["PKGDIR"], x)):
5497 x = os.path.join(pkgsettings["PKGDIR"], x)
5499 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5500 print "!!! Please ensure the tbz2 exists as specified.\n"
5501 return 0, myfavorites
# Read CATEGORY out of the xpak metadata to build the cpv key, and
# require the file to actually live at the bintree's canonical path.
5502 mytbz2=portage.xpak.tbz2(x)
5503 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5504 if os.path.realpath(x) != \
5505 os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5506 print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5507 return 0, myfavorites
5508 db_keys = list(bindb._aux_cache_keys)
5509 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5510 pkg = Package(type_name="binary", root_config=root_config,
5511 cpv=mykey, built=True, metadata=metadata,
5513 self._pkg_cache[pkg] = pkg
5514 args.append(PackageArg(arg=x, package=pkg,
5515 root_config=root_config))
# Case 2: path to an .ebuild inside a valid tree hierarchy.
5516 elif ext==".ebuild":
5517 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5518 pkgdir = os.path.dirname(ebuild_path)
5519 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5520 cp = pkgdir[len(tree_root)+1:]
5521 e = portage.exception.PackageNotFound(
5522 ("%s is not in a valid portage tree " + \
5523 "hierarchy or does not exist") % x)
5524 if not portage.isvalidatom(cp):
5526 cat = portage.catsplit(cp)[0]
5527 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5528 if not portage.isvalidatom("="+mykey):
5530 ebuild_path = portdb.findname(mykey)
5532 if ebuild_path != os.path.join(os.path.realpath(tree_root),
5533 cp, os.path.basename(ebuild_path)):
5534 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5535 return 0, myfavorites
# Warn (with countdown) when the requested ebuild is masked.
5536 if mykey not in portdb.xmatch(
5537 "match-visible", portage.dep_getkey(mykey)):
5538 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5539 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5540 print colorize("BAD", "*** page for details.")
5541 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5544 raise portage.exception.PackageNotFound(
5545 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5546 db_keys = list(portdb._aux_cache_keys)
5547 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5548 pkg = Package(type_name="ebuild", root_config=root_config,
5549 cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5550 pkgsettings.setcpv(pkg)
5551 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5552 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
5553 self._pkg_cache[pkg] = pkg
5554 args.append(PackageArg(arg=x, package=pkg,
5555 root_config=root_config))
# Case 3: absolute path under $ROOT -- queue for owner lookup.
5556 elif x.startswith(os.path.sep):
5557 if not x.startswith(myroot):
5558 portage.writemsg(("\n\n!!! '%s' does not start with" + \
5559 " $ROOT.\n") % x, noiselevel=-1)
5561 # Queue these up since it's most efficient to handle
5562 # multiple files in a single iter_owners() call.
5563 lookup_owners.append(x)
# Case 4: package sets ("system"/"world" or @set syntax).
5565 if x in ("system", "world"):
5567 if x.startswith(SETPREFIX):
5568 s = x[len(SETPREFIX):]
5570 raise portage.exception.PackageSetNotFound(s)
5573 # Recursively expand sets so that containment tests in
5574 # self._get_parent_sets() properly match atoms in nested
5575 # sets (like if world contains system).
5576 expanded_set = InternalPackageSet(
5577 initial_atoms=getSetAtoms(s))
5578 self._sets[s] = expanded_set
5579 args.append(SetArg(arg=x, set=expanded_set,
5580 root_config=root_config))
# Case 5: plain dependency atom (validated, then possibly expanded).
5582 if not is_valid_package_atom(x):
5583 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5585 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5586 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5588 # Don't expand categories or old-style virtuals here unless
5589 # necessary. Expansion of old-style virtuals here causes at
5590 # least the following problems:
5591 # 1) It's more difficult to determine which set(s) an atom
5592 # came from, if any.
5593 # 2) It takes away freedom from the resolver to choose other
5594 # possible expansions when necessary.
5596 args.append(AtomArg(arg=x, atom=x,
5597 root_config=root_config))
# Category-less atom: expand, preferring the category of an already
# installed package when that disambiguates.
5599 expanded_atoms = self._dep_expand(root_config, x)
5600 installed_cp_set = set()
5601 for atom in expanded_atoms:
5602 atom_cp = portage.dep_getkey(atom)
5603 if vardb.cp_list(atom_cp):
5604 installed_cp_set.add(atom_cp)
5605 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5606 installed_cp = iter(installed_cp_set).next()
5607 expanded_atoms = [atom for atom in expanded_atoms \
5608 if portage.dep_getkey(atom) == installed_cp]
5610 if len(expanded_atoms) > 1:
5613 ambiguous_package_name(x, expanded_atoms, root_config,
5614 self.spinner, self.myopts)
5615 return False, myfavorites
5617 atom = expanded_atoms[0]
5619 null_atom = insert_category_into_atom(x, "null")
5620 null_cp = portage.dep_getkey(null_atom)
5621 cat, atom_pn = portage.catsplit(null_cp)
5622 virts_p = root_config.settings.get_virts_p().get(atom_pn)
5624 # Allow the depgraph to choose which virtual.
5625 atom = insert_category_into_atom(x, "virtual")
5627 atom = insert_category_into_atom(x, "null")
5629 args.append(AtomArg(arg=x, atom=atom,
5630 root_config=root_config))
# --- Resolve queued filesystem paths to owning packages ---
5634 search_for_multiple = False
5635 if len(lookup_owners) > 1:
5636 search_for_multiple = True
5638 for x in lookup_owners:
5639 if not search_for_multiple and os.path.isdir(x):
5640 search_for_multiple = True
5641 relative_paths.append(x[len(myroot):])
5644 for pkg, relative_path in \
5645 real_vardb._owners.iter_owners(relative_paths):
5646 owners.add(pkg.mycpv)
5647 if not search_for_multiple:
5651 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5652 "by any package.\n") % lookup_owners[0], noiselevel=-1)
# Convert each owner cpv to a slot-qualified atom when SLOT is known.
5656 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5658 # portage now masks packages with missing slot, but it's
5659 # possible that one was installed by an older version
5660 atom = portage.cpv_getkey(cpv)
5662 atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5663 args.append(AtomArg(arg=atom, atom=atom,
5664 root_config=root_config))
# --- Greedy-slot expansion for --update (two-pass, see steps below) ---
5666 if "--update" in self.myopts:
5667 # In some cases, the greedy slots behavior can pull in a slot that
5668 # the user would want to uninstall due to it being blocked by a
5669 # newer version in a different slot. Therefore, it's necessary to
5670 # detect and discard any that should be uninstalled. Each time
5671 # that arguments are updated, package selections are repeated in
5672 # order to ensure consistency with the current arguments:
5674 # 1) Initialize args
5675 # 2) Select packages and generate initial greedy atoms
5676 # 3) Update args with greedy atoms
5677 # 4) Select packages and generate greedy atoms again, while
5678 # accounting for any blockers between selected packages
5679 # 5) Update args with revised greedy atoms
5681 self._set_args(args)
5684 greedy_args.append(arg)
5685 if not isinstance(arg, AtomArg):
5687 for atom in self._greedy_slots(arg.root_config, arg.atom):
5689 AtomArg(arg=arg.arg, atom=atom,
5690 root_config=arg.root_config))
5692 self._set_args(greedy_args)
5695 # Revise greedy atoms, accounting for any blockers
5696 # between selected packages.
5697 revised_greedy_args = []
5699 revised_greedy_args.append(arg)
5700 if not isinstance(arg, AtomArg):
5702 for atom in self._greedy_slots(arg.root_config, arg.atom,
5703 blocker_lookahead=True):
5704 revised_greedy_args.append(
5705 AtomArg(arg=arg.arg, atom=atom,
5706 root_config=arg.root_config))
5707 args = revised_greedy_args
5708 del revised_greedy_args
5710 self._set_args(args)
# --- Build the favorites list (deduplicated via a set) ---
5712 myfavorites = set(myfavorites)
5714 if isinstance(arg, (AtomArg, PackageArg)):
5715 myfavorites.add(arg.atom)
5716 elif isinstance(arg, SetArg):
5717 myfavorites.add(arg.arg)
5718 myfavorites = list(myfavorites)
5720 pprovideddict = pkgsettings.pprovideddict
5722 portage.writemsg("\n", noiselevel=-1)
5723 # Order needs to be preserved since a feature of --nodeps
5724 # is to allow the user to force a specific merge order.
# --- Inject the selected root packages into the graph ---
5728 for atom in arg.set:
5729 self.spinner.update()
5730 dep = Dependency(atom=atom, onlydeps=onlydeps,
5731 root=myroot, parent=arg)
5732 atom_cp = portage.dep_getkey(atom)
5734 pprovided = pprovideddict.get(portage.dep_getkey(atom))
5735 if pprovided and portage.match_from_list(atom, pprovided):
5736 # A provided package has been specified on the command line.
5737 self._pprovided_args.append((arg, atom))
5739 if isinstance(arg, PackageArg):
5740 if not self._add_pkg(arg.package, dep) or \
5741 not self._create_graph():
5742 sys.stderr.write(("\n\n!!! Problem resolving " + \
5743 "dependencies for %s\n") % arg.arg)
5744 return 0, myfavorites
5747 portage.writemsg(" Arg: %s\n Atom: %s\n" % \
5748 (arg, atom), noiselevel=-1)
5749 pkg, existing_node = self._select_package(
5750 myroot, atom, onlydeps=onlydeps)
# No package selected: hard failure unless the atom came from the
# system or world set (then it is only recorded as missing).
5752 if not (isinstance(arg, SetArg) and \
5753 arg.name in ("system", "world")):
5754 self._unsatisfied_deps_for_display.append(
5755 ((myroot, atom), {}))
5756 return 0, myfavorites
5757 self._missing_args.append((arg, atom))
5759 if atom_cp != pkg.cp:
5760 # For old-style virtuals, we need to repeat the
5761 # package.provided check against the selected package.
5762 expanded_atom = atom.replace(atom_cp, pkg.cp)
5763 pprovided = pprovideddict.get(pkg.cp)
5765 portage.match_from_list(expanded_atom, pprovided):
5766 # A provided package has been
5767 # specified on the command line.
5768 self._pprovided_args.append((arg, atom))
5770 if pkg.installed and "selective" not in self.myparams:
5771 self._unsatisfied_deps_for_display.append(
5772 ((myroot, atom), {}))
5773 # Previous behavior was to bail out in this case, but
5774 # since the dep is satisfied by the installed package,
5775 # it's more friendly to continue building the graph
5776 # and just show a warning message. Therefore, only bail
5777 # out here if the atom is not from either the system or
5779 if not (isinstance(arg, SetArg) and \
5780 arg.name in ("system", "world")):
5781 return 0, myfavorites
5783 # Add the selected package to the graph as soon as possible
5784 # so that later dep_check() calls can use it as feedback
5785 # for making more consistent atom selections.
5786 if not self._add_pkg(pkg, dep):
5787 if isinstance(arg, SetArg):
5788 sys.stderr.write(("\n\n!!! Problem resolving " + \
5789 "dependencies for %s from %s\n") % \
5792 sys.stderr.write(("\n\n!!! Problem resolving " + \
5793 "dependencies for %s\n") % atom)
5794 return 0, myfavorites
# --- Error reporting for signature/other failures during injection ---
5796 except portage.exception.MissingSignature, e:
5797 portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5798 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5799 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5800 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5801 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5802 return 0, myfavorites
5803 except portage.exception.InvalidSignature, e:
5804 portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5805 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5806 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5807 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5808 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5809 return 0, myfavorites
5810 except SystemExit, e:
5811 raise # Needed else can't exit
5812 except Exception, e:
5813 print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5814 print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5817 # Now that the root packages have been added to the graph,
5818 # process the dependencies.
5819 if not self._create_graph():
5820 return 0, myfavorites
# --usepkgonly: verify every merge node is backed by a binary package.
5823 if "--usepkgonly" in self.myopts:
5824 for xs in self.digraph.all_nodes():
5825 if not isinstance(xs, Package):
5827 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5831 print "Missing binary for:",xs[2]
5835 except self._unknown_internal_error:
5836 return False, myfavorites
5838 # We're true here unless we are missing binaries.
5839 return (not missing,myfavorites)
	def _set_args(self, args):
		"""
		Create the "args" package set from atoms and packages given as
		arguments. This method can be called multiple times if necessary.
		The package selection cache is automatically invalidated, since
		arguments influence package selections.
		"""
		args_set = self._sets["args"]
		# Only atom/package style arguments contribute atoms to the set.
		if not isinstance(arg, (AtomArg, PackageArg)):
		# Don't add duplicate atoms to the set.
		if atom in args_set:
		# Rebuild the flattened union of atoms across all registered sets.
		self._set_atoms.clear()
		self._set_atoms.update(chain(*self._sets.itervalues()))
		# Rebuild the (atom, root) -> argument references mapping so the
		# arguments that pulled in a given atom can be looked up later.
		atom_arg_map = self._atom_arg_map
		atom_arg_map.clear()
		for atom in arg.set:
		# Atoms are keyed per-root since the same atom text may apply
		# to more than one root.
		atom_key = (atom, arg.root_config.root)
		refs = atom_arg_map.get(atom_key)
		atom_arg_map[atom_key] = refs
		# Invalidate the package selection cache, since
		# arguments influence package selections.
		self._highest_pkg_cache.clear()
		for trees in self._filtered_trees.itervalues():
		trees["porttree"].dbapi._clear_cache()
	def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
		"""
		Return a list of slot atoms corresponding to installed slots that
		differ from the slot of the highest visible match. When
		blocker_lookahead is True, slot atoms that would trigger a blocker
		conflict are automatically discarded, potentially allowing automatic
		uninstallation of older slots when appropriate.
		"""
		highest_pkg, in_graph = self._select_package(root_config.root, atom)
		if highest_pkg is None:
		vardb = root_config.trees["vartree"].dbapi
		# Collect the SLOT values of installed packages matching the atom.
		for cpv in vardb.match(atom):
		# don't mix new virtuals with old virtuals
		if portage.cpv_getkey(cpv) == highest_pkg.cp:
		slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
		slots.add(highest_pkg.metadata["SLOT"])
		# The slot of the highest visible match is not itself a greedy
		# candidate, so drop it before probing the remaining slots.
		slots.remove(highest_pkg.metadata["SLOT"])
		slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
		pkg, in_graph = self._select_package(root_config.root, slot_atom)
		# Only same-cp packages of a lower version than the highest
		# match are considered greedy candidates.
		if pkg is not None and \
		pkg.cp == highest_pkg.cp and pkg < highest_pkg:
		greedy_pkgs.append(pkg)
		if not blocker_lookahead:
			return [pkg.slot_atom for pkg in greedy_pkgs]
		# Gather blocker atoms from the *DEPEND of each candidate (and of
		# the highest match) so that mutual conflicts can be detected.
		blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
		for pkg in greedy_pkgs + [highest_pkg]:
		dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
		atoms = self._select_atoms(
		pkg.root, dep_str, pkg.use.enabled,
		parent=pkg, strict=True)
		except portage.exception.InvalidDependString:
		blocker_atoms = (x for x in atoms if x.blocker)
		blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
		if highest_pkg not in blockers:
		# filter packages with invalid deps
		greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
		# filter packages that conflict with highest_pkg
		greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
		(blockers[highest_pkg].findAtomForPackage(pkg) or \
		blockers[pkg].findAtomForPackage(highest_pkg))]
		# If two packages conflict, discard the lower version.
		discard_pkgs = set()
		greedy_pkgs.sort(reverse=True)
		# Pairwise comparison: for every conflicting pair, the later
		# (lower, since the list is sorted descending) entry is dropped.
		for i in xrange(len(greedy_pkgs) - 1):
		pkg1 = greedy_pkgs[i]
		if pkg1 in discard_pkgs:
		for j in xrange(i + 1, len(greedy_pkgs)):
		pkg2 = greedy_pkgs[j]
		if pkg2 in discard_pkgs:
		if blockers[pkg1].findAtomForPackage(pkg2) or \
		blockers[pkg2].findAtomForPackage(pkg1):
		discard_pkgs.add(pkg2)
		return [pkg.slot_atom for pkg in greedy_pkgs \
		if pkg not in discard_pkgs]
	def _select_atoms_from_graph(self, *pargs, **kwargs):
		"""
		Prefer atoms matching packages that have already been
		added to the graph or those that are installed and have
		not been scheduled for replacement.
		"""
		# Same resolution as _select_atoms_highest_available(), but done
		# against the graph trees instead of the filtered trees.
		kwargs["trees"] = self._graph_trees
		return self._select_atoms_highest_available(*pargs, **kwargs)
	def _select_atoms_highest_available(self, root, depstring,
		myuse=None, parent=None, strict=True, trees=None, priority=None):
		"""This will raise InvalidDependString if necessary. If trees is
		None then self._filtered_trees is used."""
		pkgsettings = self.pkgsettings[root]
		# Fall back to the filtered trees when the caller supplied none.
		trees = self._filtered_trees
		if not getattr(priority, "buildtime", False):
		# The parent should only be passed to dep_check() for buildtime
		# dependencies since that's the only case when it's appropriate
		# to trigger the circular dependency avoidance code which uses it.
		# It's important not to trigger the same circular dependency
		# avoidance code for runtime dependencies since it's not needed
		# and it can promote an incorrect package choice.
		if parent is not None:
		trees[root]["parent"] = parent
		# dep_check() consults this module-global flag; relax strictness
		# for the duration of the call and restore it afterwards.
		portage.dep._dep_check_strict = False
		mycheck = portage.dep_check(depstring, None,
		pkgsettings, myuse=myuse,
		myroot=root, trees=trees)
		# Undo the temporary parent/strictness state set up above.
		if parent is not None:
		trees[root].pop("parent")
		portage.dep._dep_check_strict = True
		# On failure mycheck[1] carries the error message; on success it
		# carries the selected atoms (see dep_check's (ok, value) return).
		raise portage.exception.InvalidDependString(mycheck[1])
		selected_atoms = mycheck[1]
		return selected_atoms
	def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
		"""
		Print an explanation for an atom that could not be satisfied:
		missing USE/IUSE flags, masked packages, or no ebuilds at all,
		followed by the chain of parent nodes that pulled in the dep.
		"""
		atom = portage.dep.Atom(atom)
		atom_set = InternalPackageSet(initial_atoms=(atom,))
		# Build a variant of the atom without USE deps (slot preserved) so
		# that packages failing only on USE state can still be reported.
		atom_without_use = atom
		atom_without_use = portage.dep.remove_slot(atom)
		atom_without_use += ":" + atom.slot
		atom_without_use = portage.dep.Atom(atom_without_use)
		xinfo = '"%s"' % atom
		# Discard null/ from failed cpv_expand category expansion.
		xinfo = xinfo.replace("null/", "")
		masked_packages = []
		masked_pkg_instances = set()
		missing_licenses = []
		have_eapi_mask = False
		pkgsettings = self.pkgsettings[root]
		implicit_iuse = pkgsettings._get_implicit_iuse()
		root_config = self.roots[root]
		portdb = self.roots[root].trees["porttree"].dbapi
		dbs = self._filtered_trees[root]["dbs"]
		# Scan all configured databases for candidates and their mask
		# reasons.
		for db, pkg_type, built, installed, db_keys in dbs:
		if hasattr(db, "xmatch"):
		# "match-all" ignores visibility so masked matches show up.
		cpv_list = db.xmatch("match-all", atom_without_use)
		cpv_list = db.match(atom_without_use)
		for cpv in cpv_list:
		metadata, mreasons = get_mask_info(root_config, cpv,
		pkgsettings, db, pkg_type, built, installed, db_keys)
		if metadata is not None:
		pkg = Package(built=built, cpv=cpv,
		installed=installed, metadata=metadata,
		root_config=root_config)
		if pkg.cp != atom.cp:
		# A cpv can be returned from dbapi.match() as an
		# old-style virtual match even in cases when the
		# package does not actually PROVIDE the virtual.
		# Filter out any such false matches here.
		if not atom_set.findAtomForPackage(pkg):
		masked_pkg_instances.add(pkg)
		missing_use.append(pkg)
		masked_packages.append(
		(root_config, pkgsettings, cpv, metadata, mreasons))
		# For packages that match except for USE state, build readable
		# reasons: either flags missing from IUSE or flags to change.
		missing_use_reasons = []
		missing_iuse_reasons = []
		for pkg in missing_use:
		use = pkg.use.enabled
		# Match required flags against the package's effective IUSE
		# (explicit IUSE plus the profile's implicit IUSE patterns).
		iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
		iuse_re = re.compile("^(%s)$" % "|".join(iuse))
		for x in atom.use.required:
		if iuse_re.match(x) is None:
		missing_iuse.append(x)
		mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
		missing_iuse_reasons.append((pkg, mreasons))
		need_enable = sorted(atom.use.enabled.difference(use))
		need_disable = sorted(atom.use.disabled.intersection(use))
		if need_enable or need_disable:
		changes.extend(colorize("red", "+" + x) \
		for x in need_enable)
		changes.extend(colorize("blue", "-" + x) \
		for x in need_disable)
		mreasons.append("Change USE: %s" % " ".join(changes))
		missing_use_reasons.append((pkg, mreasons))
		# Unmasked candidates are preferred for display; masked ones fall
		# through to the normal masking message below.
		unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
		in missing_use_reasons if pkg not in masked_pkg_instances]
		unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
		in missing_iuse_reasons if pkg not in masked_pkg_instances]
		show_missing_use = False
		if unmasked_use_reasons:
		# Only show the latest version.
		show_missing_use = unmasked_use_reasons[:1]
		elif unmasked_iuse_reasons:
		if missing_use_reasons:
		# All packages with required IUSE are masked,
		# so display a normal masking message.
		show_missing_use = unmasked_iuse_reasons
		if show_missing_use:
		print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
		print "!!! One of the following packages is required to complete your request:"
		for pkg, mreasons in show_missing_use:
		print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
		elif masked_packages:
		colorize("BAD", "All ebuilds that could satisfy ") + \
		colorize("INFORM", xinfo) + \
		colorize("BAD", " have been masked.")
		print "!!! One of the following masked packages is required to complete your request:"
		have_eapi_mask = show_masked_packages(masked_packages)
		msg = ("The current version of portage supports " + \
		"EAPI '%s'. You must upgrade to a newer version" + \
		" of portage before EAPI masked packages can" + \
		" be installed.") % portage.const.EAPI
		from textwrap import wrap
		for line in wrap(msg, 75):
		print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
		# Show parent nodes and the argument that pulled them in.
		traversed_nodes = set()
		while node is not None:
		traversed_nodes.add(node)
		msg.append('(dependency required by "%s" [%s])' % \
		(colorize('INFORM', str(node.cpv)), node.type_name))
		# When traversing to parents, prefer arguments over packages
		# since arguments are root nodes. Never traverse the same
		# package twice, in order to prevent an infinite loop.
		selected_parent = None
		for parent in self.digraph.parent_nodes(node):
		if isinstance(parent, DependencyArg):
		msg.append('(dependency required by "%s" [argument])' % \
		(colorize('INFORM', str(parent))))
		# An argument is a root node: stop walking upward here.
		selected_parent = None
		if parent not in traversed_nodes:
		selected_parent = parent
		node = selected_parent
	def _select_pkg_highest_available(self, root, atom, onlydeps=False):
		"""
		Memoizing wrapper around _select_pkg_highest_available_imp():
		results are cached per (root, atom, onlydeps) key in
		self._highest_pkg_cache.
		"""
		cache_key = (root, atom, onlydeps)
		ret = self._highest_pkg_cache.get(cache_key)
		if pkg and not existing:
		# A cached package may have entered the graph since the entry
		# was created; refresh the slot mapping to find out.
		existing = self._slot_pkg_map[root].get(pkg.slot_atom)
		if existing and existing == pkg:
		# Update the cache to reflect that the
		# package has been added to the graph.
		self._highest_pkg_cache[cache_key] = ret
		# Cache miss: perform the real selection and memoize the result.
		ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
		self._highest_pkg_cache[cache_key] = ret
		settings = pkg.root_config.settings
		# Record the package in root_config.visible_pkgs when it is
		# visible and not keyword-masked as an installed package.
		if visible(settings, pkg) and not (pkg.installed and \
		settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
		pkg.root_config.visible_pkgs.cpv_inject(pkg)
	def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
		"""
		Core package selection: scan the configured databases for the
		given root and return a (package, existing_node) pair, where
		existing_node is a matching package already in the graph (if
		any). Acceptable matches are accumulated in type-preference
		order and the last entry wins.
		"""
		root_config = self.roots[root]
		pkgsettings = self.pkgsettings[root]
		dbs = self._filtered_trees[root]["dbs"]
		vardb = self.roots[root].trees["vartree"].dbapi
		portdb = self.roots[root].trees["porttree"].dbapi
		# List of acceptable packages, ordered by type preference.
		matched_packages = []
		highest_version = None
		if not isinstance(atom, portage.dep.Atom):
		atom = portage.dep.Atom(atom)
		atom_set = InternalPackageSet(initial_atoms=(atom,))
		existing_node = None
		usepkgonly = "--usepkgonly" in self.myopts
		empty = "empty" in self.myparams
		selective = "selective" in self.myparams
		noreplace = "--noreplace" in self.myopts
		# Behavior of the "selective" parameter depends on
		# whether or not a package matches an argument atom.
		# If an installed package provides an old-style
		# virtual that is no longer provided by an available
		# package, the installed package may match an argument
		# atom even though none of the available packages do.
		# Therefore, "selective" logic does not consider
		# whether or not an installed package matches an
		# argument atom. It only considers whether or not
		# available packages match argument atoms, which is
		# represented by the found_available_arg flag.
		found_available_arg = False
		# First pass looks for packages already in the graph; the second
		# pass performs normal selection.
		for find_existing_node in True, False:
		for db, pkg_type, built, installed, db_keys in dbs:
		if installed and not find_existing_node:
		want_reinstall = reinstall or empty or \
		(found_available_arg and not selective)
		if want_reinstall and matched_packages:
		if hasattr(db, "xmatch"):
		cpv_list = db.xmatch("match-all", atom)
		cpv_list = db.match(atom)
		# USE=multislot can make an installed package appear as if
		# it doesn't satisfy a slot dependency. Rebuilding the ebuild
		# won't do any good as long as USE=multislot is enabled since
		# the newly built package still won't have the expected slot.
		# Therefore, assume that such SLOT dependencies are already
		# satisfied rather than forcing a rebuild.
		if installed and not cpv_list and atom.slot:
		for cpv in db.match(atom.cp):
		slot_available = False
		for other_db, other_type, other_built, \
		other_installed, other_keys in dbs:
		other_db.aux_get(cpv, ["SLOT"])[0]:
		slot_available = True
		if not slot_available:
		inst_pkg = self._pkg(cpv, "installed",
		root_config, installed=installed)
		# Remove the slot from the atom and verify that
		# the package matches the resulting atom.
		atom_without_slot = portage.dep.remove_slot(atom)
		atom_without_slot += str(atom.use)
		atom_without_slot = portage.dep.Atom(atom_without_slot)
		if portage.match_from_list(
		atom_without_slot, [inst_pkg]):
		cpv_list = [inst_pkg.cpv]
		# Installed and onlydeps packages are never merged.
		pkg_status = "merge"
		if installed or onlydeps:
		pkg_status = "nomerge"
		for cpv in cpv_list:
		# Make --noreplace take precedence over --newuse.
		if not installed and noreplace and \
		cpv in vardb.match(atom):
		# If the installed version is masked, it may
		# be necessary to look at lower versions,
		# in case there is a visible downgrade.
		reinstall_for_flags = None
		cache_key = (pkg_type, root, cpv, pkg_status)
		calculated_use = True
		pkg = self._pkg_cache.get(cache_key)
		calculated_use = False
		# izip pairs keys with aux_get values lazily for Package().
		metadata = izip(db_keys, db.aux_get(cpv, db_keys))
		pkg = Package(built=built, cpv=cpv,
		installed=installed, metadata=metadata,
		onlydeps=onlydeps, root_config=root_config,
		metadata = pkg.metadata
		metadata['CHOST'] = pkgsettings.get('CHOST', '')
		if not built and ("?" in metadata["LICENSE"] or \
		"?" in metadata["PROVIDE"]):
		# This is avoided whenever possible because
		# it's expensive. It only needs to be done here
		# if it has an effect on visibility.
		pkgsettings.setcpv(pkg)
		metadata["USE"] = pkgsettings["PORTAGE_USE"]
		calculated_use = True
		self._pkg_cache[pkg] = pkg
		if not installed or (built and matched_packages):
		# Only enforce visibility on installed packages
		# if there is at least one other visible package
		# available. By filtering installed masked packages
		# here, packages that have been masked since they
		# were installed can be automatically downgraded
		# to an unmasked version.
		if not visible(pkgsettings, pkg):
		except portage.exception.InvalidDependString:
		# Enable upgrade or downgrade to a version
		# with visible KEYWORDS when the installed
		# version is masked by KEYWORDS, but never
		# reinstall the same exact version only due
		# to a KEYWORDS mask.
		if built and matched_packages:
		different_version = None
		for avail_pkg in matched_packages:
		if not portage.dep.cpvequal(
		pkg.cpv, avail_pkg.cpv):
		different_version = avail_pkg
		if different_version is not None:
		pkgsettings._getMissingKeywords(
		pkg.cpv, pkg.metadata):
		# If the ebuild no longer exists or it's
		# keywords have been dropped, reject built
		# instances (installed or binary).
		# If --usepkgonly is enabled, assume that
		# the ebuild status should be ignored.
		pkg.cpv, "ebuild", root_config)
		except portage.exception.PackageNotFound:
		if not visible(pkgsettings, pkg_eb):
		if not pkg.built and not calculated_use:
		# This is avoided whenever possible because
		pkgsettings.setcpv(pkg)
		pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
		if pkg.cp != atom.cp:
		# A cpv can be returned from dbapi.match() as an
		# old-style virtual match even in cases when the
		# package does not actually PROVIDE the virtual.
		# Filter out any such false matches here.
		if not atom_set.findAtomForPackage(pkg):
		if root == self.target_root:
		# Ebuild USE must have been calculated prior
		# to this point, in case atoms have USE deps.
		myarg = self._iter_atoms_for_pkg(pkg).next()
		except StopIteration:
		except portage.exception.InvalidDependString:
		# masked by corruption
		if not installed and myarg:
		found_available_arg = True
		# Reject packages whose USE state cannot satisfy the
		# atom's USE deps (only meaningful before build).
		if atom.use and not pkg.built:
		use = pkg.use.enabled
		if atom.use.enabled.difference(use):
		if atom.use.disabled.intersection(use):
		if pkg.cp == atom_cp:
		if highest_version is None:
		highest_version = pkg
		elif pkg > highest_version:
		highest_version = pkg
		# At this point, we've found the highest visible
		# match from the current repo. Any lower versions
		# from this repo are ignored, so this so the loop
		# will always end with a break statement below
		if find_existing_node:
		e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
		if portage.dep.match_from_list(atom, [e_pkg]):
		if highest_version and \
		e_pkg.cp == atom_cp and \
		e_pkg < highest_version and \
		e_pkg.slot_atom != highest_version.slot_atom:
		# There is a higher version available in a
		# different slot, so this existing node is
		matched_packages.append(e_pkg)
		existing_node = e_pkg
		# Compare built package to current config and
		# reject the built package if necessary.
		if built and not installed and \
		("--newuse" in self.myopts or \
		"--reinstall" in self.myopts):
		iuses = pkg.iuse.all
		old_use = pkg.use.enabled
		pkgsettings.setcpv(myeb)
		pkgsettings.setcpv(pkg)
		now_use = pkgsettings["PORTAGE_USE"].split()
		forced_flags = set()
		forced_flags.update(pkgsettings.useforce)
		forced_flags.update(pkgsettings.usemask)
		if myeb and not usepkgonly:
		cur_iuse = myeb.iuse.all
		if self._reinstall_for_flags(forced_flags,
		# Compare current config to installed package
		# and do not reinstall if possible.
		if not installed and \
		("--newuse" in self.myopts or \
		"--reinstall" in self.myopts) and \
		cpv in vardb.match(atom):
		pkgsettings.setcpv(pkg)
		forced_flags = set()
		forced_flags.update(pkgsettings.useforce)
		forced_flags.update(pkgsettings.usemask)
		old_use = vardb.aux_get(cpv, ["USE"])[0].split()
		old_iuse = set(filter_iuse_defaults(
		vardb.aux_get(cpv, ["IUSE"])[0].split()))
		cur_use = pkg.use.enabled
		cur_iuse = pkg.iuse.all
		reinstall_for_flags = \
		self._reinstall_for_flags(
		forced_flags, old_use, old_iuse,
		if reinstall_for_flags:
		matched_packages.append(pkg)
		if reinstall_for_flags:
		self._reinstall_nodes[pkg] = \
		if not matched_packages:
		if "--debug" in self.myopts:
		for pkg in matched_packages:
		portage.writemsg("%s %s\n" % \
		((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
		# Filter out any old-style virtual matches if they are
		# mixed with new-style virtual matches.
		cp = portage.dep_getkey(atom)
		if len(matched_packages) > 1 and \
		"virtual" == portage.catsplit(cp)[0]:
		for pkg in matched_packages:
		# Got a new-style virtual, so filter
		# out any old-style virtuals.
		matched_packages = [pkg for pkg in matched_packages \
		if len(matched_packages) > 1:
		# Multiple candidates remain: keep only the best version.
		bestmatch = portage.best(
		[pkg.cpv for pkg in matched_packages])
		matched_packages = [pkg for pkg in matched_packages \
		if portage.dep.cpvequal(pkg.cpv, bestmatch)]
		# ordered by type preference ("ebuild" type is the last resort)
		return matched_packages[-1], existing_node
	def _select_pkg_from_graph(self, root, atom, onlydeps=False):
		"""
		Select packages that have already been added to the graph or
		those that are installed and have not been scheduled for
		replacement.
		"""
		graph_db = self._graph_trees[root]["porttree"].dbapi
		matches = graph_db.match_pkgs(atom)
		pkg = matches[-1] # highest match
		# Report whether a package for this slot is already in the graph.
		in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
		return pkg, in_graph
	def _complete_graph(self):
		"""
		Add any deep dependencies of required sets (args, system, world) that
		have not been pulled into the graph yet. This ensures that the graph
		is consistent such that initially satisfied deep dependencies are not
		broken in the new graph. Initially unsatisfied dependencies are
		irrelevant since we only want to avoid breaking dependencies that are
		Since this method can consume enough time to disturb users, it is
		currently only enabled by the --complete-graph option.
		"""
		if "--buildpkgonly" in self.myopts or \
		"recurse" not in self.myparams:
		if "complete" not in self.myparams:
		# Skip this to avoid consuming enough time to disturb users.
		# Put the depgraph into a mode that causes it to only
		# select packages that have already been added to the
		# graph or those that are installed and have not been
		# scheduled for replacement. Also, toggle the "deep"
		# parameter so that all dependencies are traversed and
		self._select_atoms = self._select_atoms_from_graph
		self._select_package = self._select_pkg_from_graph
		already_deep = "deep" in self.myparams
		if not already_deep:
		self.myparams.add("deep")
		for root in self.roots:
		required_set_names = self._required_set_names.copy()
		# Sets already handled for the target root need not be redone.
		if root == self.target_root and \
		(already_deep or "empty" in self.myparams):
		required_set_names.difference_update(self._sets)
		if not required_set_names and not self._ignored_deps:
		root_config = self.roots[root]
		setconfig = root_config.setconfig
		# Reuse existing SetArg instances when available.
		for arg in self.digraph.root_nodes():
		if not isinstance(arg, SetArg):
		if arg.root_config != root_config:
		if arg.name in required_set_names:
		required_set_names.remove(arg.name)
		# Create new SetArg instances only when necessary.
		for s in required_set_names:
		expanded_set = InternalPackageSet(
		initial_atoms=setconfig.getSetAtoms(s))
		atom = SETPREFIX + s
		args.append(SetArg(arg=atom, set=expanded_set,
		root_config=root_config))
		vardb = root_config.trees["vartree"].dbapi
		# Queue every atom of each set for dependency traversal.
		for atom in arg.set:
		self._dep_stack.append(
		Dependency(atom=atom, root=root, parent=arg))
		# Re-queue deps that were previously deferred.
		if self._ignored_deps:
		self._dep_stack.extend(self._ignored_deps)
		self._ignored_deps = []
		if not self._create_graph(allow_unsatisfied=True):
		# Check the unsatisfied deps to see if any initially satisfied deps
		# will become unsatisfied due to an upgrade. Initially unsatisfied
		# deps are irrelevant since we only want to avoid breaking deps
		# that are initially satisfied.
		while self._unsatisfied_deps:
		dep = self._unsatisfied_deps.pop()
		matches = vardb.match_pkgs(dep.atom)
		self._initially_unsatisfied_deps.append(dep)
		# An scheduled installation broke a deep dependency.
		# Add the installed package to the graph so that it
		# will be appropriately reported as a slot collision
		# (possibly solvable via backtracking).
		pkg = matches[-1] # highest match
		if not self._add_pkg(pkg, dep):
		if not self._create_graph(allow_unsatisfied=True):
	def _pkg(self, cpv, type_name, root_config, installed=False):
		"""
		Get a package instance from the cache, or create a new
		one if necessary. Raises KeyError from aux_get if it
		fails for some reason (package does not exist or is
		"""
		# Packages fetched through this helper are never merge tasks.
		operation = "nomerge"
		pkg = self._pkg_cache.get(
		(type_name, root_config.root, cpv, operation))
		tree_type = self.pkg_tree_map[type_name]
		db = root_config.trees[tree_type].dbapi
		# Use the original (unfiltered) trees' key set so the metadata
		# matches what the aux cache can actually provide.
		db_keys = list(self._trees_orig[root_config.root][
		tree_type].dbapi._aux_cache_keys)
		# izip lazily pairs keys with their aux_get values.
		metadata = izip(db_keys, db.aux_get(cpv, db_keys))
		raise portage.exception.PackageNotFound(cpv)
		pkg = Package(cpv=cpv, metadata=metadata,
		root_config=root_config, installed=installed)
		if type_name == "ebuild":
		# Ebuild USE/CHOST come from the current config, not metadata.
		settings = self.pkgsettings[root_config.root]
		settings.setcpv(pkg)
		pkg.metadata["USE"] = settings["PORTAGE_USE"]
		pkg.metadata['CHOST'] = settings.get('CHOST', '')
		self._pkg_cache[pkg] = pkg
6628 def validate_blockers(self):
6629 """Remove any blockers from the digraph that do not match any of the
6630 packages within the graph. If necessary, create hard deps to ensure
6631 correct merge order such that mutually blocking packages are never
6632 installed simultaneously."""
6634 if "--buildpkgonly" in self.myopts or \
6635 "--nodeps" in self.myopts:
6638 #if "deep" in self.myparams:
6640 # Pull in blockers from all installed packages that haven't already
6641 # been pulled into the depgraph. This is not enabled by default
6642 # due to the performance penalty that is incurred by all the
6643 # additional dep_check calls that are required.
6645 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6646 for myroot in self.trees:
6647 vardb = self.trees[myroot]["vartree"].dbapi
6648 portdb = self.trees[myroot]["porttree"].dbapi
6649 pkgsettings = self.pkgsettings[myroot]
6650 final_db = self.mydbapi[myroot]
6652 blocker_cache = BlockerCache(myroot, vardb)
6653 stale_cache = set(blocker_cache)
6656 stale_cache.discard(cpv)
6657 pkg_in_graph = self.digraph.contains(pkg)
6659 # Check for masked installed packages. Only warn about
6660 # packages that are in the graph in order to avoid warning
6661 # about those that will be automatically uninstalled during
6662 # the merge process or by --depclean.
6664 if pkg_in_graph and not visible(pkgsettings, pkg):
6665 self._masked_installed.add(pkg)
6667 blocker_atoms = None
6673 self._blocker_parents.child_nodes(pkg))
6678 self._irrelevant_blockers.child_nodes(pkg))
6681 if blockers is not None:
6682 blockers = set(str(blocker.atom) \
6683 for blocker in blockers)
6685 # If this node has any blockers, create a "nomerge"
6686 # node for it so that they can be enforced.
6687 self.spinner.update()
6688 blocker_data = blocker_cache.get(cpv)
6689 if blocker_data is not None and \
6690 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6693 # If blocker data from the graph is available, use
6694 # it to validate the cache and update the cache if
6696 if blocker_data is not None and \
6697 blockers is not None:
6698 if not blockers.symmetric_difference(
6699 blocker_data.atoms):
6703 if blocker_data is None and \
6704 blockers is not None:
6705 # Re-use the blockers from the graph.
6706 blocker_atoms = sorted(blockers)
6707 counter = long(pkg.metadata["COUNTER"])
6709 blocker_cache.BlockerData(counter, blocker_atoms)
6710 blocker_cache[pkg.cpv] = blocker_data
6714 blocker_atoms = blocker_data.atoms
6716 # Use aux_get() to trigger FakeVartree global
6717 # updates on *DEPEND when appropriate.
6718 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6719 # It is crucial to pass in final_db here in order to
6720 # optimize dep_check calls by eliminating atoms via
6721 # dep_wordreduce and dep_eval calls.
6723 portage.dep._dep_check_strict = False
6725 success, atoms = portage.dep_check(depstr,
6726 final_db, pkgsettings, myuse=pkg.use.enabled,
6727 trees=self._graph_trees, myroot=myroot)
6728 except Exception, e:
6729 if isinstance(e, SystemExit):
6731 # This is helpful, for example, if a ValueError
6732 # is thrown from cpv_expand due to multiple
6733 # matches (this can happen if an atom lacks a
6735 show_invalid_depstring_notice(
6736 pkg, depstr, str(e))
6740 portage.dep._dep_check_strict = True
6742 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6743 if replacement_pkg and \
6744 replacement_pkg[0].operation == "merge":
6745 # This package is being replaced anyway, so
6746 # ignore invalid dependencies so as not to
6747 # annoy the user too much (otherwise they'd be
6748 # forced to manually unmerge it first).
6750 show_invalid_depstring_notice(pkg, depstr, atoms)
6752 blocker_atoms = [myatom for myatom in atoms \
6753 if myatom.startswith("!")]
6754 blocker_atoms.sort()
6755 counter = long(pkg.metadata["COUNTER"])
6756 blocker_cache[cpv] = \
6757 blocker_cache.BlockerData(counter, blocker_atoms)
6760 for atom in blocker_atoms:
6761 blocker = Blocker(atom=portage.dep.Atom(atom),
6762 eapi=pkg.metadata["EAPI"], root=myroot)
6763 self._blocker_parents.add(blocker, pkg)
6764 except portage.exception.InvalidAtom, e:
6765 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6766 show_invalid_depstring_notice(
6767 pkg, depstr, "Invalid Atom: %s" % (e,))
6769 for cpv in stale_cache:
6770 del blocker_cache[cpv]
6771 blocker_cache.flush()
6774 # Discard any "uninstall" tasks scheduled by previous calls
6775 # to this method, since those tasks may not make sense given
6776 # the current graph state.
6777 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6778 if previous_uninstall_tasks:
6779 self._blocker_uninstalls = digraph()
6780 self.digraph.difference_update(previous_uninstall_tasks)
6782 for blocker in self._blocker_parents.leaf_nodes():
6783 self.spinner.update()
6784 root_config = self.roots[blocker.root]
6785 virtuals = root_config.settings.getvirtuals()
6786 myroot = blocker.root
6787 initial_db = self.trees[myroot]["vartree"].dbapi
6788 final_db = self.mydbapi[myroot]
6790 provider_virtual = False
6791 if blocker.cp in virtuals and \
6792 not self._have_new_virt(blocker.root, blocker.cp):
6793 provider_virtual = True
6795 # Use this to check PROVIDE for each matched package
6797 atom_set = InternalPackageSet(
6798 initial_atoms=[blocker.atom])
6800 if provider_virtual:
6802 for provider_entry in virtuals[blocker.cp]:
6804 portage.dep_getkey(provider_entry)
6805 atoms.append(blocker.atom.replace(
6806 blocker.cp, provider_cp))
6808 atoms = [blocker.atom]
6810 blocked_initial = set()
6812 for pkg in initial_db.match_pkgs(atom):
6813 if atom_set.findAtomForPackage(pkg):
6814 blocked_initial.add(pkg)
6816 blocked_final = set()
6818 for pkg in final_db.match_pkgs(atom):
6819 if atom_set.findAtomForPackage(pkg):
6820 blocked_final.add(pkg)
6822 if not blocked_initial and not blocked_final:
6823 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6824 self._blocker_parents.remove(blocker)
6825 # Discard any parents that don't have any more blockers.
6826 for pkg in parent_pkgs:
6827 self._irrelevant_blockers.add(blocker, pkg)
6828 if not self._blocker_parents.child_nodes(pkg):
6829 self._blocker_parents.remove(pkg)
6831 for parent in self._blocker_parents.parent_nodes(blocker):
6832 unresolved_blocks = False
6833 depends_on_order = set()
6834 for pkg in blocked_initial:
6835 if pkg.slot_atom == parent.slot_atom:
6836 # TODO: Support blocks within slots in cases where it
6837 # might make sense. For example, a new version might
6838 # require that the old version be uninstalled at build
6841 if parent.installed:
6842 # Two currently installed packages conflict with
6843 # eachother. Ignore this case since the damage
6844 # is already done and this would be likely to
6845 # confuse users if displayed like a normal blocker.
6848 self._blocked_pkgs.add(pkg, blocker)
6850 if parent.operation == "merge":
6851 # Maybe the blocked package can be replaced or simply
6852 # unmerged to resolve this block.
6853 depends_on_order.add((pkg, parent))
6855 # None of the above blocker resolutions techniques apply,
6856 # so apparently this one is unresolvable.
6857 unresolved_blocks = True
6858 for pkg in blocked_final:
6859 if pkg.slot_atom == parent.slot_atom:
6860 # TODO: Support blocks within slots.
6862 if parent.operation == "nomerge" and \
6863 pkg.operation == "nomerge":
6864 # This blocker will be handled the next time that a
6865 # merge of either package is triggered.
6868 self._blocked_pkgs.add(pkg, blocker)
6870 # Maybe the blocking package can be
6871 # unmerged to resolve this block.
6872 if parent.operation == "merge" and pkg.installed:
6873 depends_on_order.add((pkg, parent))
6875 elif parent.operation == "nomerge":
6876 depends_on_order.add((parent, pkg))
6878 # None of the above blocker resolutions techniques apply,
6879 # so apparently this one is unresolvable.
6880 unresolved_blocks = True
6882 # Make sure we don't unmerge any package that have been pulled
6884 if not unresolved_blocks and depends_on_order:
6885 for inst_pkg, inst_task in depends_on_order:
6886 if self.digraph.contains(inst_pkg) and \
6887 self.digraph.parent_nodes(inst_pkg):
6888 unresolved_blocks = True
6891 if not unresolved_blocks and depends_on_order:
6892 for inst_pkg, inst_task in depends_on_order:
6893 uninst_task = Package(built=inst_pkg.built,
6894 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6895 metadata=inst_pkg.metadata,
6896 operation="uninstall",
6897 root_config=inst_pkg.root_config,
6898 type_name=inst_pkg.type_name)
6899 self._pkg_cache[uninst_task] = uninst_task
6900 # Enforce correct merge order with a hard dep.
6901 self.digraph.addnode(uninst_task, inst_task,
6902 priority=BlockerDepPriority.instance)
6903 # Count references to this blocker so that it can be
6904 # invalidated after nodes referencing it have been
6906 self._blocker_uninstalls.addnode(uninst_task, blocker)
6907 if not unresolved_blocks and not depends_on_order:
6908 self._irrelevant_blockers.add(blocker, parent)
6909 self._blocker_parents.remove_edge(blocker, parent)
6910 if not self._blocker_parents.parent_nodes(blocker):
6911 self._blocker_parents.remove(blocker)
6912 if not self._blocker_parents.child_nodes(parent):
6913 self._blocker_parents.remove(parent)
6914 if unresolved_blocks:
6915 self._unsolvable_blockers.add(blocker, parent)
def _accept_blocker_conflicts(self):
    """
    Return True if the current options allow the merge list to be
    computed even though some blocker conflicts remain unresolved.

    This is the case for options that never actually merge anything
    onto the live system (--buildpkgonly, --fetchonly,
    --fetch-all-uri) or that skip dependency calculation (--nodeps).
    """
    # NOTE(review): the accumulator/return lines were elided in this
    # extract; restored here to match the visible loop structure.
    acceptable = False
    for x in ("--buildpkgonly", "--fetchonly",
        "--fetch-all-uri", "--nodeps"):
        if x in self.myopts:
            acceptable = True
            break
    return acceptable
def _merge_order_bias(self, mygraph):
    """
    For optimal leaf node selection, promote deep system runtime deps and
    order nodes from highest to lowest overall reference count.
    """
    # NOTE(review): the comparator's return statements were elided in
    # this extract; restored so each branch terminates.

    # Reference count for every node (number of parents in the graph).
    node_info = {}
    for node in mygraph.order:
        node_info[node] = len(mygraph.parent_nodes(node))
    deep_system_deps = _find_deep_system_runtime_deps(mygraph)

    def cmp_merge_preference(node1, node2):

        # Uninstall tasks sort after everything else; two uninstalls
        # keep their relative order.
        if node1.operation == 'uninstall':
            if node2.operation == 'uninstall':
                return 0
            return 1

        if node2.operation == 'uninstall':
            if node1.operation == 'uninstall':
                return 0
            return -1

        # Deep system runtime deps merge earliest.
        node1_sys = node1 in deep_system_deps
        node2_sys = node2 in deep_system_deps
        if node1_sys != node2_sys:
            if node1_sys:
                return -1
            return 1

        # Otherwise, higher reference count merges first.
        return node_info[node2] - node_info[node1]

    mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
def altlist(self, reversed=False):
    """
    Return the serialized merge list, computing and caching it on
    first use.  Conflicts are resolved and _serialize_tasks() is
    retried until it succeeds (it signals a retry by raising
    self._serialize_tasks_retry).

    @param reversed: when True, return the list in reverse order.
        NOTE(review): this parameter shadows the builtin reversed();
        kept as-is because callers pass it by keyword.
    """
    while self._serialized_tasks_cache is None:
        self._resolve_conflicts()
        try:
            self._serialized_tasks_cache, self._scheduler_graph = \
                self._serialize_tasks()
        except self._serialize_tasks_retry:
            # _serialize_tasks adjusted self.myparams; try again.
            pass

    # Return a copy so callers cannot mutate the cache.
    retlist = self._serialized_tasks_cache[:]
    if reversed:
        retlist.reverse()
    return retlist
def schedulerGraph(self):
    """
    The scheduler graph is identical to the normal one except that
    uninstall edges are reversed in specific cases that require
    conflicting packages to be temporarily installed simultaneously.
    This is intended for use by the Scheduler in its parallelization
    logic. It ensures that temporary simultaneous installation of
    conflicting packages is avoided when appropriate (especially for
    !!atom blockers), but allowed in specific cases that require it.

    Note that this method calls break_refs() which alters the state of
    internal Package instances such that this depgraph instance should
    not be used to perform any more calculations.
    """
    if self._scheduler_graph is None:
        # NOTE(review): this call was elided in the extract; altlist()
        # is what populates self._scheduler_graph as a side effect.
        self.altlist()
    self.break_refs(self._scheduler_graph.order)
    return self._scheduler_graph
def break_refs(self, nodes):
    """
    Take a mergelist like that returned from self.altlist() and
    break any references that lead back to the depgraph. This is
    useful if you want to hold references to packages without
    also holding the depgraph on the heap.

    @param nodes: iterable of merge-list nodes; nodes without a
        root_config attribute (e.g. Blocker instances) are skipped.
    """
    # NOTE(review): the loop header was elided in this extract;
    # restored from the visible per-node body.
    for node in nodes:
        if hasattr(node, "root_config"):
            # The FakeVartree references the _package_cache which
            # references the depgraph. So that Package instances don't
            # hold the depgraph and FakeVartree on the heap, replace
            # the RootConfig that references the FakeVartree with the
            # original RootConfig instance which references the actual
            # vartree.
            node.root_config = \
                self._trees_orig[node.root_config.root]["root_config"]
def _resolve_conflicts(self):
    """
    Run the pre-serialization sanity passes: complete the graph,
    validate blockers, then process any slot conflicts.  Raises
    self._unknown_internal_error if either check fails.
    """
    for check in (self._complete_graph, self.validate_blockers):
        if not check():
            raise self._unknown_internal_error()

    if self._slot_collision_info:
        self._process_slot_conflicts()
def _serialize_tasks(self):
    """
    Serialize self.digraph into an ordered list of merge/uninstall
    tasks (retlist) plus a scheduler graph; returns the tuple
    (retlist, scheduler_graph).  Raises self._serialize_tasks_retry
    after adding "complete" to self.myparams, or
    self._unknown_internal_error for unresolved blocker conflicts,
    slot collisions and circular dependencies.

    NOTE(review): this extract elides a number of lines of the
    original method (loop headers, else/try clauses, break/continue
    statements and some assignments such as the retlist/asap_nodes/
    prefer_asap initializations), so several statements below appear
    without their enclosing control flow.
    """

    if "--debug" in self.myopts:
        writemsg("\ndigraph:\n\n", noiselevel=-1)
        self.digraph.debug_print()
        writemsg("\n", noiselevel=-1)

    scheduler_graph = self.digraph.copy()
    mygraph=self.digraph.copy()
    # Prune "nomerge" root nodes if nothing depends on them, since
    # otherwise they slow down merge order calculation. Don't remove
    # non-root nodes since they help optimize merge order in some cases
    # such as revdep-rebuild.
    removed_nodes = set()
    for node in mygraph.root_nodes():
        if not isinstance(node, Package) or \
            node.installed or node.onlydeps:
            removed_nodes.add(node)
        self.spinner.update()
    mygraph.difference_update(removed_nodes)
    if not removed_nodes:
        # (break elided) pruning repeats until nothing was removed
        removed_nodes.clear()
    self._merge_order_bias(mygraph)

    def cmp_circular_bias(n1, n2):
        """
        RDEPEND is stronger than PDEPEND and this function
        measures such a strength bias within a circular
        dependency relationship.
        """
        n1_n2_medium = n2 in mygraph.child_nodes(n1,
            ignore_priority=priority_range.ignore_medium_soft)
        n2_n1_medium = n1 in mygraph.child_nodes(n2,
            ignore_priority=priority_range.ignore_medium_soft)
        if n1_n2_medium == n2_n1_medium:
        # (return statements elided)

    myblocker_uninstalls = self._blocker_uninstalls.copy()

    # Contains uninstall tasks that have been scheduled to
    # occur after overlapping blockers have been installed.
    scheduled_uninstalls = set()
    # Contains any Uninstall tasks that have been ignored
    # in order to avoid the circular deps code path. These
    # correspond to blocker conflicts that could not be
    # resolved.
    ignored_uninstall_tasks = set()
    have_uninstall_task = False
    complete = "complete" in self.myparams

    def get_nodes(**kwargs):
        """
        Returns leaf nodes excluding Uninstall instances
        since those should be executed as late as possible.
        """
        return [node for node in mygraph.leaf_nodes(**kwargs) \
            if isinstance(node, Package) and \
                (node.operation != "uninstall" or \
                node in scheduled_uninstalls)]

    # sys-apps/portage needs special treatment if ROOT="/"
    running_root = self._running_root.root
    from portage.const import PORTAGE_PACKAGE_ATOM
    runtime_deps = InternalPackageSet(
        initial_atoms=[PORTAGE_PACKAGE_ATOM])
    running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
        PORTAGE_PACKAGE_ATOM)
    replacement_portage = self.mydbapi[running_root].match_pkgs(
        PORTAGE_PACKAGE_ATOM)

    # (if/else headers elided) reduce each match list to one Package or None
    running_portage = running_portage[0]
    running_portage = None

    if replacement_portage:
        replacement_portage = replacement_portage[0]
    # (else elided)
    replacement_portage = None

    if replacement_portage == running_portage:
        replacement_portage = None

    if replacement_portage is not None:
        # update from running_portage to replacement_portage asap
        asap_nodes.append(replacement_portage)

    if running_portage is not None:
        # (try elided)
        portage_rdepend = self._select_atoms_highest_available(
            running_root, running_portage.metadata["RDEPEND"],
            myuse=running_portage.use.enabled,
            parent=running_portage, strict=False)
        except portage.exception.InvalidDependString, e:
            portage.writemsg("!!! Invalid RDEPEND in " + \
                "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
                (running_root, running_portage.cpv, e), noiselevel=-1)
            portage_rdepend = []
        # Collect portage's positive runtime deps; blockers excluded.
        runtime_deps.update(atom for atom in portage_rdepend \
            if not atom.startswith("!"))

    def gather_deps(ignore_priority, mergeable_nodes,
        selected_nodes, node):
        """
        Recursively gather a group of nodes that RDEPEND on
        eachother. This ensures that they are merged as a group
        and get their RDEPENDs satisfied as soon as possible.
        """
        if node in selected_nodes:
        if node not in mergeable_nodes:
        if node == replacement_portage and \
            mygraph.child_nodes(node,
            ignore_priority=priority_range.ignore_medium_soft):
            # Make sure that portage always has all of it's
            # RDEPENDs installed first.
        selected_nodes.add(node)
        for child in mygraph.child_nodes(node,
            ignore_priority=ignore_priority):
            if not gather_deps(ignore_priority,
                mergeable_nodes, selected_nodes, child):
        # (return statements elided)

    def ignore_uninst_or_med(priority):
        # Blocker-uninstall edges are always ignorable here.
        if priority is BlockerDepPriority.instance:
        return priority_range.ignore_medium(priority)

    def ignore_uninst_or_med_soft(priority):
        # As above, but with the medium_soft threshold.
        if priority is BlockerDepPriority.instance:
        return priority_range.ignore_medium_soft(priority)

    tree_mode = "--tree" in self.myopts
    # Tracks whether or not the current iteration should prefer asap_nodes
    # if available. This is set to False when the previous iteration
    # failed to select any nodes. It is reset whenever nodes are
    # successfully selected.

    # Controls whether or not the current iteration should drop edges that
    # are "satisfied" by installed packages, in order to solve circular
    # dependencies. The deep runtime dependencies of installed packages are
    # not checked in this case (bug #199856), so it must be avoided
    # whenever possible.
    drop_satisfied = False

    # State of variables for successive iterations that loosen the
    # criteria for node selection.
    #
    # iteration   prefer_asap   drop_satisfied
    #
    # If no nodes are selected on the last iteration, it is due to
    # unresolved blockers or circular dependencies.

    while not mygraph.empty():
        self.spinner.update()
        selected_nodes = None
        ignore_priority = None
        if drop_satisfied or (prefer_asap and asap_nodes):
            priority_range = DepPrioritySatisfiedRange
        # (else elided)
        priority_range = DepPriorityNormalRange
        if prefer_asap and asap_nodes:
            # ASAP nodes are merged before their soft deps. Go ahead and
            # select root nodes here if necessary, since it's typical for
            # the parent to have been removed from the graph already.
            asap_nodes = [node for node in asap_nodes \
                if mygraph.contains(node)]
            for node in asap_nodes:
                if not mygraph.child_nodes(node,
                    ignore_priority=priority_range.ignore_soft):
                    selected_nodes = [node]
                    asap_nodes.remove(node)

        if not selected_nodes and \
            not (prefer_asap and asap_nodes):
            for i in xrange(priority_range.NONE,
                priority_range.MEDIUM_SOFT + 1):
                ignore_priority = priority_range.ignore_priority[i]
                nodes = get_nodes(ignore_priority=ignore_priority)
                # If there is a mix of uninstall nodes with other
                # types, save the uninstall nodes for later since
                # sometimes a merge node will render an uninstall
                # node unnecessary (due to occupying the same slot),
                # and we want to avoid executing a separate uninstall
                # task in that case.
                good_uninstalls = []
                with_some_uninstalls_excluded = []
                if node.operation == "uninstall":
                    slot_node = self.mydbapi[node.root
                        ].match_pkgs(node.slot_atom)
                    slot_node[0].operation == "merge":
                    good_uninstalls.append(node)
                with_some_uninstalls_excluded.append(node)
                nodes = good_uninstalls
                elif with_some_uninstalls_excluded:
                    nodes = with_some_uninstalls_excluded

                if ignore_priority is None and not tree_mode:
                    # Greedily pop all of these nodes since no
                    # relationship has been ignored. This optimization
                    # destroys --tree output, so it's disabled in tree
                    # mode.
                    selected_nodes = nodes
                # (else elided)
                # For optimal merge order:
                # * Only pop one node.
                # * Removing a root node (node without a parent)
                #   will not produce a leaf node, so avoid it.
                # * It's normal for a selected uninstall to be a
                #   root node, so don't check them for parents.
                if node.operation == "uninstall" or \
                    mygraph.parent_nodes(node):
                    selected_nodes = [node]

        if not selected_nodes:
            nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
            mergeable_nodes = set(nodes)
            if prefer_asap and asap_nodes:
            for i in xrange(priority_range.SOFT,
                priority_range.MEDIUM_SOFT + 1):
                ignore_priority = priority_range.ignore_priority[i]
                if not mygraph.parent_nodes(node):
                selected_nodes = set()
                if gather_deps(ignore_priority,
                    mergeable_nodes, selected_nodes, node):
                selected_nodes = None

        if prefer_asap and asap_nodes and not selected_nodes:
            # We failed to find any asap nodes to merge, so ignore
            # them for the next iteration.

        if selected_nodes and ignore_priority is not None:
            # Try to merge ignored medium_soft deps as soon as possible
            # if they're not satisfied by installed packages.
            for node in selected_nodes:
                children = set(mygraph.child_nodes(node))
                soft = children.difference(
                    mygraph.child_nodes(node,
                    ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
                medium_soft = children.difference(
                    mygraph.child_nodes(node,
                        DepPrioritySatisfiedRange.ignore_medium_soft))
                medium_soft.difference_update(soft)
                for child in medium_soft:
                    if child in selected_nodes:
                    if child in asap_nodes:
                    asap_nodes.append(child)

        if selected_nodes and len(selected_nodes) > 1:
            if not isinstance(selected_nodes, list):
                selected_nodes = list(selected_nodes)
            selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))

        if not selected_nodes and not myblocker_uninstalls.is_empty():
            # An Uninstall task needs to be executed in order to
            # avoid conflict if possible.
            priority_range = DepPrioritySatisfiedRange
            priority_range = DepPriorityNormalRange
            mergeable_nodes = get_nodes(
                ignore_priority=ignore_uninst_or_med)
            min_parent_deps = None
            for task in myblocker_uninstalls.leaf_nodes():
                # Do some sanity checks so that system or world packages
                # don't get uninstalled inappropriately here (only really
                # necessary when --complete-graph has not been enabled).
                if task in ignored_uninstall_tasks:
                if task in scheduled_uninstalls:
                    # It's been scheduled but it hasn't
                    # been executed yet due to dependence
                    # on installation of blocking packages.
                root_config = self.roots[task.root]
                inst_pkg = self._pkg_cache[
                    ("installed", task.root, task.cpv, "nomerge")]
                if self.digraph.contains(inst_pkg):
                forbid_overlap = False
                heuristic_overlap = False
                for blocker in myblocker_uninstalls.parent_nodes(task):
                    if blocker.eapi in ("0", "1"):
                        heuristic_overlap = True
                    elif blocker.atom.blocker.overlap.forbid:
                        forbid_overlap = True
                if forbid_overlap and running_root == task.root:
                if heuristic_overlap and running_root == task.root:
                    # Never uninstall sys-apps/portage or it's essential
                    # dependencies, except through replacement.
                    runtime_dep_atoms = \
                        list(runtime_deps.iterAtomsForPackage(task))
                    except portage.exception.InvalidDependString, e:
                        portage.writemsg("!!! Invalid PROVIDE in " + \
                            "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
                            (task.root, task.cpv, e), noiselevel=-1)

                    # Don't uninstall a runtime dep if it appears
                    # to be the only suitable one installed.
                    vardb = root_config.trees["vartree"].dbapi
                    for atom in runtime_dep_atoms:
                        other_version = None
                        for pkg in vardb.match_pkgs(atom):
                            if pkg.cpv == task.cpv and \
                                pkg.metadata["COUNTER"] == \
                                task.metadata["COUNTER"]:
                        if other_version is None:

                # For packages in the system set, don't take
                # any chances. If the conflict can't be resolved
                # by a normal replacement operation then abort.
                for atom in root_config.sets[
                    "system"].iterAtomsForPackage(task):
                except portage.exception.InvalidDependString, e:
                    portage.writemsg("!!! Invalid PROVIDE in " + \
                        "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
                        (task.root, task.cpv, e), noiselevel=-1)

                # Note that the world check isn't always
                # necessary since self._complete_graph() will
                # add all packages from the system and world sets to the
                # graph. This just allows unresolved conflicts to be
                # detected as early as possible, which makes it possible
                # to avoid calling self._complete_graph() when it is
                # unnecessary due to blockers triggering an abortion.

                # For packages in the world set, go ahead an uninstall
                # when necessary, as long as the atom will be satisfied
                # in the final state.
                graph_db = self.mydbapi[task.root]
                for atom in root_config.sets[
                    "world"].iterAtomsForPackage(task):
                    for pkg in graph_db.match_pkgs(atom):
                        self._blocked_world_pkgs[inst_pkg] = atom
                except portage.exception.InvalidDependString, e:
                    portage.writemsg("!!! Invalid PROVIDE in " + \
                        "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
                        (task.root, task.cpv, e), noiselevel=-1)

                # Check the deps of parent nodes to ensure that
                # the chosen task produces a leaf node. Maybe
                # this can be optimized some more to make the
                # best possible choice, but the current algorithm
                # is simple and should be near optimal for most
                # common cases.
                mergeable_parent = False
                for parent in mygraph.parent_nodes(task):
                    parent_deps.update(mygraph.child_nodes(parent,
                        ignore_priority=priority_range.ignore_medium_soft))
                    if parent in mergeable_nodes and \
                        gather_deps(ignore_uninst_or_med_soft,
                        mergeable_nodes, set(), parent):
                        mergeable_parent = True
                if not mergeable_parent:

                parent_deps.remove(task)
                if min_parent_deps is None or \
                    len(parent_deps) < min_parent_deps:
                    min_parent_deps = len(parent_deps)

            if uninst_task is not None:
                # The uninstall is performed only after blocking
                # packages have been merged on top of it. File
                # collisions between blocking packages are detected
                # and removed from the list of files to be uninstalled.
                scheduled_uninstalls.add(uninst_task)
                parent_nodes = mygraph.parent_nodes(uninst_task)

                # Reverse the parent -> uninstall edges since we want
                # to do the uninstall after blocking packages have
                # been merged on top of it.
                mygraph.remove(uninst_task)
                for blocked_pkg in parent_nodes:
                    mygraph.add(blocked_pkg, uninst_task,
                        priority=BlockerDepPriority.instance)
                    scheduler_graph.remove_edge(uninst_task, blocked_pkg)
                    scheduler_graph.add(blocked_pkg, uninst_task,
                        priority=BlockerDepPriority.instance)

                # Reset the state variables for leaf node selection and
                # continue trying to select leaf nodes.
                drop_satisfied = False

        if not selected_nodes:
            # Only select root nodes as a last resort. This case should
            # only trigger when the graph is nearly empty and the only
            # remaining nodes are isolated (no parents or children). Since
            # the nodes must be isolated, ignore_priority is not needed.
            selected_nodes = get_nodes()

        if not selected_nodes and not drop_satisfied:
            drop_satisfied = True

        if not selected_nodes and not myblocker_uninstalls.is_empty():
            # If possible, drop an uninstall task here in order to avoid
            # the circular deps code path. The corresponding blocker will
            # still be counted as an unresolved conflict.
            for node in myblocker_uninstalls.leaf_nodes():
                mygraph.remove(node)
                ignored_uninstall_tasks.add(node)

            if uninst_task is not None:
                # Reset the state variables for leaf node selection and
                # continue trying to select leaf nodes.
                drop_satisfied = False

        if not selected_nodes:
            self._circular_deps_for_display = mygraph
            raise self._unknown_internal_error()

        # At this point, we've succeeded in selecting one or more nodes, so
        # reset state variables for leaf node selection.
        drop_satisfied = False

        mygraph.difference_update(selected_nodes)

        for node in selected_nodes:
            if isinstance(node, Package) and \
                node.operation == "nomerge":

            # Handle interactions between blockers
            # and uninstallation tasks.
            solved_blockers = set()
            if isinstance(node, Package) and \
                "uninstall" == node.operation:
                have_uninstall_task = True
            # (else elided)
            vardb = self.trees[node.root]["vartree"].dbapi
            previous_cpv = vardb.match(node.slot_atom)
            # The package will be replaced by this one, so remove
            # the corresponding Uninstall task if necessary.
            previous_cpv = previous_cpv[0]
            ("installed", node.root, previous_cpv, "uninstall")
            mygraph.remove(uninst_task)

            if uninst_task is not None and \
                uninst_task not in ignored_uninstall_tasks and \
                myblocker_uninstalls.contains(uninst_task):
                blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
                myblocker_uninstalls.remove(uninst_task)
                # Discard any blockers that this Uninstall solves.
                for blocker in blocker_nodes:
                    if not myblocker_uninstalls.child_nodes(blocker):
                        myblocker_uninstalls.remove(blocker)
                        solved_blockers.add(blocker)

            retlist.append(node)

            if (isinstance(node, Package) and \
                "uninstall" == node.operation) or \
                (uninst_task is not None and \
                uninst_task in scheduled_uninstalls):
                # Include satisfied blockers in the merge list
                # since the user might be interested and also
                # it serves as an indicator that blocking packages
                # will be temporarily installed simultaneously.
                for blocker in solved_blockers:
                    retlist.append(Blocker(atom=blocker.atom,
                        root=blocker.root, eapi=blocker.eapi,

    # Any uninstall task still rooted here corresponds to an
    # unsolvable blocker.
    unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
    for node in myblocker_uninstalls.root_nodes():
        unsolvable_blockers.add(node)

    for blocker in unsolvable_blockers:
        retlist.append(blocker)

    # If any Uninstall tasks need to be executed in order
    # to avoid a conflict, complete the graph with any
    # dependencies that may have been initially
    # neglected (to ensure that unsafe Uninstall tasks
    # are properly identified and blocked from execution).
    if have_uninstall_task and \
        not unsolvable_blockers:
        self.myparams.add("complete")
        raise self._serialize_tasks_retry("")

    if unsolvable_blockers and \
        not self._accept_blocker_conflicts():
        self._unsatisfied_blockers_for_display = unsolvable_blockers
        self._serialized_tasks_cache = retlist[:]
        self._scheduler_graph = scheduler_graph
        raise self._unknown_internal_error()

    if self._slot_collision_info and \
        not self._accept_blocker_conflicts():
        self._serialized_tasks_cache = retlist[:]
        self._scheduler_graph = scheduler_graph
        raise self._unknown_internal_error()

    return retlist, scheduler_graph
def _show_circular_deps(self, mygraph):
    # Display a circular-dependency error for the given graph.
    # NOTE(review): this extract elides a few lines of the original
    # (loop headers, the display_order initialization, and the
    # closing argument of one writemsg call).
    #
    # No leaf nodes are available, so we have a circular
    # dependency panic situation. Reduce the noise level to a
    # minimum via repeated elimination of root nodes since they
    # have no parents and thus can not be part of a cycle.
    root_nodes = mygraph.root_nodes(
        ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
    mygraph.difference_update(root_nodes)
    # Display the USE flags that are enabled on nodes that are part
    # of dependency cycles in case that helps the user decide to
    # disable some of them.
    tempgraph = mygraph.copy()
    while not tempgraph.empty():
        nodes = tempgraph.leaf_nodes()
        # Fall back to an arbitrary node when no leaf exists
        # (the remainder is fully cyclic).
        node = tempgraph.order[0]
        display_order.append(node)
        tempgraph.remove(node)
    display_order.reverse()
    # Force a default-verbosity --tree display so the cycle
    # structure is visible.
    self.myopts.pop("--quiet", None)
    self.myopts.pop("--verbose", None)
    self.myopts["--tree"] = True
    portage.writemsg("\n\n", noiselevel=-1)
    self.display(display_order)
    prefix = colorize("BAD", " * ")
    portage.writemsg("\n", noiselevel=-1)
    portage.writemsg(prefix + "Error: circular dependencies:\n",
    portage.writemsg("\n", noiselevel=-1)
    mygraph.debug_print()
    portage.writemsg("\n", noiselevel=-1)
    portage.writemsg(prefix + "Note that circular dependencies " + \
        "can often be avoided by temporarily\n", noiselevel=-1)
    portage.writemsg(prefix + "disabling USE flags that trigger " + \
        "optional dependencies.\n", noiselevel=-1)
def _show_merge_list(self):
    """
    Display the cached merge list, unless it is the list that was
    already displayed (in either forward or reverse order).
    With --tree the list is shown in reverse order.
    """
    tasks = self._serialized_tasks_cache
    if tasks is None:
        return

    shown = self._displayed_list
    already_shown = bool(shown) and \
        (shown == tasks or shown == list(reversed(tasks)))
    if already_shown:
        return

    display_list = tasks[:]
    if "--tree" in self.myopts:
        display_list.reverse()
    self.display(display_list)
def _show_unsatisfied_blockers(self, blockers):
    """
    Display the given unsatisfied blockers together with the parent
    packages/atoms that pulled the conflicting packages into the
    graph.

    NOTE(review): this extract elides several original lines
    (initializations of the conflict_pkgs/pruned_pkgs/pruned_list/
    max_parents/msg containers and some break/else statements).
    """
    self._show_merge_list()
    msg = "Error: The above package list contains " + \
        "packages which cannot be installed " + \
        "at the same time on the same system."
    prefix = colorize("BAD", " * ")
    from textwrap import wrap
    portage.writemsg("\n", noiselevel=-1)
    for line in wrap(msg, 70):
        portage.writemsg(prefix + line + "\n", noiselevel=-1)

    # Display the conflicting packages along with the packages
    # that pulled them in. This is helpful for troubleshooting
    # cases in which blockers don't solve automatically and
    # the reasons are not apparent from the normal merge list
    # display.
    for blocker in blockers:
        for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
            self._blocker_parents.parent_nodes(blocker)):
            parent_atoms = self._parent_atoms.get(pkg)
            if not parent_atoms:
                # Packages blocked only via the world set get a
                # synthetic "@world" parent for display purposes.
                atom = self._blocked_world_pkgs.get(pkg)
                if atom is not None:
                    parent_atoms = set([("@world", atom)])
            conflict_pkgs[pkg] = parent_atoms

    # Reduce noise by pruning packages that are only
    # pulled in by other conflict packages.
    for pkg, parent_atoms in conflict_pkgs.iteritems():
        relevant_parent = False
        for parent, atom in parent_atoms:
            if parent not in conflict_pkgs:
                relevant_parent = True
        if not relevant_parent:
            pruned_pkgs.add(pkg)
    for pkg in pruned_pkgs:
        del conflict_pkgs[pkg]

    # Max number of parents shown, to avoid flooding the display.
    for pkg, parent_atoms in conflict_pkgs.iteritems():
        # Prefer packages that are not directly involved in a conflict.
        for parent_atom in parent_atoms:
            if len(pruned_list) >= max_parents:
            parent, atom = parent_atom
            if parent not in conflict_pkgs:
                pruned_list.add(parent_atom)
        # Fill remaining display slots with any parents.
        for parent_atom in parent_atoms:
            if len(pruned_list) >= max_parents:
            pruned_list.add(parent_atom)

        omitted_parents = len(parent_atoms) - len(pruned_list)
        msg.append(indent + "%s pulled in by\n" % pkg)

        for parent_atom in pruned_list:
            parent, atom = parent_atom
            msg.append(2*indent)
            if isinstance(parent,
                (PackageArg, AtomArg)):
                # For PackageArg and AtomArg types, it's
                # redundant to display the atom attribute.
                msg.append(str(parent))
            # (else elided)
            # Display the specific atom from SetArg or
            # Package types.
            msg.append("%s required by %s" % (atom, parent))

        msg.append(2*indent)
        msg.append("(and %d more)\n" % omitted_parents)

    sys.stderr.write("".join(msg))

    if "--quiet" not in self.myopts:
        show_blocker_docs_link()
7777 def display(self, mylist, favorites=[], verbosity=None):
7779 # This is used to prevent display_problems() from
7780 # redundantly displaying this exact same merge list
7781 # again via _show_merge_list().
7782 self._displayed_list = mylist
7784 if verbosity is None:
7785 verbosity = ("--quiet" in self.myopts and 1 or \
7786 "--verbose" in self.myopts and 3 or 2)
7787 favorites_set = InternalPackageSet(favorites)
7788 oneshot = "--oneshot" in self.myopts or \
7789 "--onlydeps" in self.myopts
7790 columns = "--columns" in self.myopts
7795 counters = PackageCounters()
7797 if verbosity == 1 and "--verbose" not in self.myopts:
7798 def create_use_string(*args):
7801 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7803 is_new, reinst_flags,
7804 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7805 alphabetical=("--alphabetical" in self.myopts)):
7813 cur_iuse = set(cur_iuse)
7814 enabled_flags = cur_iuse.intersection(cur_use)
7815 removed_iuse = set(old_iuse).difference(cur_iuse)
7816 any_iuse = cur_iuse.union(old_iuse)
7817 any_iuse = list(any_iuse)
7819 for flag in any_iuse:
7822 reinst_flag = reinst_flags and flag in reinst_flags
7823 if flag in enabled_flags:
7825 if is_new or flag in old_use and \
7826 (all_flags or reinst_flag):
7827 flag_str = red(flag)
7828 elif flag not in old_iuse:
7829 flag_str = yellow(flag) + "%*"
7830 elif flag not in old_use:
7831 flag_str = green(flag) + "*"
7832 elif flag in removed_iuse:
7833 if all_flags or reinst_flag:
7834 flag_str = yellow("-" + flag) + "%"
7837 flag_str = "(" + flag_str + ")"
7838 removed.append(flag_str)
7841 if is_new or flag in old_iuse and \
7842 flag not in old_use and \
7843 (all_flags or reinst_flag):
7844 flag_str = blue("-" + flag)
7845 elif flag not in old_iuse:
7846 flag_str = yellow("-" + flag)
7847 if flag not in iuse_forced:
7849 elif flag in old_use:
7850 flag_str = green("-" + flag) + "*"
7852 if flag in iuse_forced:
7853 flag_str = "(" + flag_str + ")"
7855 enabled.append(flag_str)
7857 disabled.append(flag_str)
7860 ret = " ".join(enabled)
7862 ret = " ".join(enabled + disabled + removed)
7864 ret = '%s="%s" ' % (name, ret)
7867 repo_display = RepoDisplay(self.roots)
7871 mygraph = self.digraph.copy()
7873 # If there are any Uninstall instances, add the corresponding
7874 # blockers to the digraph (useful for --tree display).
7876 executed_uninstalls = set(node for node in mylist \
7877 if isinstance(node, Package) and node.operation == "unmerge")
7879 for uninstall in self._blocker_uninstalls.leaf_nodes():
7880 uninstall_parents = \
7881 self._blocker_uninstalls.parent_nodes(uninstall)
7882 if not uninstall_parents:
7885 # Remove the corresponding "nomerge" node and substitute
7886 # the Uninstall node.
7887 inst_pkg = self._pkg_cache[
7888 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7890 mygraph.remove(inst_pkg)
7895 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7897 inst_pkg_blockers = []
7899 # Break the Package -> Uninstall edges.
7900 mygraph.remove(uninstall)
7902 # Resolution of a package's blockers
7903 # depend on it's own uninstallation.
7904 for blocker in inst_pkg_blockers:
7905 mygraph.add(uninstall, blocker)
7907 # Expand Package -> Uninstall edges into
7908 # Package -> Blocker -> Uninstall edges.
7909 for blocker in uninstall_parents:
7910 mygraph.add(uninstall, blocker)
7911 for parent in self._blocker_parents.parent_nodes(blocker):
7912 if parent != inst_pkg:
7913 mygraph.add(blocker, parent)
7915 # If the uninstall task did not need to be executed because
7916 # of an upgrade, display Blocker -> Upgrade edges since the
7917 # corresponding Blocker -> Uninstall edges will not be shown.
7919 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7920 if upgrade_node is not None and \
7921 uninstall not in executed_uninstalls:
7922 for blocker in uninstall_parents:
7923 mygraph.add(upgrade_node, blocker)
7925 unsatisfied_blockers = []
7930 if isinstance(x, Blocker) and not x.satisfied:
7931 unsatisfied_blockers.append(x)
7934 if "--tree" in self.myopts:
7935 depth = len(tree_nodes)
7936 while depth and graph_key not in \
7937 mygraph.child_nodes(tree_nodes[depth-1]):
7940 tree_nodes = tree_nodes[:depth]
7941 tree_nodes.append(graph_key)
7942 display_list.append((x, depth, True))
7943 shown_edges.add((graph_key, tree_nodes[depth-1]))
7945 traversed_nodes = set() # prevent endless circles
7946 traversed_nodes.add(graph_key)
7947 def add_parents(current_node, ordered):
7949 # Do not traverse to parents if this node is an
7950 # an argument or a direct member of a set that has
7951 # been specified as an argument (system or world).
7952 if current_node not in self._set_nodes:
7953 parent_nodes = mygraph.parent_nodes(current_node)
7955 child_nodes = set(mygraph.child_nodes(current_node))
7956 selected_parent = None
7957 # First, try to avoid a direct cycle.
7958 for node in parent_nodes:
7959 if not isinstance(node, (Blocker, Package)):
7961 if node not in traversed_nodes and \
7962 node not in child_nodes:
7963 edge = (current_node, node)
7964 if edge in shown_edges:
7966 selected_parent = node
7968 if not selected_parent:
7969 # A direct cycle is unavoidable.
7970 for node in parent_nodes:
7971 if not isinstance(node, (Blocker, Package)):
7973 if node not in traversed_nodes:
7974 edge = (current_node, node)
7975 if edge in shown_edges:
7977 selected_parent = node
7980 shown_edges.add((current_node, selected_parent))
7981 traversed_nodes.add(selected_parent)
7982 add_parents(selected_parent, False)
7983 display_list.append((current_node,
7984 len(tree_nodes), ordered))
7985 tree_nodes.append(current_node)
7987 add_parents(graph_key, True)
7989 display_list.append((x, depth, True))
7990 mylist = display_list
7991 for x in unsatisfied_blockers:
7992 mylist.append((x, 0, True))
7994 last_merge_depth = 0
7995 for i in xrange(len(mylist)-1,-1,-1):
7996 graph_key, depth, ordered = mylist[i]
7997 if not ordered and depth == 0 and i > 0 \
7998 and graph_key == mylist[i-1][0] and \
7999 mylist[i-1][1] == 0:
8000 # An ordered node got a consecutive duplicate when the tree was
8004 if ordered and graph_key[-1] != "nomerge":
8005 last_merge_depth = depth
8007 if depth >= last_merge_depth or \
8008 i < len(mylist) - 1 and \
8009 depth >= mylist[i+1][1]:
8012 from portage import flatten
8013 from portage.dep import use_reduce, paren_reduce
8014 # files to fetch list - avoids counting a same file twice
8015 # in size display (verbose mode)
8018 # Use this set to detect when all the "repoadd" strings are "[0]"
8019 # and disable the entire repo display in this case.
8022 for mylist_index in xrange(len(mylist)):
8023 x, depth, ordered = mylist[mylist_index]
8027 portdb = self.trees[myroot]["porttree"].dbapi
8028 bindb = self.trees[myroot]["bintree"].dbapi
8029 vardb = self.trees[myroot]["vartree"].dbapi
8030 vartree = self.trees[myroot]["vartree"]
8031 pkgsettings = self.pkgsettings[myroot]
8034 indent = " " * depth
8036 if isinstance(x, Blocker):
8038 blocker_style = "PKG_BLOCKER_SATISFIED"
8039 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
8041 blocker_style = "PKG_BLOCKER"
8042 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
8044 counters.blocks += 1
8046 counters.blocks_satisfied += 1
8047 resolved = portage.key_expand(
8048 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
8049 if "--columns" in self.myopts and "--quiet" in self.myopts:
8050 addl += " " + colorize(blocker_style, resolved)
8052 addl = "[%s %s] %s%s" % \
8053 (colorize(blocker_style, "blocks"),
8054 addl, indent, colorize(blocker_style, resolved))
8055 block_parents = self._blocker_parents.parent_nodes(x)
8056 block_parents = set([pnode[2] for pnode in block_parents])
8057 block_parents = ", ".join(block_parents)
8059 addl += colorize(blocker_style,
8060 " (\"%s\" is blocking %s)") % \
8061 (str(x.atom).lstrip("!"), block_parents)
8063 addl += colorize(blocker_style,
8064 " (is blocking %s)") % block_parents
8065 if isinstance(x, Blocker) and x.satisfied:
8070 blockers.append(addl)
8073 pkg_merge = ordered and pkg_status == "merge"
8074 if not pkg_merge and pkg_status == "merge":
8075 pkg_status = "nomerge"
8076 built = pkg_type != "ebuild"
8077 installed = pkg_type == "installed"
8079 metadata = pkg.metadata
8081 repo_name = metadata["repository"]
8082 if pkg_type == "ebuild":
8083 ebuild_path = portdb.findname(pkg_key)
8084 if not ebuild_path: # shouldn't happen
8085 raise portage.exception.PackageNotFound(pkg_key)
8086 repo_path_real = os.path.dirname(os.path.dirname(
8087 os.path.dirname(ebuild_path)))
8089 repo_path_real = portdb.getRepositoryPath(repo_name)
8090 pkg_use = list(pkg.use.enabled)
8092 restrict = flatten(use_reduce(paren_reduce(
8093 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8094 except portage.exception.InvalidDependString, e:
8095 if not pkg.installed:
8096 show_invalid_depstring_notice(x,
8097 pkg.metadata["RESTRICT"], str(e))
8101 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8102 "fetch" in restrict:
8105 counters.restrict_fetch += 1
8106 if portdb.fetch_check(pkg_key, pkg_use):
8109 counters.restrict_fetch_satisfied += 1
8111 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
8112 #param is used for -u, where you still *do* want to see when something is being upgraded.
8115 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8116 if vardb.cpv_exists(pkg_key):
8117 addl=" "+yellow("R")+fetch+" "
8120 counters.reinst += 1
8121 elif pkg_status == "uninstall":
8122 counters.uninst += 1
8123 # filter out old-style virtual matches
8124 elif installed_versions and \
8125 portage.cpv_getkey(installed_versions[0]) == \
8126 portage.cpv_getkey(pkg_key):
8127 myinslotlist = vardb.match(pkg.slot_atom)
8128 # If this is the first install of a new-style virtual, we
8129 # need to filter out old-style virtual matches.
8130 if myinslotlist and \
8131 portage.cpv_getkey(myinslotlist[0]) != \
8132 portage.cpv_getkey(pkg_key):
8135 myoldbest = myinslotlist[:]
8137 if not portage.dep.cpvequal(pkg_key,
8138 portage.best([pkg_key] + myoldbest)):
8140 addl += turquoise("U")+blue("D")
8142 counters.downgrades += 1
8145 addl += turquoise("U") + " "
8147 counters.upgrades += 1
8149 # New slot, mark it new.
8150 addl = " " + green("NS") + fetch + " "
8151 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8153 counters.newslot += 1
8155 if "--changelog" in self.myopts:
8156 inst_matches = vardb.match(pkg.slot_atom)
8158 changelogs.extend(self.calc_changelog(
8159 portdb.findname(pkg_key),
8160 inst_matches[0], pkg_key))
8162 addl = " " + green("N") + " " + fetch + " "
8171 forced_flags = set()
8172 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8173 forced_flags.update(pkgsettings.useforce)
8174 forced_flags.update(pkgsettings.usemask)
8176 cur_use = [flag for flag in pkg.use.enabled \
8177 if flag in pkg.iuse.all]
8178 cur_iuse = sorted(pkg.iuse.all)
8180 if myoldbest and myinslotlist:
8181 previous_cpv = myoldbest[0]
8183 previous_cpv = pkg.cpv
8184 if vardb.cpv_exists(previous_cpv):
8185 old_iuse, old_use = vardb.aux_get(
8186 previous_cpv, ["IUSE", "USE"])
8187 old_iuse = list(set(
8188 filter_iuse_defaults(old_iuse.split())))
8190 old_use = old_use.split()
8197 old_use = [flag for flag in old_use if flag in old_iuse]
8199 use_expand = pkgsettings["USE_EXPAND"].lower().split()
8201 use_expand.reverse()
8202 use_expand_hidden = \
8203 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8205 def map_to_use_expand(myvals, forcedFlags=False,
8209 for exp in use_expand:
8212 for val in myvals[:]:
8213 if val.startswith(exp.lower()+"_"):
8214 if val in forced_flags:
8215 forced[exp].add(val[len(exp)+1:])
8216 ret[exp].append(val[len(exp)+1:])
8219 forced["USE"] = [val for val in myvals \
8220 if val in forced_flags]
8222 for exp in use_expand_hidden:
8228 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8229 # are the only thing that triggered reinstallation.
8230 reinst_flags_map = {}
8231 reinstall_for_flags = self._reinstall_nodes.get(pkg)
8232 reinst_expand_map = None
8233 if reinstall_for_flags:
8234 reinst_flags_map = map_to_use_expand(
8235 list(reinstall_for_flags), removeHidden=False)
8236 for k in list(reinst_flags_map):
8237 if not reinst_flags_map[k]:
8238 del reinst_flags_map[k]
8239 if not reinst_flags_map.get("USE"):
8240 reinst_expand_map = reinst_flags_map.copy()
8241 reinst_expand_map.pop("USE", None)
8242 if reinst_expand_map and \
8243 not set(reinst_expand_map).difference(
8245 use_expand_hidden = \
8246 set(use_expand_hidden).difference(
8249 cur_iuse_map, iuse_forced = \
8250 map_to_use_expand(cur_iuse, forcedFlags=True)
8251 cur_use_map = map_to_use_expand(cur_use)
8252 old_iuse_map = map_to_use_expand(old_iuse)
8253 old_use_map = map_to_use_expand(old_use)
8256 use_expand.insert(0, "USE")
8258 for key in use_expand:
8259 if key in use_expand_hidden:
8261 verboseadd += create_use_string(key.upper(),
8262 cur_iuse_map[key], iuse_forced[key],
8263 cur_use_map[key], old_iuse_map[key],
8264 old_use_map[key], is_new,
8265 reinst_flags_map.get(key))
8270 if pkg_type == "ebuild" and pkg_merge:
8272 myfilesdict = portdb.getfetchsizes(pkg_key,
8273 useflags=pkg_use, debug=self.edebug)
8274 except portage.exception.InvalidDependString, e:
8275 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8276 show_invalid_depstring_notice(x, src_uri, str(e))
8279 if myfilesdict is None:
8280 myfilesdict="[empty/missing/bad digest]"
8282 for myfetchfile in myfilesdict:
8283 if myfetchfile not in myfetchlist:
8284 mysize+=myfilesdict[myfetchfile]
8285 myfetchlist.append(myfetchfile)
8287 counters.totalsize += mysize
8288 verboseadd += format_size(mysize)
8291 # assign index for a previous version in the same slot
8292 has_previous = False
8293 repo_name_prev = None
8294 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8296 slot_matches = vardb.match(slot_atom)
8299 repo_name_prev = vardb.aux_get(slot_matches[0],
8302 # now use the data to generate output
8303 if pkg.installed or not has_previous:
8304 repoadd = repo_display.repoStr(repo_path_real)
8306 repo_path_prev = None
8308 repo_path_prev = portdb.getRepositoryPath(
8310 if repo_path_prev == repo_path_real:
8311 repoadd = repo_display.repoStr(repo_path_real)
8313 repoadd = "%s=>%s" % (
8314 repo_display.repoStr(repo_path_prev),
8315 repo_display.repoStr(repo_path_real))
8317 repoadd_set.add(repoadd)
8319 xs = [portage.cpv_getkey(pkg_key)] + \
8320 list(portage.catpkgsplit(pkg_key)[2:])
8327 if "COLUMNWIDTH" in self.settings:
8329 mywidth = int(self.settings["COLUMNWIDTH"])
8330 except ValueError, e:
8331 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8333 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8334 self.settings["COLUMNWIDTH"], noiselevel=-1)
8336 oldlp = mywidth - 30
8339 # Convert myoldbest from a list to a string.
8343 for pos, key in enumerate(myoldbest):
8344 key = portage.catpkgsplit(key)[2] + \
8345 "-" + portage.catpkgsplit(key)[3]
8346 if key[-3:] == "-r0":
8348 myoldbest[pos] = key
8349 myoldbest = blue("["+", ".join(myoldbest)+"]")
8352 root_config = self.roots[myroot]
8353 system_set = root_config.sets["system"]
8354 world_set = root_config.sets["world"]
8359 pkg_system = system_set.findAtomForPackage(pkg)
8360 pkg_world = world_set.findAtomForPackage(pkg)
8361 if not (oneshot or pkg_world) and \
8362 myroot == self.target_root and \
8363 favorites_set.findAtomForPackage(pkg):
8364 # Maybe it will be added to world now.
8365 if create_world_atom(pkg, favorites_set, root_config):
8367 except portage.exception.InvalidDependString:
8368 # This is reported elsewhere if relevant.
8371 def pkgprint(pkg_str):
8374 return colorize("PKG_MERGE_SYSTEM", pkg_str)
8376 return colorize("PKG_MERGE_WORLD", pkg_str)
8378 return colorize("PKG_MERGE", pkg_str)
8379 elif pkg_status == "uninstall":
8380 return colorize("PKG_UNINSTALL", pkg_str)
8383 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8385 return colorize("PKG_NOMERGE_WORLD", pkg_str)
8387 return colorize("PKG_NOMERGE", pkg_str)
8390 properties = flatten(use_reduce(paren_reduce(
8391 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8392 except portage.exception.InvalidDependString, e:
8393 if not pkg.installed:
8394 show_invalid_depstring_notice(pkg,
8395 pkg.metadata["PROPERTIES"], str(e))
8399 interactive = "interactive" in properties
8400 if interactive and pkg.operation == "merge":
8401 addl = colorize("WARN", "I") + addl[1:]
8403 counters.interactive += 1
8408 if "--columns" in self.myopts:
8409 if "--quiet" in self.myopts:
8410 myprint=addl+" "+indent+pkgprint(pkg_cp)
8411 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8412 myprint=myprint+myoldbest
8413 myprint=myprint+darkgreen("to "+x[1])
8417 myprint = "[%s] %s%s" % \
8418 (pkgprint(pkg_status.ljust(13)),
8419 indent, pkgprint(pkg.cp))
8421 myprint = "[%s %s] %s%s" % \
8422 (pkgprint(pkg.type_name), addl,
8423 indent, pkgprint(pkg.cp))
8424 if (newlp-nc_len(myprint)) > 0:
8425 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8426 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8427 if (oldlp-nc_len(myprint)) > 0:
8428 myprint=myprint+" "*(oldlp-nc_len(myprint))
8429 myprint=myprint+myoldbest
8430 myprint += darkgreen("to " + pkg.root)
8433 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8435 myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8436 myprint += indent + pkgprint(pkg_key) + " " + \
8437 myoldbest + darkgreen("to " + myroot)
8439 if "--columns" in self.myopts:
8440 if "--quiet" in self.myopts:
8441 myprint=addl+" "+indent+pkgprint(pkg_cp)
8442 myprint=myprint+" "+green(xs[1]+xs[2])+" "
8443 myprint=myprint+myoldbest
8447 myprint = "[%s] %s%s" % \
8448 (pkgprint(pkg_status.ljust(13)),
8449 indent, pkgprint(pkg.cp))
8451 myprint = "[%s %s] %s%s" % \
8452 (pkgprint(pkg.type_name), addl,
8453 indent, pkgprint(pkg.cp))
8454 if (newlp-nc_len(myprint)) > 0:
8455 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8456 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8457 if (oldlp-nc_len(myprint)) > 0:
8458 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8459 myprint += myoldbest
8462 myprint = "[%s] %s%s %s" % \
8463 (pkgprint(pkg_status.ljust(13)),
8464 indent, pkgprint(pkg.cpv),
8467 myprint = "[%s %s] %s%s %s" % \
8468 (pkgprint(pkg_type), addl, indent,
8469 pkgprint(pkg.cpv), myoldbest)
8471 if columns and pkg.operation == "uninstall":
8473 p.append((myprint, verboseadd, repoadd))
8475 if "--tree" not in self.myopts and \
8476 "--quiet" not in self.myopts and \
8477 not self._opts_no_restart.intersection(self.myopts) and \
8478 pkg.root == self._running_root.root and \
8479 portage.match_from_list(
8480 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8481 not vardb.cpv_exists(pkg.cpv) and \
8482 "--quiet" not in self.myopts:
8483 if mylist_index < len(mylist) - 1:
8484 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8485 p.append(colorize("WARN", " then resume the merge."))
8488 show_repos = repoadd_set and repoadd_set != set(["0"])
8491 if isinstance(x, basestring):
8492 out.write("%s\n" % (x,))
8495 myprint, verboseadd, repoadd = x
8498 myprint += " " + verboseadd
8500 if show_repos and repoadd:
8501 myprint += " " + teal("[%s]" % repoadd)
8503 out.write("%s\n" % (myprint,))
8512 sys.stdout.write(str(repo_display))
8514 if "--changelog" in self.myopts:
8516 for revision,text in changelogs:
8517 print bold('*'+revision)
8518 sys.stdout.write(text)
8523 def display_problems(self):
# Report dependency-graph problems (circular deps, blockers, masked
# packages, ...) after the merge list has been shown.
# NOTE(review): this excerpt is elided (gaps in the embedded numbering);
# the save/restore of the real sys.stdout around the redirect below is
# not visible here -- confirm against the full source.
8525 Display problems with the dependency graph such as slot collisions.
8526 This is called internally by display() to show the problems _after_
8527 the merge list where it is most likely to be seen, but if display()
8528 is not going to be called then this method should be called explicitly
8529 to ensure that the user is notified of problems with the graph.
8531 All output goes to stderr, except for unsatisfied dependencies which
8532 go to stdout for parsing by programs such as autounmask.
8535 # Note that show_masked_packages() sends its output to
8536 # stdout, and some programs such as autounmask parse the
8537 # output in cases when emerge bails out. However, when
8538 # show_masked_packages() is called for installed packages
8539 # here, the message is a warning that is more appropriate
8540 # to send to stderr, so temporarily redirect stdout to
8541 # stderr. TODO: Fix output code so there's a cleaner way
8542 # to redirect everything to stderr.
8547 sys.stdout = sys.stderr
8548 self._display_problems()
# Unsatisfied deps are printed last, on stdout, so external tools
# (autounmask) can parse them.
8554 # This goes to stdout for parsing by programs like autounmask.
8555 for pargs, kwargs in self._unsatisfied_deps_for_display:
8556 self._show_unsatisfied_dep(*pargs, **kwargs)
8558 def _display_problems(self):
# Emit each problem category in turn; all messages here go to
# sys.stderr (display_problems() temporarily redirects stdout).
# NOTE(review): several branch bodies are elided in this excerpt
# (gaps in the embedded numbering).
8559 if self._circular_deps_for_display is not None:
8560 self._show_circular_deps(
8561 self._circular_deps_for_display)
8563 # The user is only notified of a slot conflict if
8564 # there are no unresolvable blocker conflicts.
8565 if self._unsatisfied_blockers_for_display is not None:
8566 self._show_unsatisfied_blockers(
8567 self._unsatisfied_blockers_for_display)
8569 self._show_slot_collision_notice()
8571 # TODO: Add generic support for "set problem" handlers so that
8572 # the below warnings aren't special cases for world only.
8574 if self._missing_args:
8575 world_problems = False
8576 if "world" in self._sets:
8577 # Filter out indirect members of world (from nested sets)
8578 # since only direct members of world are desired here.
8579 world_set = self.roots[self.target_root].sets["world"]
8580 for arg, atom in self._missing_args:
8581 if arg.name == "world" and atom in world_set:
8582 world_problems = True
8586 sys.stderr.write("\n!!! Problems have been " + \
8587 "detected with your world file\n")
8588 sys.stderr.write("!!! Please run " + \
8589 green("emaint --check world")+"\n\n")
8591 if self._missing_args:
8592 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8593 " Ebuilds for the following packages are either all\n")
8594 sys.stderr.write(colorize("BAD", "!!!") + \
8595 " masked or don't exist:\n")
8596 sys.stderr.write(" ".join(str(atom) for arg, atom in \
8597 self._missing_args) + "\n")
8599 if self._pprovided_args:
# Group package.provided complaints by (arg, atom) and remember
# which parent sets pulled each one in.
8601 for arg, atom in self._pprovided_args:
8602 if isinstance(arg, SetArg):
8604 arg_atom = (atom, atom)
8607 arg_atom = (arg.arg, atom)
8608 refs = arg_refs.setdefault(arg_atom, [])
8609 if parent not in refs:
8612 msg.append(bad("\nWARNING: "))
8613 if len(self._pprovided_args) > 1:
8614 msg.append("Requested packages will not be " + \
8615 "merged because they are listed in\n")
8617 msg.append("A requested package will not be " + \
8618 "merged because it is listed in\n")
8619 msg.append("package.provided:\n\n")
8620 problems_sets = set()
8621 for (arg, atom), refs in arg_refs.iteritems():
8624 problems_sets.update(refs)
8626 ref_string = ", ".join(["'%s'" % name for name in refs])
8627 ref_string = " pulled in by " + ref_string
8628 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8630 if "world" in problems_sets:
8631 msg.append("This problem can be solved in one of the following ways:\n\n")
8632 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
8633 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
8634 msg.append(" C) Remove offending entries from package.provided.\n\n")
8635 msg.append("The best course of action depends on the reason that an offending\n")
8636 msg.append("package.provided entry exists.\n\n")
8637 sys.stderr.write("".join(msg))
# Warn about installed packages that are now masked.
8639 masked_packages = []
8640 for pkg in self._masked_installed:
8641 root_config = pkg.root_config
8642 pkgsettings = self.pkgsettings[pkg.root]
8643 mreasons = get_masking_status(pkg, pkgsettings, root_config)
8644 masked_packages.append((root_config, pkgsettings,
8645 pkg.cpv, pkg.metadata, mreasons))
8647 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8648 " The following installed packages are masked:\n")
8649 show_masked_packages(masked_packages)
8653 def calc_changelog(self,ebuildpath,current,next):
# Collect the ChangeLog entries between the installed version
# (current) and the version about to be merged (next). Versions are
# normalized by stripping the implicit "-r0" revision suffix.
# NOTE(review): several lines (early returns, loop breaks) are elided
# in this excerpt -- confirm control flow against the full source.
8654 if ebuildpath == None or not os.path.exists(ebuildpath):
8656 current = '-'.join(portage.catpkgsplit(current)[1:])
8657 if current.endswith('-r0'):
8658 current = current[:-3]
8659 next = '-'.join(portage.catpkgsplit(next)[1:])
8660 if next.endswith('-r0'):
8662 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8664 changelog = open(changelogpath).read()
8665 except SystemExit, e:
8666 raise # Needed else can't exit
8669 divisions = self.find_changelog_tags(changelog)
8670 #print 'XX from',current,'to',next
8671 #for div,text in divisions: print 'XX',div
8672 # skip entries for all revisions above the one we are about to emerge
8673 for i in range(len(divisions)):
8674 if divisions[i][0]==next:
8675 divisions = divisions[i:]
8677 # find out how many entries we are going to display
8678 for i in range(len(divisions)):
8679 if divisions[i][0]==current:
8680 divisions = divisions[:i]
8683 # couldn't find the current revision in the list. display nothing
8687 def find_changelog_tags(self,changelog):
# Split a ChangeLog into (version, text) chunks by scanning for
# "* <name>-<version>[.ebuild]" header lines; the ".ebuild" and
# "-r0" suffixes are stripped from the parsed version.
# NOTE(review): the enclosing loop construct and the return statement
# are elided in this excerpt.
8691 match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8693 if release is not None:
8694 divs.append((release,changelog))
8696 if release is not None:
8697 divs.append((release,changelog[:match.start()]))
8698 changelog = changelog[match.end():]
8699 release = match.group(1)
8700 if release.endswith('.ebuild'):
8701 release = release[:-7]
8702 if release.endswith('-r0'):
8703 release = release[:-3]
8705 def saveNomergeFavorites(self):
8706 """Find atoms in favorites that are not in the mergelist and add them
8707 to the world file if necessary."""
# Bail out for option modes that must not modify the world file.
8708 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8709 "--oneshot", "--onlydeps", "--pretend"):
8710 if x in self.myopts:
8712 root_config = self.roots[self.target_root]
8713 world_set = root_config.sets["world"]
# NOTE(review): the lock() call and the matching unlock (presumably in
# a finally block) are elided in this excerpt -- confirm.
8715 world_locked = False
8716 if hasattr(world_set, "lock"):
8720 if hasattr(world_set, "load"):
8721 world_set.load() # maybe it's changed on disk
8723 args_set = self._sets["args"]
8724 portdb = self.trees[self.target_root]["porttree"].dbapi
8725 added_favorites = set()
# Only "nomerge" nodes are considered: merged packages are recorded
# elsewhere, these are favorites that were already satisfied.
8726 for x in self._set_nodes:
8727 pkg_type, root, pkg_key, pkg_status = x
8728 if pkg_status != "nomerge":
8732 myfavkey = create_world_atom(x, args_set, root_config)
8734 if myfavkey in added_favorites:
8736 added_favorites.add(myfavkey)
8737 except portage.exception.InvalidDependString, e:
8738 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8739 (pkg_key, str(e)), noiselevel=-1)
8740 writemsg("!!! see '%s'\n\n" % os.path.join(
8741 root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
# Also record requested sets (with their SETPREFIX) as world
# candidates, then write everything in one update.
8744 for k in self._sets:
8745 if k in ("args", "world") or not root_config.sets[k].world_candidate:
8750 all_added.append(SETPREFIX + k)
8751 all_added.extend(added_favorites)
8754 print ">>> Recording %s in \"world\" favorites file..." % \
8755 colorize("INFORM", str(a))
8757 world_set.update(all_added)
8762 def loadResumeCommand(self, resume_data, skip_masked=False):
# Rebuild the depgraph from a saved resume list ("mergelist" entries of
# the form [pkg_type, root, cpv, action]) and re-validate it.
# NOTE(review): numerous lines (early returns, continue/break
# statements, try/except bodies) are elided in this excerpt.
8764 Add a resume command to the graph and validate it in the process. This
8765 will raise a PackageNotFound exception if a package is not available.
8768 if not isinstance(resume_data, dict):
8771 mergelist = resume_data.get("mergelist")
8772 if not isinstance(mergelist, list):
8775 fakedb = self.mydbapi
8777 serialized_tasks = []
8780 if not (isinstance(x, list) and len(x) == 4):
8782 pkg_type, myroot, pkg_key, action = x
8783 if pkg_type not in self.pkg_tree_map:
8785 if action != "merge":
8787 tree_type = self.pkg_tree_map[pkg_type]
8788 mydb = trees[myroot][tree_type].dbapi
8789 db_keys = list(self._trees_orig[myroot][
8790 tree_type].dbapi._aux_cache_keys)
8792 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8794 # It does not exist or it is corrupt.
8795 if action == "uninstall":
8797 raise portage.exception.PackageNotFound(pkg_key)
8798 installed = action == "uninstall"
8799 built = pkg_type != "ebuild"
8800 root_config = self.roots[myroot]
8801 pkg = Package(built=built, cpv=pkg_key,
8802 installed=installed, metadata=metadata,
8803 operation=action, root_config=root_config,
# Ebuild USE must be recalculated against current settings rather
# than taken from the stale resume metadata.
8805 if pkg_type == "ebuild":
8806 pkgsettings = self.pkgsettings[myroot]
8807 pkgsettings.setcpv(pkg)
8808 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8809 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8810 self._pkg_cache[pkg] = pkg
8812 root_config = self.roots[pkg.root]
8813 if "merge" == pkg.operation and \
8814 not visible(root_config.settings, pkg):
8816 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8818 self._unsatisfied_deps_for_display.append(
8819 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8821 fakedb[myroot].cpv_inject(pkg)
8822 serialized_tasks.append(pkg)
8823 self.spinner.update()
8825 if self._unsatisfied_deps_for_display:
8828 if not serialized_tasks or "--nodeps" in self.myopts:
8829 self._serialized_tasks_cache = serialized_tasks
8830 self._scheduler_graph = self.digraph
8832 self._select_package = self._select_pkg_from_graph
8833 self.myparams.add("selective")
8834 # Always traverse deep dependencies in order to account for
8835 # potentially unsatisfied dependencies of installed packages.
8836 # This is necessary for correct --keep-going or --resume operation
8837 # in case a package from a group of circularly dependent packages
8838 # fails. In this case, a package which has recently been installed
8839 # may have an unsatisfied circular dependency (pulled in by
8840 # PDEPEND, for example). So, even though a package is already
8841 # installed, it may not have all of its dependencies satisfied, so
8842 # it may not be usable. If such a package is in the subgraph of
8843 # deep dependencies of a scheduled build, that build needs to
8844 # be cancelled. In order for this type of situation to be
8845 # recognized, deep traversal of dependencies is required.
8846 self.myparams.add("deep")
8848 favorites = resume_data.get("favorites")
8849 args_set = self._sets["args"]
8850 if isinstance(favorites, list):
8851 args = self._load_favorites(favorites)
8855 for task in serialized_tasks:
8856 if isinstance(task, Package) and \
8857 task.operation == "merge":
8858 if not self._add_pkg(task, None):
8861 # Packages for argument atoms need to be explicitly
8862 # added via _add_pkg() so that they are included in the
8863 # digraph (needed at least for --tree display).
8865 for atom in arg.set:
8866 pkg, existing_node = self._select_package(
8867 arg.root_config.root, atom)
8868 if existing_node is None and \
8870 if not self._add_pkg(pkg, Dependency(atom=atom,
8871 root=pkg.root, parent=arg)):
8874 # Allow unsatisfied deps here to avoid showing a masking
8875 # message for an unsatisfied dep that isn't necessarily
8877 if not self._create_graph(allow_unsatisfied=True):
8880 unsatisfied_deps = []
8881 for dep in self._unsatisfied_deps:
8882 if not isinstance(dep.parent, Package):
8884 if dep.parent.operation == "merge":
8885 unsatisfied_deps.append(dep)
8888 # For unsatisfied deps of installed packages, only account for
8889 # them if they are in the subgraph of dependencies of a package
8890 # which is scheduled to be installed.
8891 unsatisfied_install = False
8893 dep_stack = self.digraph.parent_nodes(dep.parent)
8895 node = dep_stack.pop()
8896 if not isinstance(node, Package):
8898 if node.operation == "merge":
8899 unsatisfied_install = True
8901 if node in traversed:
8904 dep_stack.extend(self.digraph.parent_nodes(node))
8906 if unsatisfied_install:
8907 unsatisfied_deps.append(dep)
8909 if masked_tasks or unsatisfied_deps:
8910 # This probably means that a required package
8911 # was dropped via --skipfirst. It makes the
8912 # resume list invalid, so convert it to a
8913 # UnsatisfiedResumeDep exception.
8914 raise self.UnsatisfiedResumeDep(self,
8915 masked_tasks + unsatisfied_deps)
8916 self._serialized_tasks_cache = None
8919 except self._unknown_internal_error:
8924 def _load_favorites(self, favorites):
# Translate the saved favorites list back into SetArg/AtomArg
# instances so resumed packages can be matched to their arguments.
# NOTE(review): the loop header over `favorites` and several skip
# branches are elided in this excerpt.
8926 Use a list of favorites to resume state from a
8927 previous select_files() call. This creates similar
8928 DependencyArg instances to those that would have
8929 been created by the original select_files() call.
8930 This allows Package instances to be matched with
8931 DependencyArg instances during graph creation.
8933 root_config = self.roots[self.target_root]
8934 getSetAtoms = root_config.setconfig.getSetAtoms
8935 sets = root_config.sets
8938 if not isinstance(x, basestring):
8940 if x in ("system", "world"):
8942 if x.startswith(SETPREFIX):
8943 s = x[len(SETPREFIX):]
8948 # Recursively expand sets so that containment tests in
8949 # self._get_parent_sets() properly match atoms in nested
8950 # sets (like if world contains system).
8951 expanded_set = InternalPackageSet(
8952 initial_atoms=getSetAtoms(s))
8953 self._sets[s] = expanded_set
8954 args.append(SetArg(arg=x, set=expanded_set,
8955 root_config=root_config))
# Anything that is not a set reference is treated as a plain atom.
8957 if not portage.isvalidatom(x):
8959 args.append(AtomArg(arg=x, atom=x,
8960 root_config=root_config))
8962 self._set_args(args)
8965 class UnsatisfiedResumeDep(portage.exception.PortageException):
# Raised by loadResumeCommand() when the resume list has masked or
# unsatisfied entries; carries the depgraph for later inspection.
8967 A dependency of a resume list is not installed. This
8968 can occur when a required package is dropped from the
8969 merge list via --skipfirst.
8971 def __init__(self, depgraph, value):
8972 portage.exception.PortageException.__init__(self, value)
8973 self.depgraph = depgraph
8975 class _internal_exception(portage.exception.PortageException):
# Base class for depgraph-internal control-flow exceptions
# (see the subclasses defined below).
8976 def __init__(self, value=""):
8977 portage.exception.PortageException.__init__(self, value)
8979 class _unknown_internal_error(_internal_exception):
# NOTE(review): the tail of this docstring is elided in this excerpt.
8981 Used by the depgraph internally to terminate graph creation.
8982 The specific reason for the failure should have been dumped
8983 to stderr, unfortunately, the exact reason for the failure
8987 class _serialize_tasks_retry(_internal_exception):
# Control-flow signal, not an error: tells the caller to invoke
# _serialize_tasks() again.
8989 This is raised by the _serialize_tasks() method when it needs to
8990 be called again for some reason. The only case that it's currently
8991 used for is when neglected dependencies need to be added to the
8992 graph in order to avoid making a potentially unsafe decision.
8995 class _dep_check_composite_db(portage.dbapi):
8997 A dbapi-like interface that is optimized for use in dep_check() calls.
8998 This is built on top of the existing depgraph package selection logic.
8999 Some packages that have been added to the graph may be masked from this
9000 view in order to influence the atom preference selection that occurs
9003 def __init__(self, depgraph, root):
9004 portage.dbapi.__init__(self)
9005 self._depgraph = depgraph
# NOTE(review): a line is elided here; presumably it stores `root`
# (self._root is read by match()/_visible() below) -- confirm.
9007 self._match_cache = {}
9008 self._cpv_pkg_map = {}
9010 def _clear_cache(self):
# Drop memoized match() results and the cpv -> Package mapping.
9011 self._match_cache.clear()
9012 self._cpv_pkg_map.clear()
9014 def match(self, atom):
# dbapi-style match() that answers from the depgraph's package
# selection logic, memoized per atom in self._match_cache.
# NOTE(review): several lines (cache hit return, `orig_atom`
# assignment, loop/branch bodies) are elided in this excerpt.
9015 ret = self._match_cache.get(atom)
9020 atom = self._dep_expand(atom)
9021 pkg, existing = self._depgraph._select_package(self._root, atom)
9025 # Return the highest available from select_package() as well as
9026 # any matching slots in the graph db.
9028 slots.add(pkg.metadata["SLOT"])
9029 atom_cp = portage.dep_getkey(atom)
9030 if pkg.cp.startswith("virtual/"):
9031 # For new-style virtual lookahead that occurs inside
9032 # dep_check(), examine all slots. This is needed
9033 # so that newer slots will not unnecessarily be pulled in
9034 # when a satisfying lower slot is already installed. For
9035 # example, if virtual/jdk-1.4 is satisfied via kaffe then
9036 # there's no need to pull in a newer slot to satisfy a
9037 # virtual/jdk dependency.
9038 for db, pkg_type, built, installed, db_keys in \
9039 self._depgraph._filtered_trees[self._root]["dbs"]:
9040 for cpv in db.match(atom):
9041 if portage.cpv_getkey(cpv) != pkg.cp:
9043 slots.add(db.aux_get(cpv, ["SLOT"])[0])
9045 if self._visible(pkg):
9046 self._cpv_pkg_map[pkg.cpv] = pkg
9048 slots.remove(pkg.metadata["SLOT"])
# Resolve a representative package for each remaining slot.
9050 slot_atom = "%s:%s" % (atom_cp, slots.pop())
9051 pkg, existing = self._depgraph._select_package(
9052 self._root, slot_atom)
9055 if not self._visible(pkg):
9057 self._cpv_pkg_map[pkg.cpv] = pkg
9060 self._cpv_sort_ascending(ret)
9061 self._match_cache[orig_atom] = ret
9064 def _visible(self, pkg):
9065 if pkg.installed and "selective" not in self._depgraph.myparams:
9067 arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
9068 except (StopIteration, portage.exception.InvalidDependString):
9075 self._depgraph.pkgsettings[pkg.root], pkg):
9077 except portage.exception.InvalidDependString:
9079 in_graph = self._depgraph._slot_pkg_map[
9080 self._root].get(pkg.slot_atom)
9081 if in_graph is None:
9082 # Mask choices for packages which are not the highest visible
9083 # version within their slot (since they usually trigger slot
9085 highest_visible, in_graph = self._depgraph._select_package(
9086 self._root, pkg.slot_atom)
9087 if pkg != highest_visible:
9089 elif in_graph != pkg:
9090 # Mask choices for packages that would trigger a slot
9091 # conflict with a previously selected package.
9095 def _dep_expand(self, atom):
9097 This is only needed for old installed packages that may
9098 contain atoms that are not fully qualified with a specific
9099 category. Emulate the cpv_expand() function that's used by
9100 dbapi.match() in cases like this. If there are multiple
9101 matches, it's often due to a new-style virtual that has
9102 been added, so try to filter those out to avoid raising
9105 root_config = self._depgraph.roots[self._root]
9107 expanded_atoms = self._depgraph._dep_expand(root_config, atom)
9108 if len(expanded_atoms) > 1:
9109 non_virtual_atoms = []
9110 for x in expanded_atoms:
9111 if not portage.dep_getkey(x).startswith("virtual/"):
9112 non_virtual_atoms.append(x)
9113 if len(non_virtual_atoms) == 1:
9114 expanded_atoms = non_virtual_atoms
9115 if len(expanded_atoms) > 1:
9116 # compatible with portage.cpv_expand()
9117 raise portage.exception.AmbiguousPackageName(
9118 [portage.dep_getkey(x) for x in expanded_atoms])
9120 atom = expanded_atoms[0]
9122 null_atom = insert_category_into_atom(atom, "null")
9123 null_cp = portage.dep_getkey(null_atom)
9124 cat, atom_pn = portage.catsplit(null_cp)
9125 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9127 # Allow the resolver to choose which virtual.
9128 atom = insert_category_into_atom(atom, "virtual")
9130 atom = insert_category_into_atom(atom, "null")
9133 def aux_get(self, cpv, wants):
9134 metadata = self._cpv_pkg_map[cpv].metadata
9135 return [metadata.get(x, "") for x in wants]
class RepoDisplay(object):
    """Maps repository paths to short display indices ([0], [1], ...)
    for merge-list output; index 0 is always reserved for PORTDIR."""

    def __init__(self, roots):
        self._shown_repos = {}
        self._unknown_repo = False
        # Collect PORTDIR plus every overlay from each configured root.
        # NOTE(review): the initialization of the repo_paths set is not
        # visible in this excerpt.
        for root_config in roots.itervalues():
            portdir = root_config.settings.get("PORTDIR")
            repo_paths.add(portdir)
            overlays = root_config.settings.get("PORTDIR_OVERLAY")
            repo_paths.update(overlays.split())
        repo_paths = list(repo_paths)
        self._repo_paths = repo_paths
        # Realpath'd copies are used for lookups in repoStr().
        self._repo_paths_real = [ os.path.realpath(repo_path) \
            for repo_path in repo_paths ]

        # pre-allocate index for PORTDIR so that it always has index 0.
        for root_config in roots.itervalues():
            portdb = root_config.trees["porttree"].dbapi
            portdir = portdb.porttree_root
            self.repoStr(portdir)

    def repoStr(self, repo_path_real):
        # Translate a realpath'd repository location to its display index,
        # allocating a new index on first sight.
        real_index = self._repo_paths_real.index(repo_path_real)
        if real_index == -1:
            # Unknown repository: remember so __str__ can print the
            # "[?]" legend.
            self._unknown_repo = True
        shown_repos = self._shown_repos
        repo_paths = self._repo_paths
        repo_path = repo_paths[real_index]
        index = shown_repos.get(repo_path)
            index = len(shown_repos)
            shown_repos[repo_path] = index
        # --- __str__ body below; its "def __str__(self):" header is not
        # visible in this excerpt ---
        shown_repos = self._shown_repos
        unknown_repo = self._unknown_repo
        if shown_repos or self._unknown_repo:
            output.append("Portage tree and overlays:\n")
        # Invert index->path ordering for stable display.
        show_repo_paths = list(shown_repos)
        for repo_path, repo_index in shown_repos.iteritems():
            show_repo_paths[repo_index] = repo_path
        for index, repo_path in enumerate(show_repo_paths):
            output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
        output.append(" "+teal("[?]") + \
            " indicates that the source repository could not be determined\n")
        return "".join(output)
class PackageCounters(object):
    """Tallies merge-list statistics (upgrades, downgrades, new slots,
    reinstalls, uninstalls, blocks, fetch restrictions) and renders the
    'Total: N packages (...)' summary line."""

    # --- __init__ body; its header and the first counter assignments are
    # not visible in this excerpt ---
        self.blocks_satisfied = 0
        self.restrict_fetch = 0
        self.restrict_fetch_satisfied = 0
        self.interactive = 0

    # --- __str__ body; header not visible in this excerpt ---
        total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
        myoutput.append("Total: %s package" % total_installs)
        # Pluralize and open the parenthesized detail list.
        if total_installs != 1:
            myoutput.append("s")
        if total_installs != 0:
            myoutput.append(" (")
        if self.upgrades > 0:
            details.append("%s upgrade" % self.upgrades)
            if self.upgrades > 1:
        if self.downgrades > 0:
            details.append("%s downgrade" % self.downgrades)
            if self.downgrades > 1:
            details.append("%s new" % self.new)
        if self.newslot > 0:
            details.append("%s in new slot" % self.newslot)
            if self.newslot > 1:
            details.append("%s reinstall" % self.reinst)
            details.append("%s uninstall" % self.uninst)
        if self.interactive > 0:
            details.append("%s %s" % (self.interactive,
                colorize("WARN", "interactive")))
        myoutput.append(", ".join(details))
        if total_installs != 0:
            myoutput.append(")")
        myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
        # Append fetch-restriction and blocker summaries when relevant,
        # highlighting unsatisfied counts in the "bad" color.
        if self.restrict_fetch:
            myoutput.append("\nFetch Restriction: %s package" % \
                self.restrict_fetch)
            if self.restrict_fetch > 1:
                myoutput.append("s")
        if self.restrict_fetch_satisfied < self.restrict_fetch:
            myoutput.append(bad(" (%s unsatisfied)") % \
                (self.restrict_fetch - self.restrict_fetch_satisfied))
            myoutput.append("\nConflict: %s block" % \
                myoutput.append("s")
        if self.blocks_satisfied < self.blocks:
            myoutput.append(bad(" (%s unsatisfied)") % \
                (self.blocks - self.blocks_satisfied))
        return "".join(myoutput)
class UseFlagDisplay(object):
    """One USE flag for --verbose output: its name, whether it is
    enabled, and whether it is forced (use.force/use.mask)."""

    __slots__ = ('name', 'enabled', 'forced')

    def __init__(self, name, enabled, forced):
        # NOTE(review): the assignment of self.name is not visible in
        # this excerpt.
        self.enabled = enabled
        self.forced = forced

    def _cmp_combined(a, b):
        """
        Sort by name, combining enabled and disabled flags.
        """
        # cmp()-style result: -1, 0 or 1.
        return (a.name > b.name) - (a.name < b.name)

    sort_combined = cmp_sort_key(_cmp_combined)

    def _cmp_separated(a, b):
        """
        Sort by name, separating enabled flags from disabled flags.
        """
        # Enabled flags sort before disabled ones; ties fall back to name.
        enabled_diff = b.enabled - a.enabled
        return (a.name > b.name) - (a.name < b.name)

    sort_separated = cmp_sort_key(_cmp_separated)
class PollSelectAdapter(PollConstants):
    """
    Use select to emulate a poll object, for
    systems that don't support poll().
    """

    # --- __init__ body; header not visible in this excerpt ---
        # fd -> eventmask of registered descriptors, and a cached
        # 3-list of select() arguments (invalidated on (un)register).
        self._registered = {}
        self._select_args = [[], [], []]

    def register(self, fd, *args):
        """
        Only POLLIN is currently supported!
        """
        # Mirrors poll.register(): at most one optional eventmask arg.
            "register expected at most 2 arguments, got " + \
            repr(1 + len(args)))
        # Default mask when the caller supplies none.
        eventmask = PollConstants.POLLIN | \
            PollConstants.POLLPRI | PollConstants.POLLOUT
        self._registered[fd] = eventmask
        # Invalidate the cached select() argument list.
        self._select_args = None

    def unregister(self, fd):
        self._select_args = None
        del self._registered[fd]

    def poll(self, *args):
        # Mirrors poll.poll(): at most one optional timeout argument.
            "poll expected at most 2 arguments, got " + \
            repr(1 + len(args)))
        select_args = self._select_args
        if select_args is None:
            # Rebuild and cache: watch all registered fds for read only.
            select_args = [self._registered.keys(), [], []]

        if timeout is not None:
            # Copy before appending so the cached list stays 3 elements.
            select_args = select_args[:]
            # Translate poll() timeout args to select() timeout args:
            #
            #          | units        | value(s) for indefinite block
            # ---------|--------------|------------------------------
            #   poll   | milliseconds | omitted, negative, or None
            # ---------|--------------|------------------------------
            #   select | seconds      | omitted
            # ---------|--------------|------------------------------
            if timeout is not None and timeout < 0:
            if timeout is not None:
                select_args.append(timeout / 1000)

        select_events = select.select(*select_args)
        # Report every readable fd as POLLIN (the only supported event).
        for fd in select_events[0]:
            poll_events.append((fd, PollConstants.POLLIN))
class SequentialTaskQueue(SlotObject):
    """FIFO queue of asynchronous tasks; schedule() starts queued tasks
    until max_jobs are running (max_jobs is True means unlimited)."""

    __slots__ = ("max_jobs", "running_tasks") + \
        ("_dirty", "_scheduling", "_task_queue")

    def __init__(self, **kwargs):
        SlotObject.__init__(self, **kwargs)
        # deque gives O(1) popleft/appendleft for FIFO scheduling.
        self._task_queue = deque()
        self.running_tasks = set()
        if self.max_jobs is None:

    def add(self, task):
        self._task_queue.append(task)

    def addFront(self, task):
        # Jump the queue: task will be started before existing entries.
        self._task_queue.appendleft(task)

    # --- schedule() body; header not visible in this excerpt ---
        if self._scheduling:
            # Ignore any recursive schedule() calls triggered via
            # self._task_exit().

        self._scheduling = True

        task_queue = self._task_queue
        running_tasks = self.running_tasks
        max_jobs = self.max_jobs
        state_changed = False

        # Start tasks until the concurrency limit is reached
        # (max_jobs is True == no limit).
        while task_queue and \
            (max_jobs is True or len(running_tasks) < max_jobs):
            task = task_queue.popleft()
            cancelled = getattr(task, "cancelled", None)
            running_tasks.add(task)
            # _task_exit prunes running_tasks when the task finishes.
            task.addExitListener(self._task_exit)
            state_changed = True

        self._scheduling = False

        return state_changed

    def _task_exit(self, task):
        """
        Since we can always rely on exit listeners being called, the set of
        running tasks is always pruned automatically and there is never any need
        to actively prune it.
        """
        self.running_tasks.remove(task)
        if self._task_queue:

    # --- clear() body; header not visible in this excerpt ---
        self._task_queue.clear()
        running_tasks = self.running_tasks
        while running_tasks:
            task = running_tasks.pop()
            # Detach our listener so a late exit doesn't touch the set.
            task.removeExitListener(self._task_exit)

    def __nonzero__(self):
        # Truthy while any work is queued or in flight.
        return bool(self._task_queue or self.running_tasks)

    # --- __len__() body; header not visible in this excerpt ---
        return len(self._task_queue) + len(self.running_tasks)
# Memoized result of can_poll_device(); None means "not yet probed".
_can_poll_device = None

def can_poll_device():
    """
    Test if it's possible to use poll() on a device such as a pty. This
    is known to fail on Darwin.
    @returns: True if poll() on a device succeeds, False otherwise.
    """

    global _can_poll_device
    if _can_poll_device is not None:
        # Return the cached probe result.
        return _can_poll_device

    if not hasattr(select, "poll"):
        _can_poll_device = False
        return _can_poll_device

    # Probe with a real device; on platforms where poll() is broken for
    # devices (e.g. Darwin) the request comes back as POLLNVAL.
    # NOTE(review): the try/except around open() and the poll-object
    # creation are not visible in this excerpt.
        dev_null = open('/dev/null', 'rb')
        _can_poll_device = False
        return _can_poll_device

    p.register(dev_null.fileno(), PollConstants.POLLIN)

    invalid_request = False
    for f, event in p.poll():
        if event & PollConstants.POLLNVAL:
            invalid_request = True

    _can_poll_device = not invalid_request
    return _can_poll_device
def create_poll_instance():
    """
    Create an instance of select.poll, or an instance of
    PollSelectAdapter there is no poll() implementation or
    it is broken somehow.
    """
    # Guard clause: fall back to the select()-based emulation whenever
    # poll() cannot be trusted on devices (e.g. Darwin).
    if not can_poll_device():
        return PollSelectAdapter()
    return select.poll()
# Use the native implementation when available; otherwise emulate it by
# parsing /proc/loadavg (Linux-specific).
getloadavg = getattr(os, "getloadavg", None)
if getloadavg is None:
    # --- fallback definition; its "def getloadavg():" header is not
    # visible in this excerpt ---
        """
        Uses /proc/loadavg to emulate os.getloadavg().
        Raises OSError if the load average was unobtainable.
        """
        # NOTE(review): the try/except wrappers around the file read and
        # float conversions are not visible in this excerpt.
        loadavg_str = open('/proc/loadavg').readline()
        # getloadavg() is only supposed to raise OSError, so convert
        raise OSError('unknown')
        loadavg_split = loadavg_str.split()
        if len(loadavg_split) < 3:
            raise OSError('unknown')
        # First three whitespace-separated fields are the 1/5/15 minute
        # averages.
            loadavg_floats.append(float(loadavg_split[i]))
            raise OSError('unknown')
        return tuple(loadavg_floats)
class PollScheduler(object):
    """Base scheduler that multiplexes task I/O through a single poll()
    object, dispatching events to registered per-fd handlers."""

    class _sched_iface_class(SlotObject):
        # Narrow interface handed to tasks: register/schedule/unregister.
        __slots__ = ("register", "schedule", "unregister")

    # --- __init__ body; header and the _max_jobs assignment are not
    # visible in this excerpt ---
        self._max_load = None
        self._poll_event_queue = []
        # fd -> (handler, reg_id), and reg_id -> fd reverse map.
        self._poll_event_handlers = {}
        self._poll_event_handler_ids = {}
        # Increment id for each new handler.
        self._event_handler_id = 0
        self._poll_obj = create_poll_instance()
        self._scheduling = False

    def _schedule(self):
        """
        Calls _schedule_tasks() and automatically returns early from
        any recursive calls to this method that the _schedule_tasks()
        call might trigger. This makes _schedule() safe to call from
        inside exit listeners.
        """
        if self._scheduling:
        self._scheduling = True
            return self._schedule_tasks()
            self._scheduling = False

    def _running_job_count(self):

    def _can_add_job(self):
        # Respect both the job-count limit and the load-average limit.
        max_jobs = self._max_jobs
        max_load = self._max_load

        if self._max_jobs is not True and \
            self._running_job_count() >= self._max_jobs:

        if max_load is not None and \
            (max_jobs is True or max_jobs > 1) and \
            self._running_job_count() >= 1:
                avg1, avg5, avg15 = getloadavg()
            # Throttle on the 1-minute average only.
            if avg1 >= max_load:

    def _poll(self, timeout=None):
        """
        All poll() calls pass through here. The poll events
        are added directly to self._poll_event_queue.
        In order to avoid endless blocking, this raises
        StopIteration if timeout is None and there are
        no file descriptors to poll.
        """
        if not self._poll_event_handlers:
            if timeout is None and \
                not self._poll_event_handlers:
                raise StopIteration(
                    "timeout is None and there are no poll() event handlers")

        # The following error is known to occur with Linux kernel versions
        # (elided in this excerpt):
        #
        #   select.error: (4, 'Interrupted system call')
        #
        # This error has been observed after a SIGSTOP, followed by SIGCONT.
        # Treat it similar to EAGAIN if timeout is None, otherwise just return
        # without any events.
                self._poll_event_queue.extend(self._poll_obj.poll(timeout))
            except select.error, e:
                writemsg_level("\n!!! select error: %s\n" % (e,),
                    level=logging.ERROR, noiselevel=-1)
                if timeout is not None:

    def _next_poll_event(self, timeout=None):
        """
        Since the _schedule_wait() loop is called by event
        handlers from _poll_loop(), maintain a central event
        queue for both of them to share events from a single
        poll() call. In order to avoid endless blocking, this
        raises StopIteration if timeout is None and there are
        no file descriptors to poll.
        """
        if not self._poll_event_queue:
        return self._poll_event_queue.pop()

    def _poll_loop(self):
        # Dispatch events until every handler has unregistered itself.
        event_handlers = self._poll_event_handlers
        event_handled = False

        while event_handlers:
            f, event = self._next_poll_event()
            handler, reg_id = event_handlers[f]
            event_handled = True
        except StopIteration:
            event_handled = True

        if not event_handled:
            raise AssertionError("tight loop")

    def _schedule_yield(self):
        """
        Schedule for a short period of time chosen by the scheduler based
        on internal state. Synchronous tasks should call this periodically
        in order to allow the scheduler to service pending poll events. The
        scheduler will call poll() exactly once, without blocking, and any
        resulting poll events will be serviced.
        """
        event_handlers = self._poll_event_handlers

        if not event_handlers:
            return bool(events_handled)

        if not self._poll_event_queue:

        while event_handlers and self._poll_event_queue:
            f, event = self._next_poll_event()
            handler, reg_id = event_handlers[f]
        except StopIteration:

        return bool(events_handled)

    def _register(self, f, eventmask, handler):
        """
        @return: A unique registration id, for use in schedule() or
        unregister() calls.
        """
        if f in self._poll_event_handlers:
            raise AssertionError("fd %d is already registered" % f)
        self._event_handler_id += 1
        reg_id = self._event_handler_id
        self._poll_event_handler_ids[reg_id] = f
        self._poll_event_handlers[f] = (handler, reg_id)
        self._poll_obj.register(f, eventmask)

    def _unregister(self, reg_id):
        # Reverse of _register(): drop both map entries and the poll fd.
        f = self._poll_event_handler_ids[reg_id]
        self._poll_obj.unregister(f)
        del self._poll_event_handlers[f]
        del self._poll_event_handler_ids[reg_id]

    def _schedule_wait(self, wait_ids):
        """
        Schedule until wait_id is not longer registered
        as an event handler.
        @param wait_id: a task id to wait for
        """
        event_handlers = self._poll_event_handlers
        handler_ids = self._poll_event_handler_ids
        event_handled = False

        # Accept a single id as well as a collection of ids.
        if isinstance(wait_ids, int):
            wait_ids = frozenset([wait_ids])

        while wait_ids.intersection(handler_ids):
            f, event = self._next_poll_event()
            handler, reg_id = event_handlers[f]
            event_handled = True
        except StopIteration:
            event_handled = True

        return event_handled
class QueueScheduler(PollScheduler):
    """
    Add instances of SequentialTaskQueue and then call run(). The
    run() method returns when no tasks remain.
    """

    def __init__(self, max_jobs=None, max_load=None):
        PollScheduler.__init__(self)

        if max_jobs is None:

        self._max_jobs = max_jobs
        self._max_load = max_load

        # Tasks interact with the scheduler only through this interface.
        self.sched_iface = self._sched_iface_class(
            register=self._register,
            schedule=self._schedule_wait,
            unregister=self._unregister)

        self._schedule_listeners = []

    # --- add() body; header not visible in this excerpt ---
        self._queues.append(q)

    def remove(self, q):
        self._queues.remove(q)

    # --- run() body; header not visible in this excerpt ---
        # Drain: keep scheduling while anything remains queued/running.
        while self._schedule():

        while self._running_job_count():

    def _schedule_tasks(self):
        """
        @returns: True if there may be remaining tasks to schedule,
        False otherwise.
        """
        while self._can_add_job():
            # Room left under the concurrency cap.
            n = self._max_jobs - self._running_job_count()

            if not self._start_next_job(n):

        for q in self._queues:

    def _running_job_count(self):
        # Sum running tasks across all managed queues.
        for q in self._queues:
            job_count += len(q.running_tasks)
        self._jobs = job_count

    def _start_next_job(self, n=1):
        # Ask each queue to schedule; count how many tasks actually
        # started by diffing running_tasks sizes.
        for q in self._queues:
            initial_job_count = len(q.running_tasks)
            final_job_count = len(q.running_tasks)
            if final_job_count > initial_job_count:
                started_count += (final_job_count - initial_job_count)
            if started_count >= n:
        return started_count
class TaskScheduler(object):
    """
    A simple way to handle scheduling of AsynchrousTask instances. Simply
    add tasks and call run(). The run() method returns when no tasks remain.
    """

    def __init__(self, max_jobs=None, max_load=None):
        # Compose a single FIFO queue with a QueueScheduler that drains it.
        self._queue = SequentialTaskQueue(max_jobs=max_jobs)
        self._scheduler = QueueScheduler(
            max_jobs=max_jobs, max_load=max_load)
        # Re-export the scheduler's interface and run() entry point.
        self.sched_iface = self._scheduler.sched_iface
        self.run = self._scheduler.run
        self._scheduler.add(self._queue)

    def add(self, task):
        self._queue.add(task)
class JobStatusDisplay(object):
    """Renders the one-line 'Jobs: X of Y complete ...' status on a
    terminal, using termcap codes when available and falling back to
    plain newline output on non-tty streams."""

    # Attributes that trigger a redraw when changed via __setattr__.
    _bound_properties = ("curval", "failed", "running")
    _jobs_column_width = 48

    # Don't update the display unless at least this much
    # time has passed, in units of seconds.
    _min_display_latency = 2

    _default_term_codes = {

    _termcap_name_map = {
        'carriage_return' : 'cr',

    def __init__(self, out=sys.stdout, quiet=False):
        # object.__setattr__ is used throughout because __setattr__ is
        # overridden below to trigger redraws on bound properties.
        object.__setattr__(self, "out", out)
        object.__setattr__(self, "quiet", quiet)
        object.__setattr__(self, "maxval", 0)
        object.__setattr__(self, "merges", 0)
        object.__setattr__(self, "_changed", False)
        object.__setattr__(self, "_displayed", False)
        object.__setattr__(self, "_last_display_time", 0)
        object.__setattr__(self, "width", 80)

        isatty = hasattr(out, "isatty") and out.isatty()
        object.__setattr__(self, "_isatty", isatty)
        if not isatty or not self._init_term():
            # Fall back to hard-coded codes when termcap is unusable.
            for k, capname in self._termcap_name_map.iteritems():
                term_codes[k] = self._default_term_codes[capname]
            object.__setattr__(self, "_term_codes", term_codes)
        # Normalize code values to unicode for safe concatenation.
        encoding = sys.getdefaultencoding()
        for k, v in self._term_codes.items():
            if not isinstance(v, basestring):
                self._term_codes[k] = v.decode(encoding, 'replace')

    def _init_term(self):
        """
        Initialize term control codes.
        @returns: True if term codes were successfully initialized,
        False otherwise.
        """
        term_type = os.environ.get("TERM", "vt100")
            curses.setupterm(term_type, self.out.fileno())
            tigetstr = curses.tigetstr
        except curses.error:

        if tigetstr is None:

        # Look up each capability, substituting the default on failure.
        for k, capname in self._termcap_name_map.iteritems():
            code = tigetstr(capname)
                code = self._default_term_codes[capname]
            term_codes[k] = code
        object.__setattr__(self, "_term_codes", term_codes)

    def _format_msg(self, msg):
        return ">>> %s" % msg

    # --- erase helper body; header not visible in this excerpt ---
            self._term_codes['carriage_return'] + \
            self._term_codes['clr_eol'])
        self._displayed = False

    def _display(self, line):
        self.out.write(line)
        self._displayed = True

    def _update(self, msg):
        if not self._isatty:
            # Non-terminal output: plain line per update, no cursor games.
            out.write(self._format_msg(msg) + self._term_codes['newline'])
            self._displayed = True

        self._display(self._format_msg(msg))

    def displayMessage(self, msg):
        was_displayed = self._displayed

        if self._isatty and self._displayed:

        self.out.write(self._format_msg(msg) + self._term_codes['newline'])
        self._displayed = False

    # --- reset() body; header not visible in this excerpt ---
        self._changed = True
        for name in self._bound_properties:
            object.__setattr__(self, name, 0)
            self.out.write(self._term_codes['newline'])
            self._displayed = False

    def __setattr__(self, name, value):
        old_value = getattr(self, name)
        if value == old_value:
        object.__setattr__(self, name, value)

        if name in self._bound_properties:
            self._property_change(name, old_value, value)

    def _property_change(self, name, old_value, new_value):
        # Mark dirty so the next display() call redraws.
        self._changed = True

    def _load_avg_str(self):
        # Render the load averages with a precision derived from digits.
        return ", ".join(("%%.%df" % digits ) % x for x in avg)

    # --- display() body; header not visible in this excerpt ---
        """
        Display status on stdout, but only if something has
        changed since the last call.
        """
        # Rate-limit tty redraws to _min_display_latency seconds.
        current_time = time.time()
        time_delta = current_time - self._last_display_time
        if self._displayed and \
            if not self._isatty:
            if time_delta < self._min_display_latency:
        self._last_display_time = current_time
        self._changed = False
        self._display_status()

    def _display_status(self):
        # Don't use len(self._completed_tasks) here since that also
        # can include uninstall tasks.
        curval_str = str(self.curval)
        maxval_str = str(self.maxval)
        running_str = str(self.running)
        failed_str = str(self.failed)
        load_avg_str = self._load_avg_str()

        # Two parallel buffers: colorized output for the terminal and a
        # plain mirror used for padding/truncation math.
        color_output = StringIO()
        plain_output = StringIO()
        style_file = portage.output.ConsoleStyleFile(color_output)
        style_file.write_listener = plain_output
        style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
        style_writer.style_listener = style_file.new_styles
        f = formatter.AbstractFormatter(style_writer)

        number_style = "INFORM"
        f.add_literal_data("Jobs: ")
        f.push_style(number_style)
        f.add_literal_data(curval_str)
        f.add_literal_data(" of ")
        f.push_style(number_style)
        f.add_literal_data(maxval_str)
        f.add_literal_data(" complete")

            f.add_literal_data(", ")
            f.push_style(number_style)
            f.add_literal_data(running_str)
            f.add_literal_data(" running")

            f.add_literal_data(", ")
            f.push_style(number_style)
            f.add_literal_data(failed_str)
            f.add_literal_data(" failed")

        # Pad the jobs column to a fixed width before the load average.
        padding = self._jobs_column_width - len(plain_output.getvalue())
            f.add_literal_data(padding * " ")

        f.add_literal_data("Load avg: ")
        f.add_literal_data(load_avg_str)

        # Truncate to fit width, to avoid making the terminal scroll if the
        # line overflows (happens when the load average is large).
        plain_output = plain_output.getvalue()
        if self._isatty and len(plain_output) > self.width:
            # Use plain_output here since it's easier to truncate
            # properly than the color output which contains console
            # color codes.
            self._update(plain_output[:self.width])
            self._update(color_output.getvalue())

        xtermTitle(" ".join(plain_output.split()))
class Scheduler(PollScheduler):
    """Drives the actual merge phase of emerge: builds, fetches and
    merges the packages of a merge list in dependency order, with
    parallel job support."""

    # Options under which blocker conflicts are not enforced.
    _opts_ignore_blockers = \
        frozenset(["--buildpkgonly",
        "--fetchonly", "--fetch-all-uri",
        "--nodeps", "--pretend"])

    # Options that keep build output on the foreground terminal.
    _opts_no_background = \
        frozenset(["--pretend",
        "--fetchonly", "--fetch-all-uri"])

    # Options under which emerge does not restart itself after a
    # portage upgrade.
    _opts_no_restart = frozenset(["--buildpkgonly",
        "--fetchonly", "--fetch-all-uri", "--pretend"])

    # Options that are unsafe to combine with --resume.
    _bad_resume_opts = set(["--ask", "--changelog",
        "--resume", "--skipfirst"])

    _fetch_log = "/var/log/emerge-fetch.log"
    class _iface_class(SlotObject):
        # Callback bundle handed to dblink/build tasks so they can talk
        # back to the scheduler without holding a reference to it.
        __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
            "dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
            "scheduleSetup", "scheduleUnpack", "scheduleYield",
            "unregister")

    class _fetch_iface_class(SlotObject):
        # Minimal interface used by fetch tasks: a log file path and a
        # schedule callback.
        __slots__ = ("log_file", "schedule")

    # Named task queues: merges, build jobs, fetches and unpacks.
    _task_queues_class = slot_dict_class(
        ("merge", "jobs", "fetch", "unpack"), prefix="")
    class _build_opts_class(SlotObject):
        # Boolean build options derived from command-line flags
        # (slot name maps to "--slot-name" in myopts).
        __slots__ = ("buildpkg", "buildpkgonly",
            "fetch_all_uri", "fetchonly", "pretend")

    class _binpkg_opts_class(SlotObject):
        # Boolean binary-package options derived from command-line flags.
        __slots__ = ("fetchonly", "getbinpkg", "pretend")

    class _pkg_count_class(SlotObject):
        # Progress counter: packages merged so far vs. total to merge.
        __slots__ = ("curval", "maxval")

    class _emerge_log_class(SlotObject):
        __slots__ = ("xterm_titles",)

        def log(self, *pargs, **kwargs):
            if not self.xterm_titles:
                # Avoid interference with the scheduler's status display.
                kwargs.pop("short_msg", None)
            emergelog(self.xterm_titles, *pargs, **kwargs)
    class _failed_pkg(SlotObject):
        # Record of a failed merge: package, its build dir/log and the
        # failing return code, kept for the final failure summary.
        __slots__ = ("build_dir", "build_log", "pkg", "returncode")
10113 class _ConfigPool(object):
10114 """Interface for a task to temporarily allocate a config
10115 instance from a pool. This allows a task to be constructed
10116 long before the config instance actually becomes needed, like
10117 when prefetchers are constructed for the whole merge list."""
10118 __slots__ = ("_root", "_allocate", "_deallocate")
10119 def __init__(self, root, allocate, deallocate):
10121 self._allocate = allocate
10122 self._deallocate = deallocate
10123 def allocate(self):
10124 return self._allocate(self._root)
10125 def deallocate(self, settings):
10126 self._deallocate(settings)
    class _unknown_internal_error(portage.exception.PortageException):
        """
        Used internally to terminate scheduling. The specific reason for
        the failure should have been dumped to stderr.
        """
        def __init__(self, value=""):
            portage.exception.PortageException.__init__(self, value)
    def __init__(self, settings, trees, mtimedb, myopts,
        spinner, mergelist, favorites, digraph):
        PollScheduler.__init__(self)
        self.settings = settings
        self.target_root = settings["ROOT"]
        self.myopts = myopts
        self._spinner = spinner
        self._mtimedb = mtimedb
        self._mergelist = mergelist
        self._favorites = favorites
        self._args_set = InternalPackageSet(favorites)

        # Translate each slot name into its "--slot-name" CLI flag and
        # record whether it was passed.
        self._build_opts = self._build_opts_class()
        for k in self._build_opts.__slots__:
            setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
        self._binpkg_opts = self._binpkg_opts_class()
        for k in self._binpkg_opts.__slots__:
            setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)

        self._logger = self._emerge_log_class()
        # One SequentialTaskQueue per named queue (merge/jobs/fetch/unpack).
        self._task_queues = self._task_queues_class()
        for k in self._task_queues.allowed_keys:
            setattr(self._task_queues, k,
                SequentialTaskQueue())

        # Holds merges that will wait to be executed when no builds are
        # executing. This is useful for system packages since dependencies
        # on system packages are frequently unspecified.
        self._merge_wait_queue = []
        # Holds merges that have been transfered from the merge_wait_queue to
        # the actual merge queue. They are removed from this list upon
        # completion. Other packages can start building only when this list is
        # empty.
        self._merge_wait_scheduled = []

        # Holds system packages and their deep runtime dependencies. Before
        # being merged, these packages go to merge_wait_queue, to be merged
        # when no other packages are building.
        self._deep_system_deps = set()

        # Holds packages to merge which will satisfy currently unsatisfied
        # deep runtime dependencies of system packages. If this is not empty
        # then no parallel builds will be spawned until it is empty. This
        # minimizes the possibility that a build will fail due to the system
        # being in a fragile state. For example, see bug #259954.
        self._unsatisfied_system_deps = set()

        self._status_display = JobStatusDisplay()
        self._max_load = myopts.get("--load-average")
        max_jobs = myopts.get("--jobs")
        if max_jobs is None:
        self._set_max_jobs(max_jobs)

        # The root where the currently running
        # portage instance is installed.
        self._running_root = trees["/"]["root_config"]
        if settings.get("PORTAGE_DEBUG", "") == "1":
        self.pkgsettings = {}
        self._config_pool = {}
        self._blocker_db = {}
        # NOTE(review): the loop header over roots is not visible in this
        # excerpt.
            self._config_pool[root] = []
            self._blocker_db[root] = BlockerDB(trees[root]["root_config"])

        fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
            schedule=self._schedule_fetch)
        # Bundle scheduler callbacks for tasks (see _iface_class).
        self._sched_iface = self._iface_class(
            dblinkEbuildPhase=self._dblink_ebuild_phase,
            dblinkDisplayMerge=self._dblink_display_merge,
            dblinkElog=self._dblink_elog,
            dblinkEmergeLog=self._dblink_emerge_log,
            fetch=fetch_iface, register=self._register,
            schedule=self._schedule_wait,
            scheduleSetup=self._schedule_setup,
            scheduleUnpack=self._schedule_unpack,
            scheduleYield=self._schedule_yield,
            unregister=self._unregister)

        # Weak refs let finished prefetchers be garbage collected.
        self._prefetchers = weakref.WeakValueDictionary()
        self._pkg_queue = []
        self._completed_tasks = set()

        self._failed_pkgs = []
        self._failed_pkgs_all = []
        self._failed_pkgs_die_msgs = []
        self._post_mod_echo_msgs = []
        self._parallel_fetch = False
        merge_count = len([x for x in mergelist \
            if isinstance(x, Package) and x.operation == "merge"])
        self._pkg_count = self._pkg_count_class(
            curval=0, maxval=merge_count)
        self._status_display.maxval = self._pkg_count.maxval

        # The load average takes some time to respond when new
        # jobs are added, so we need to limit the rate of adding
        # new jobs.
        self._job_delay_max = 10
        self._job_delay_factor = 1.0
        self._job_delay_exp = 1.5
        self._previous_job_start_time = None

        self._set_digraph(digraph)

        # This is used to memoize the _choose_pkg() result when
        # no packages can be chosen until one of the existing
        # tasks completes.
        self._choose_pkg_return_early = False

        features = self.settings.features
        if "parallel-fetch" in features and \
            not ("--pretend" in self.myopts or \
            "--fetch-all-uri" in self.myopts or \
            "--fetchonly" in self.myopts):
            # parallel-fetch needs distlocks to avoid concurrent-download
            # corruption; warn and disable it otherwise.
            if "distlocks" not in features:
                portage.writemsg(red("!!!")+"\n", noiselevel=-1)
                portage.writemsg(red("!!!")+" parallel-fetching " + \
                    "requires the distlocks feature enabled"+"\n",
                portage.writemsg(red("!!!")+" you have it disabled, " + \
                    "thus parallel-fetching is being disabled"+"\n",
                portage.writemsg(red("!!!")+"\n", noiselevel=-1)
            elif len(mergelist) > 1:
                self._parallel_fetch = True

        if self._parallel_fetch:
                # clear out existing fetch log if it exists
                open(self._fetch_log, 'w')
            except EnvironmentError:

        # Identify the installed portage instance, if any, so the
        # scheduler can detect when portage itself gets upgraded.
        self._running_portage = None
        portage_match = self._running_root.trees["vartree"].dbapi.match(
            portage.const.PORTAGE_PACKAGE_ATOM)
            cpv = portage_match.pop()
            self._running_portage = self._pkg(cpv, "installed",
                self._running_root, installed=True)
def _poll(self, timeout=None):
    """Delegate polling to the PollScheduler base class implementation."""
    # NOTE(review): one original line is missing from this excerpt between
    # the def and the call (possibly a comment or docstring) — confirm
    # against the full file.
    PollScheduler._poll(self, timeout=timeout)
10284 def _set_max_jobs(self, max_jobs):
10285 self._max_jobs = max_jobs
10286 self._task_queues.jobs.max_jobs = max_jobs
def _background_mode(self):
    """
    Check if background mode is enabled and adjust states as necessary.

    @rtype: bool
    @returns: True if background mode is enabled, False otherwise.
    """
    background = (self._max_jobs is True or \
        self._max_jobs > 1 or "--quiet" in self.myopts) and \
        not bool(self._opts_no_background.intersection(self.myopts))

    # Interactive packages need the tty, so force output to stdio and a
    # single job when any are present in the merge list.
    interactive_tasks = self._get_interactive_tasks()
    if interactive_tasks:
        writemsg_level(">>> Sending package output to stdio due " + \
            "to interactive package(s):\n",
            level=logging.INFO, noiselevel=-1)
        # NOTE(review): a line is missing here (presumably "msg = []",
        # since msg.append() is used below) — confirm against the full file.
        for pkg in interactive_tasks:
            pkg_str = " " + colorize("INFORM", str(pkg.cpv))
            if pkg.root != "/":
                pkg_str += " for " + pkg.root
            msg.append(pkg_str)

        writemsg_level("".join("%s\n" % (l,) for l in msg),
            level=logging.INFO, noiselevel=-1)
        if self._max_jobs is True or self._max_jobs > 1:
            self._set_max_jobs(1)
            writemsg_level(">>> Setting --jobs=1 due " + \
                "to the above interactive package(s)\n",
                level=logging.INFO, noiselevel=-1)

    # Quiet status display when backgrounded, or when --quiet without
    # --verbose is requested.
    self._status_display.quiet = \
        not background or \
        ("--quiet" in self.myopts and \
        "--verbose" not in self.myopts)

    self._logger.xterm_titles = \
        "notitles" not in self.settings.features and \
        self._status_display.quiet

    # NOTE(review): the trailing "return background" appears to be missing
    # from this excerpt.
def _get_interactive_tasks(self):
    """Return merge-list packages whose PROPERTIES contain "interactive"."""
    from portage import flatten
    from portage.dep import use_reduce, paren_reduce
    interactive_tasks = []
    for task in self._mergelist:
        if not (isinstance(task, Package) and \
            task.operation == "merge"):
            # NOTE(review): lines missing from this excerpt here —
            # presumably "continue" and the opening "try:" that matches
            # the except clause below.
        properties = flatten(use_reduce(paren_reduce(
            task.metadata["PROPERTIES"]), uselist=task.use.enabled))
        except portage.exception.InvalidDependString, e:
            # A malformed PROPERTIES string is unrecoverable here.
            show_invalid_depstring_notice(task,
                task.metadata["PROPERTIES"], str(e))
            raise self._unknown_internal_error()
        if "interactive" in properties:
            interactive_tasks.append(task)
    return interactive_tasks
def _set_digraph(self, digraph):
    """Store the dependency graph, unless scheduling is effectively serial
    (in which case graph-based job selection is pointless)."""
    if "--nodeps" in self.myopts or \
        (self._max_jobs is not True and self._max_jobs < 2):
        # NOTE(review): one line is missing between the condition and the
        # assignment below (likely a comment).
        self._digraph = None
        # NOTE(review): an early "return" appears to be missing here.
    self._digraph = digraph
    self._find_system_deps()
    self._prune_digraph()
    self._prevent_builddir_collisions()
def _find_system_deps(self):
    """
    Find system packages and their deep runtime dependencies. Before being
    merged, these packages go to merge_wait_queue, to be merged when no
    other packages are building.
    """
    system_deps = self._deep_system_deps
    system_deps.clear()
    system_deps.update(_find_deep_system_runtime_deps(self._digraph))
    # Only packages actually scheduled for merge are interesting here.
    non_merge = [node for node in system_deps if node.operation != "merge"]
    system_deps.difference_update(non_merge)
def _prune_digraph(self):
    """
    Prune any root nodes that are irrelevant.
    """
    graph = self._digraph
    completed_tasks = self._completed_tasks
    removed_nodes = set()
    # NOTE(review): a loop header (likely "while True:") appears to be
    # missing from this excerpt; the clear()/re-scan pattern below implies
    # repeated passes until no more irrelevant roots remain.
    # NOTE(review): one condition line of the "if" below is also missing,
    # between the nomerge test and the completed_tasks test.
    for node in graph.root_nodes():
        if not isinstance(node, Package) or \
            (node.installed and node.operation == "nomerge") or \
            node in completed_tasks:
            removed_nodes.add(node)
    # NOTE(review): a guard line (likely "if removed_nodes:") appears to
    # be missing before the difference_update below.
    graph.difference_update(removed_nodes)
    if not removed_nodes:
        # NOTE(review): a "break" appears to be missing here; the clear()
        # below most likely belongs one level out, at the end of a pass.
        removed_nodes.clear()
def _prevent_builddir_collisions(self):
    """
    When building stages, sometimes the same exact cpv needs to be merged
    to both $ROOTs. Add edges to the digraph in order to avoid collisions
    in the builddir. Currently, normal file locks would be inappropriate
    for this purpose since emerge holds all of it's build dir locks...
    (docstring tail truncated in this excerpt)
    """
    # NOTE(review): a line initializing the map (likely "cpv_map = {}")
    # is missing from this excerpt.
    for pkg in self._mergelist:
        if not isinstance(pkg, Package):
            # a satisfied blocker
            # NOTE(review): "continue" appears to be missing here.
        if pkg.cpv not in cpv_map:
            cpv_map[pkg.cpv] = [pkg]
            # NOTE(review): "continue" appears to be missing here —
            # the first occurrence of a cpv needs no edges.
        # Serialize same-cpv merges by adding buildtime edges from every
        # earlier occurrence to this one.
        for earlier_pkg in cpv_map[pkg.cpv]:
            self._digraph.add(earlier_pkg, pkg,
                priority=DepPriority(buildtime=True))
        cpv_map[pkg.cpv].append(pkg)
class _pkg_failure(portage.exception.PortageException):
    """
    An instance of this class is raised by unmerge() when
    an uninstallation fails.
    """
    # NOTE(review): a class attribute (likely a default "status") appears
    # to be missing from this excerpt.

    def __init__(self, *pargs):
        portage.exception.PortageException.__init__(self, pargs)
        # NOTE(review): a guard (likely "if pargs:") appears to be missing
        # before the assignment below.
        self.status = pargs[0]
10431 def _schedule_fetch(self, fetcher):
10433 Schedule a fetcher on the fetch queue, in order to
10434 serialize access to the fetch log.
10436 self._task_queues.fetch.addFront(fetcher)
10438 def _schedule_setup(self, setup_phase):
10440 Schedule a setup phase on the merge queue, in order to
10441 serialize unsandboxed access to the live filesystem.
10443 self._task_queues.merge.addFront(setup_phase)
10446 def _schedule_unpack(self, unpack_phase):
10448 Schedule an unpack phase on the unpack queue, in order
10449 to serialize $DISTDIR access for live ebuilds.
10451 self._task_queues.unpack.add(unpack_phase)
10453 def _find_blockers(self, new_pkg):
10455 Returns a callable which should be called only when
10456 the vdb lock has been acquired.
10458 def get_blockers():
10459 return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10460 return get_blockers
def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
    """Collect dblink objects for installed packages blocked by *new_pkg*."""
    if self._opts_ignore_blockers.intersection(self.myopts):
        # NOTE(review): the early-return body (likely "return None") is
        # missing from this excerpt.

    # Call gc.collect() here to avoid heap overflow that
    # triggers 'Cannot allocate memory' errors (reported
    # with python-2.5).
    # NOTE(review): the gc import/collect lines themselves are missing
    # from this excerpt.

    blocker_db = self._blocker_db[new_pkg.root]

    blocker_dblinks = []
    for blocking_pkg in blocker_db.findInstalledBlockers(
        new_pkg, acquire_lock=acquire_lock):
        # A package never blocks its own slot or version.
        if new_pkg.slot_atom == blocking_pkg.slot_atom:
            # NOTE(review): "continue" appears to be missing here.
        if new_pkg.cpv == blocking_pkg.cpv:
            # NOTE(review): "continue" appears to be missing here.
        blocker_dblinks.append(portage.dblink(
            blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
            self.pkgsettings[blocking_pkg.root], treetype="vartree",
            vartree=self.trees[blocking_pkg.root]["vartree"]))

    return blocker_dblinks
def _dblink_pkg(self, pkg_dblink):
    """Translate a dblink instance back into the scheduler's Package
    object for the same cpv, root and tree."""
    tree_type = RootConfig.tree_pkg_map[pkg_dblink.treetype]
    root_conf = self.trees[pkg_dblink.myroot]["root_config"]
    return self._pkg(pkg_dblink.mycpv, tree_type, root_conf,
        installed=(tree_type == "installed"))
def _append_to_log_path(self, log_path, msg):
    """Append *msg* to the log file at *log_path*."""
    f = open(log_path, 'a')
    # NOTE(review): the remainder of this method (the write and close,
    # probably inside a try/finally) is missing from this excerpt.
def _dblink_elog(self, pkg_dblink, phase, func, msgs):
    """Route elog messages from a dblink callback, appending them to the
    build log when running in the background."""
    log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
    # NOTE(review): lines missing here — likely default initialization of
    # "out" and "log_file".
    background = self._background

    if background and log_path is not None:
        log_file = open(log_path, 'a')
    # NOTE(review): lines missing here — likely "out = log_file" wiring,
    # a try block and a "for msg in msgs:" loop providing the "msg" name
    # used below.
    func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)

    if log_file is not None:
        # NOTE(review): cleanup (likely "log_file.close()") is missing.
10522 def _dblink_emerge_log(self, msg):
10523 self._logger.log(msg)
def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
    """Display a merge-time message, routing it to the build log instead
    of the terminal when backgrounded."""
    log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
    background = self._background

    if log_path is None:
        # No log file: print unless backgrounded and below WARN level.
        if not (background and level < logging.WARN):
            portage.util.writemsg_level(msg,
                level=level, noiselevel=noiselevel)
        # NOTE(review): lines missing here — likely an early "return" and
        # an "if not background:" guard for the branch below.
    portage.util.writemsg_level(msg,
        level=level, noiselevel=noiselevel)
    self._append_to_log_path(log_path, msg)
def _dblink_ebuild_phase(self,
    pkg_dblink, pkg_dbapi, ebuild_path, phase):
    """
    Using this callback for merge phases allows the scheduler
    to run while these phases execute asynchronously, and allows
    the scheduler control output handling.
    """
    scheduler = self._sched_iface
    settings = pkg_dblink.settings
    pkg = self._dblink_pkg(pkg_dblink)
    background = self._background
    # NOTE(review): log_path is unused in the visible lines; a use of it
    # may be among the lines missing from this excerpt — confirm.
    log_path = settings.get("PORTAGE_LOG_FILE")

    # Run the phase synchronously from this caller's point of view, while
    # still letting the scheduler event loop drive it.
    ebuild_phase = EbuildPhase(background=background,
        pkg=pkg, phase=phase, scheduler=scheduler,
        settings=settings, tree=pkg_dblink.treetype)
    ebuild_phase.start()
    ebuild_phase.wait()

    return ebuild_phase.returncode
def _generate_digests(self):
    """
    Generate digests if necessary for --digests or FEATURES=digest.
    In order to avoid interference, this must done before parallel
    (docstring tail truncated in this excerpt)
    """
    if '--fetchonly' in self.myopts:
        # NOTE(review): an early "return os.EX_OK" appears to be missing.

    digest = '--digest' in self.myopts
    # NOTE(review): a guard (likely "if not digest:") appears to be
    # missing around the feature scan below.
    for pkgsettings in self.pkgsettings.itervalues():
        if 'digest' in pkgsettings.features:
            # NOTE(review): lines missing — likely "digest = True" /
            # "break", plus an early return when no digests are needed.

    for x in self._mergelist:
        if not isinstance(x, Package) or \
            x.type_name != 'ebuild' or \
            x.operation != 'merge':
            # NOTE(review): "continue" appears to be missing here.
        pkgsettings = self.pkgsettings[x.root]
        if '--digest' not in self.myopts and \
            'digest' not in pkgsettings.features:
            # NOTE(review): "continue" appears to be missing here.
        portdb = x.root_config.trees['porttree'].dbapi
        ebuild_path = portdb.findname(x.cpv)
        if not ebuild_path:
            # NOTE(review): the opening "writemsg_level(" line is missing
            # before the message arguments below, as is an error return.
            "!!! Could not locate ebuild for '%s'.\n" \
            % x.cpv, level=logging.ERROR, noiselevel=-1)
        pkgsettings['O'] = os.path.dirname(ebuild_path)
        if not portage.digestgen([], pkgsettings, myportdb=portdb):
            # NOTE(review): the opening "writemsg_level(" line is missing
            # before the message arguments below, as is an error return.
            "!!! Unable to generate manifest for '%s'.\n" \
            % x.cpv, level=logging.ERROR, noiselevel=-1)

    # NOTE(review): a trailing "return os.EX_OK" appears to be missing.
def _check_manifests(self):
    """Verify ebuild Manifests up front so failures surface early."""
    # Verify all the manifests now so that the user is notified of failure
    # as soon as possible.
    if "strict" not in self.settings.features or \
        "--fetchonly" in self.myopts or \
        "--fetch-all-uri" in self.myopts:
        # NOTE(review): an early "return os.EX_OK" appears to be missing.

    shown_verifying_msg = False
    quiet_settings = {}
    # Build per-root quiet config clones so digestcheck output stays terse.
    for myroot, pkgsettings in self.pkgsettings.iteritems():
        quiet_config = portage.config(clone=pkgsettings)
        quiet_config["PORTAGE_QUIET"] = "1"
        quiet_config.backup_changes("PORTAGE_QUIET")
        quiet_settings[myroot] = quiet_config
        # NOTE(review): a line (likely "del quiet_config") appears to be
        # missing here.

    for x in self._mergelist:
        if not isinstance(x, Package) or \
            x.type_name != "ebuild":
            # NOTE(review): "continue" appears to be missing here.

        if not shown_verifying_msg:
            shown_verifying_msg = True
            self._status_msg("Verifying ebuild manifests")

        root_config = x.root_config
        portdb = root_config.trees["porttree"].dbapi
        quiet_config = quiet_settings[root_config.root]
        quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
        if not portage.digestcheck([], quiet_config, strict=True):
            # NOTE(review): an error return (likely "return 1") is missing.

    # NOTE(review): a trailing "return os.EX_OK" appears to be missing.
def _add_prefetchers(self):
    """Spawn background fetch tasks for all but the first package."""
    if not self._parallel_fetch:
        # NOTE(review): an early "return" appears to be missing here.

    if self._parallel_fetch:
        self._status_msg("Starting parallel fetch")

        prefetchers = self._prefetchers
        # NOTE(review): getbinpkg is unused in the visible lines; it may
        # be used by lines missing from this excerpt — confirm.
        getbinpkg = "--getbinpkg" in self.myopts

        # In order to avoid "waiting for lock" messages
        # at the beginning, which annoy users, never
        # spawn a prefetcher for the first package.
        for pkg in self._mergelist[1:]:
            prefetcher = self._create_prefetcher(pkg)
            if prefetcher is not None:
                self._task_queues.fetch.add(prefetcher)
                prefetchers[pkg] = prefetcher
def _create_prefetcher(self, pkg):
    """
    @return: a prefetcher, or None if not applicable
    """
    # NOTE(review): the "prefetcher = None" initialization and the body of
    # the non-Package branch are missing from this excerpt.
    if not isinstance(pkg, Package):

    elif pkg.type_name == "ebuild":

        prefetcher = EbuildFetcher(background=True,
            config_pool=self._ConfigPool(pkg.root,
            self._allocate_config, self._deallocate_config),
            fetchonly=1, logfile=self._fetch_log,
            pkg=pkg, prefetch=True, scheduler=self._sched_iface)

    elif pkg.type_name == "binary" and \
        "--getbinpkg" in self.myopts and \
        pkg.root_config.trees["bintree"].isremote(pkg.cpv):

        prefetcher = BinpkgPrefetcher(background=True,
            pkg=pkg, scheduler=self._sched_iface)

    # NOTE(review): a trailing "return prefetcher" appears to be missing.
def _is_restart_scheduled(self):
    """
    Check if the merge list contains a replacement
    for the current running instance, that will result
    in restart after merge.
    @rtype: bool
    @returns: True if a restart is scheduled, False otherwise.
    """
    if self._opts_no_restart.intersection(self.myopts):
        # NOTE(review): "return False" appears to be missing here.

    mergelist = self._mergelist

    # A restart only makes sense when portage is not the very last entry,
    # since otherwise there is nothing left to merge afterwards.
    for i, pkg in enumerate(mergelist):
        if self._is_restart_necessary(pkg) and \
            i != len(mergelist) - 1:
            # NOTE(review): "return True" appears to be missing here.

    # NOTE(review): a trailing "return False" appears to be missing.
def _is_restart_necessary(self, pkg):
    """
    @return: True if merging the given package
        requires restart, False otherwise.
    """
    # Figure out if we need a restart.
    if pkg.root == self._running_root.root and \
        portage.match_from_list(
        portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
        if self._running_portage:
            # Restart only if the version actually changes.
            return pkg.cpv != self._running_portage.cpv
        # NOTE(review): lines missing here — likely "return True" for the
        # unknown-running-version case and a trailing "return False".
def _restart_if_necessary(self, pkg):
    """
    Use execv() to restart emerge. This happens
    if portage upgrades itself and there are
    remaining packages in the list.
    """
    if self._opts_no_restart.intersection(self.myopts):
        # NOTE(review): "return" appears to be missing here.

    if not self._is_restart_necessary(pkg):
        # NOTE(review): "return" appears to be missing here.

    if pkg == self._mergelist[-1]:
        # Nothing remains after this merge, so no restart needed.
        # NOTE(review): "return" appears to be missing here.

    self._main_loop_cleanup()

    logger = self._logger
    pkg_count = self._pkg_count
    mtimedb = self._mtimedb
    bad_resume_opts = self._bad_resume_opts

    logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
        (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))

    logger.log(" *** RESTARTING " + \
        "emerge via exec() after change of " + \
        "portage version.")

    # Drop the just-merged portage from the resume list before restarting.
    mtimedb["resume"]["mergelist"].remove(list(pkg))
    # NOTE(review): a "mtimedb.commit()" appears to be missing here.
    portage.run_exitfuncs()
    mynewargv = [sys.argv[0], "--resume"]
    resume_opts = self.myopts.copy()
    # For automatic resume, we need to prevent
    # any of bad_resume_opts from leaking in
    # via EMERGE_DEFAULT_OPTS.
    resume_opts["--ignore-default-opts"] = True
    for myopt, myarg in resume_opts.iteritems():
        if myopt not in bad_resume_opts:
            # NOTE(review): a guard (likely "if myarg is True:") appears
            # to be missing between the two append calls below.
            mynewargv.append(myopt)
            mynewargv.append(myopt +"="+ str(myarg))
    # priority only needs to be adjusted on the first run
    os.environ["PORTAGE_NICENESS"] = "0"
    os.execv(mynewargv[0], mynewargv)
# NOTE(review): this span is the interior of a method (apparently the
# Scheduler's public merge() entry point) whose "def" line falls in a gap
# of this excerpt; original lines are missing throughout, flagged below.
if "--resume" in self.myopts:
    # We're resuming a previously interrupted merge.
    portage.writemsg_stdout(
        colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
    self._logger.log(" *** Resuming merge...")

self._save_resume_list()

# NOTE(review): a "try:" appears to be missing before this call, matching
# the stray except clause below.
self._background = self._background_mode()
except self._unknown_internal_error:
    # NOTE(review): the handler body (likely "return 1") is missing.

for root in self.trees:
    root_config = self.trees[root]["root_config"]

    # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
    # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
    # for ensuring sane $PWD (bug #239560) and storing elog messages.
    tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
    if not tmpdir or not os.path.isdir(tmpdir):
        msg = "The directory specified in your " + \
            "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
            "does not exist. Please create this " + \
            "directory or correct your PORTAGE_TMPDIR setting."
        msg = textwrap.wrap(msg, 70)
        out = portage.output.EOutput()
        # NOTE(review): lines missing here — likely printing the wrapped
        # message via out.eerror() and returning an error code.

    if self._background:
        # Make child processes aware that they run in the background.
        root_config.settings.unlock()
        root_config.settings["PORTAGE_BACKGROUND"] = "1"
        root_config.settings.backup_changes("PORTAGE_BACKGROUND")
        root_config.settings.lock()

    self.pkgsettings[root] = portage.config(
        clone=root_config.settings)

rval = self._generate_digests()
if rval != os.EX_OK:
    # NOTE(review): "return rval" appears to be missing here.

rval = self._check_manifests()
if rval != os.EX_OK:
    # NOTE(review): "return rval" appears to be missing here.

keep_going = "--keep-going" in self.myopts
fetchonly = self._build_opts.fetchonly
mtimedb = self._mtimedb
failed_pkgs = self._failed_pkgs

# NOTE(review): a retry-loop header (likely "while True:") appears to be
# missing here; the implied "break"s flagged below would belong to it.
rval = self._merge()
if rval == os.EX_OK or fetchonly or not keep_going:
    # NOTE(review): "break" appears to be missing here.
if "resume" not in mtimedb:
    # NOTE(review): "break" appears to be missing here.
mergelist = self._mtimedb["resume"].get("mergelist")
# NOTE(review): a "if not mergelist: break" guard appears to be missing.

if not failed_pkgs:
    # NOTE(review): "break" appears to be missing here.

# With --keep-going, drop the failed packages from the resume list and
# try the remainder again.
for failed_pkg in failed_pkgs:
    mergelist.remove(list(failed_pkg.pkg))

self._failed_pkgs_all.extend(failed_pkgs)
# NOTE(review): lines missing here — likely "del failed_pkgs[:]" and a
# "if not mergelist: break" guard.

if not self._calc_resume_list():
    # NOTE(review): "break" appears to be missing here.

clear_caches(self.trees)
if not self._mergelist:
    # NOTE(review): "break" appears to be missing here.

self._save_resume_list()
self._pkg_count.curval = 0
self._pkg_count.maxval = len([x for x in self._mergelist \
    if isinstance(x, Package) and x.operation == "merge"])
self._status_display.maxval = self._pkg_count.maxval

self._logger.log(" *** Finished. Cleaning up...")

# NOTE(review): a "if failed_pkgs:" guard and a "del failed_pkgs[:]"
# appear to be missing around the extend below.
self._failed_pkgs_all.extend(failed_pkgs)

background = self._background
failure_log_shown = False
if background and len(self._failed_pkgs_all) == 1:
    # If only one package failed then just show it's
    # whole log for easy viewing.
    failed_pkg = self._failed_pkgs_all[-1]
    build_dir = failed_pkg.build_dir
    # NOTE(review): lines missing here — likely "log_file = None" init.

    log_paths = [failed_pkg.build_log]

    log_path = self._locate_failure_log(failed_pkg)
    if log_path is not None:
        # NOTE(review): the try/except surrounding this open() is missing
        # from this excerpt.
        log_file = open(log_path)

    if log_file is not None:
        # NOTE(review): a try/finally (closing log_file) appears to be
        # missing around this loop.
        for line in log_file:
            writemsg_level(line, noiselevel=-1)
        failure_log_shown = True

# Dump mod_echo output now since it tends to flood the terminal.
# This allows us to avoid having more important output, generated
# later, from being swept away by the mod_echo output.
mod_echo_output = _flush_elog_mod_echo()

if background and not failure_log_shown and \
    self._failed_pkgs_all and \
    self._failed_pkgs_die_msgs and \
    not mod_echo_output:

    printer = portage.output.EOutput()
    for mysettings, key, logentries in self._failed_pkgs_die_msgs:
        # NOTE(review): a default (likely root_msg = "") appears to be
        # missing before the conditional below.
        if mysettings["ROOT"] != "/":
            root_msg = " merged to %s" % mysettings["ROOT"]
        printer.einfo("Error messages for package %s%s:" % \
            (colorize("INFORM", key), root_msg))

        for phase in portage.const.EBUILD_PHASES:
            if phase not in logentries:
                # NOTE(review): "continue" appears to be missing here.
            for msgtype, msgcontent in logentries[phase]:
                if isinstance(msgcontent, basestring):
                    msgcontent = [msgcontent]
                for line in msgcontent:
                    printer.eerror(line.strip("\n"))

if self._post_mod_echo_msgs:
    for msg in self._post_mod_echo_msgs:
        # NOTE(review): the loop body (likely "msg()") is missing here.

if len(self._failed_pkgs_all) > 1 or \
    (self._failed_pkgs_all and "--keep-going" in self.myopts):
    if len(self._failed_pkgs_all) > 1:
        msg = "The following %d packages have " % \
            len(self._failed_pkgs_all) + \
            "failed to build or install:"
    # NOTE(review): an "else:" appears to be missing before the
    # single-package message below.
        msg = "The following package has " + \
            "failed to build or install:"
    prefix = bad(" * ")
    writemsg(prefix + "\n", noiselevel=-1)
    from textwrap import wrap
    for line in wrap(msg, 72):
        writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
    writemsg(prefix + "\n", noiselevel=-1)
    # NOTE(review): a trailing "noiselevel=-1)" argument line is missing
    # from the writemsg call inside this loop.
    for failed_pkg in self._failed_pkgs_all:
        writemsg("%s\t%s\n" % (prefix,
            colorize("INFORM", str(failed_pkg.pkg))),
    writemsg(prefix + "\n", noiselevel=-1)
# NOTE(review): the method's trailing return appears to be missing.
def _elog_listener(self, mysettings, key, logentries, fulltext):
    """Collect ERROR-level elog entries for the end-of-run failure report."""
    errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
    # NOTE(review): a guard (likely "if errors:") appears to be missing
    # before the append below.
    self._failed_pkgs_die_msgs.append(
        (mysettings, key, errors))
def _locate_failure_log(self, failed_pkg):
    """Return the path of a usable build log for *failed_pkg*, if any."""
    # NOTE(review): build_dir is unused in the visible lines; usage may be
    # among the lines missing from this excerpt — confirm.
    build_dir = failed_pkg.build_dir

    log_paths = [failed_pkg.build_log]

    for log_path in log_paths:
        # NOTE(review): several lines are missing from this excerpt —
        # likely existence checks, error handling around os.stat, a size
        # test and the return statements.
        log_size = os.stat(log_path).st_size
def _add_packages(self):
    """Seed the scheduling queue from the merge list."""
    pkg_queue = self._pkg_queue
    for pkg in self._mergelist:
        if isinstance(pkg, Package):
            pkg_queue.append(pkg)
        elif isinstance(pkg, Blocker):
            # NOTE(review): the Blocker branch body (likely "pass") is
            # missing from this excerpt.
def _system_merge_started(self, merge):
    """
    Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
    """
    graph = self._digraph
    # NOTE(review): a guard (likely "if graph is None: return") appears to
    # be missing here.
    pkg = merge.merge.pkg

    # Skip this if $ROOT != / since it shouldn't matter if there
    # are unsatisfied system runtime deps in this case.
    if pkg.root != '/':
        # NOTE(review): "return" appears to be missing here.

    completed_tasks = self._completed_tasks
    unsatisfied = self._unsatisfied_system_deps

    def ignore_non_runtime_or_satisfied(priority):
        """
        Ignore non-runtime and satisfied runtime priorities.
        """
        if isinstance(priority, DepPriority) and \
            not priority.satisfied and \
            (priority.runtime or priority.runtime_post):
            # NOTE(review): the function's returns are missing from this
            # excerpt — presumably False here (do not ignore) and True
            # otherwise.

    # When checking for unsatisfied runtime deps, only check
    # direct deps since indirect deps are checked when the
    # corresponding parent is merged.
    for child in graph.child_nodes(pkg,
        ignore_priority=ignore_non_runtime_or_satisfied):
        if not isinstance(child, Package) or \
            child.operation == 'uninstall':
            # NOTE(review): "continue" appears to be missing here.
        if child.operation == 'merge' and \
            child not in completed_tasks:
            unsatisfied.add(child)
11026 def _merge_wait_exit_handler(self, task):
11027 self._merge_wait_scheduled.remove(task)
11028 self._merge_exit(task)
def _merge_exit(self, merge):
    """Handle completion of a PackageMerge task."""
    self._do_merge_exit(merge)
    self._deallocate_config(merge.merge.settings)
    # Only bump the progress counter for real (non-installed) merges.
    if merge.returncode == os.EX_OK and \
        not merge.merge.pkg.installed:
        self._status_display.curval += 1
    self._status_display.merges = len(self._task_queues.merge)
    # NOTE(review): one or more trailing lines (possibly a _schedule()
    # call) are missing from this excerpt.
def _do_merge_exit(self, merge):
    """Bookkeeping for a finished merge: record failure or completion,
    update the resume list, and restart if portage replaced itself."""
    pkg = merge.merge.pkg
    if merge.returncode != os.EX_OK:
        settings = merge.merge.settings
        build_dir = settings.get("PORTAGE_BUILDDIR")
        build_log = settings.get("PORTAGE_LOG_FILE")

        # NOTE(review): an argument line (likely "pkg=pkg,") appears to
        # be missing from this constructor call.
        self._failed_pkgs.append(self._failed_pkg(
            build_dir=build_dir, build_log=build_log,
            returncode=merge.returncode))
        self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")

        self._status_display.failed = len(self._failed_pkgs)
        # NOTE(review): an early "return" appears to be missing here.

    self._task_complete(pkg)
    pkg_to_replace = merge.merge.pkg_to_replace
    if pkg_to_replace is not None:
        # When a package is replaced, mark it's uninstall
        # task complete (if any).
        uninst_hash_key = \
            ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
        self._task_complete(uninst_hash_key)

    # NOTE(review): lines missing here — likely a guard before the
    # restart check below.
    self._restart_if_necessary(pkg)

    # Call mtimedb.commit() after each merge so that
    # --resume still works after being interrupted
    # by reboot, sigkill or similar.
    mtimedb = self._mtimedb
    mtimedb["resume"]["mergelist"].remove(list(pkg))
    if not mtimedb["resume"]["mergelist"]:
        del mtimedb["resume"]
    # NOTE(review): the trailing "mtimedb.commit()" appears to be missing.
def _build_exit(self, build):
    """Handle completion of a build task: queue the merge on success, or
    record the failure."""
    if build.returncode == os.EX_OK:
        # NOTE(review): a line is missing here between the condition and
        # the PackageMerge construction below.
        merge = PackageMerge(merge=build)
        if not build.build_opts.buildpkgonly and \
            build.pkg in self._deep_system_deps:
            # Since dependencies on system packages are frequently
            # unspecified, merge them only when no builds are executing.
            self._merge_wait_queue.append(merge)
            merge.addStartListener(self._system_merge_started)
        # NOTE(review): an "else:" appears to be missing before the
        # immediate-merge scheduling below.
            merge.addExitListener(self._merge_exit)
            self._task_queues.merge.add(merge)
            self._status_display.merges = len(self._task_queues.merge)
    # NOTE(review): an "elif ..." header (the failure branch) appears to
    # be missing before the block below.
        settings = build.settings
        build_dir = settings.get("PORTAGE_BUILDDIR")
        build_log = settings.get("PORTAGE_LOG_FILE")

        # NOTE(review): an argument line (likely "pkg=build.pkg,") appears
        # to be missing from this constructor call.
        self._failed_pkgs.append(self._failed_pkg(
            build_dir=build_dir, build_log=build_log,
            returncode=build.returncode))
        self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")

        self._status_display.failed = len(self._failed_pkgs)
    self._deallocate_config(build.settings)
    # NOTE(review): lines missing here — likely job-count decrement —
    # before the status update below.
    self._status_display.running = self._jobs
    # NOTE(review): a trailing line (possibly self._schedule()) appears to
    # be missing.
11109 def _extract_exit(self, build):
11110 self._build_exit(build)
11112 def _task_complete(self, pkg):
11113 self._completed_tasks.add(pkg)
11114 self._unsatisfied_system_deps.discard(pkg)
11115 self._choose_pkg_return_early = False
# NOTE(review): this span is the interior of a helper (apparently the
# Scheduler's _merge() method) whose "def" line falls in a gap of this
# excerpt.
self._add_prefetchers()
self._add_packages()
pkg_queue = self._pkg_queue
failed_pkgs = self._failed_pkgs
# Silence lock messages and capture elog errors while the loop runs.
portage.locks._quiet = self._background
portage.elog._emerge_elog_listener = self._elog_listener
# NOTE(review): lines missing here — likely "try: self._main_loop()" with
# the cleanup below belonging to the matching "finally:" block.
self._main_loop_cleanup()
portage.locks._quiet = False
portage.elog._emerge_elog_listener = None
# NOTE(review): a guard (likely "if failed_pkgs:") appears to be missing
# before the line below.
rval = failed_pkgs[-1].returncode
# NOTE(review): trailing returns (rval on failure, os.EX_OK otherwise)
# appear to be missing from this excerpt.
11138 def _main_loop_cleanup(self):
11139 del self._pkg_queue[:]
11140 self._completed_tasks.clear()
11141 self._deep_system_deps.clear()
11142 self._unsatisfied_system_deps.clear()
11143 self._choose_pkg_return_early = False
11144 self._status_display.reset()
11145 self._digraph = None
11146 self._task_queues.fetch.clear()
def _choose_pkg(self):
    """
    Choose a task that has all it's dependencies satisfied.
    Memoizes a negative result via _choose_pkg_return_early until an
    existing job completes.
    """
    if self._choose_pkg_return_early:
        # NOTE(review): "return None" appears to be missing here.

    if self._digraph is None:
        if (self._jobs or self._task_queues.merge) and \
            not ("--nodeps" in self.myopts and \
            (self._max_jobs is True or self._max_jobs > 1)):
            self._choose_pkg_return_early = True
            # NOTE(review): "return None" appears to be missing here.
        return self._pkg_queue.pop(0)

    if not (self._jobs or self._task_queues.merge):
        return self._pkg_queue.pop(0)

    self._prune_digraph()

    # NOTE(review): a "chosen_pkg = None" initialization appears to be
    # missing here.
    later = set(self._pkg_queue)
    for pkg in self._pkg_queue:
        # NOTE(review): a line (likely "later.remove(pkg)") appears to be
        # missing at the top of this loop body.
        if not self._dependent_on_scheduled_merges(pkg, later):
            # NOTE(review): "chosen_pkg = pkg" and "break" appear to be
            # missing here.

    if chosen_pkg is not None:
        self._pkg_queue.remove(chosen_pkg)

    if chosen_pkg is None:
        # There's no point in searching for a package to
        # choose until at least one of the existing jobs
        # completes.
        self._choose_pkg_return_early = True

    # NOTE(review): a trailing "return chosen_pkg" appears to be missing.
def _dependent_on_scheduled_merges(self, pkg, later):
    """
    Traverse the subgraph of the given packages deep dependencies
    to see if it contains any scheduled merges.
    @param pkg: a package to check dependencies for
    @param later: packages for which dependence should be ignored
        since they will be merged later than pkg anyway and therefore
        delaying the merge of pkg will not result in a more optimal
        merge order in this case
    @rtype: bool
    @returns: True if the package is dependent, False otherwise.
    """
    graph = self._digraph
    completed_tasks = self._completed_tasks

    # NOTE(review): a "dependent = False" initialization appears to be
    # missing here.
    traversed_nodes = set([pkg])
    direct_deps = graph.child_nodes(pkg)
    node_stack = direct_deps
    direct_deps = frozenset(direct_deps)
    # NOTE(review): the traversal loop header (likely
    # "while node_stack:") is missing from this excerpt.
    node = node_stack.pop()
    if node in traversed_nodes:
        # NOTE(review): "continue" appears to be missing here.
    traversed_nodes.add(node)
    # NOTE(review): the tail of the condition below (likely a
    # "node in later" test, the closing parenthesis, and the
    # dependent/break bookkeeping) is missing from this excerpt, as is
    # the trailing "return dependent".
    if not ((node.installed and node.operation == "nomerge") or \
        (node.operation == "uninstall" and \
        node not in direct_deps) or \
        node in completed_tasks or \
        node_stack.extend(graph.child_nodes(node))
def _allocate_config(self, root):
    """
    Allocate a unique config instance for a task in order
    to prevent interference between parallel tasks.
    """
    if self._config_pool[root]:
        temp_settings = self._config_pool[root].pop()
    # NOTE(review): an "else:" appears to be missing between the pooled
    # reuse above and the fresh clone below.
        temp_settings = portage.config(clone=self.pkgsettings[root])
    # Since config.setcpv() isn't guaranteed to call config.reset() due to
    # performance reasons, call it here to make sure all settings from the
    # previous package get flushed out (such as PORTAGE_LOG_FILE).
    temp_settings.reload()
    temp_settings.reset()
    return temp_settings
11243 def _deallocate_config(self, settings):
11244 self._config_pool[settings["ROOT"]].append(settings)
def _main_loop(self):
    """Drive scheduling and polling until no jobs or merges remain."""
    # Only allow 1 job max if a restart is scheduled
    # due to portage update.
    if self._is_restart_scheduled() or \
        self._opts_no_background.intersection(self.myopts):
        self._set_max_jobs(1)

    merge_queue = self._task_queues.merge

    while self._schedule():
        if self._poll_event_handlers:
            # NOTE(review): the loop body (likely a self._poll_loop()
            # call) is missing from this excerpt.

    # NOTE(review): lines missing here — the drain phase (likely a second
    # loop calling self._schedule() with a "break" guard) around the two
    # conditions below.
    if not (self._jobs or merge_queue):
        if self._poll_event_handlers:
11267 def _keep_scheduling(self):
11268 return bool(self._pkg_queue and \
11269 not (self._failed_pkgs and not self._build_opts.fetchonly))
def _schedule_tasks(self):
    """One scheduling pass: flush waiting merges, start new tasks, and
    report whether scheduling should continue."""
    # NOTE(review): one line is missing directly after the def (possibly
    # an enclosing loop header or a docstring).

    # When the number of jobs drops to zero, process all waiting merges.
    if not self._jobs and self._merge_wait_queue:
        for task in self._merge_wait_queue:
            task.addExitListener(self._merge_wait_exit_handler)
            self._task_queues.merge.add(task)
        self._status_display.merges = len(self._task_queues.merge)
        self._merge_wait_scheduled.extend(self._merge_wait_queue)
        del self._merge_wait_queue[:]

    self._schedule_tasks_imp()
    self._status_display.display()

    # NOTE(review): lines missing here before the queue scan below
    # (likely state-change bookkeeping), and the scan's loop body (likely
    # per-queue schedule calls) is also missing.
    for q in self._task_queues.values():

    # Cancel prefetchers if they're the only reason
    # the main poll loop is still running.
    if self._failed_pkgs and not self._build_opts.fetchonly and \
        not (self._jobs or self._task_queues.merge) and \
        self._task_queues.fetch:
        self._task_queues.fetch.clear()
        # NOTE(review): lines missing here before the second pass below.

    self._schedule_tasks_imp()
    self._status_display.display()

    return self._keep_scheduling()
def _job_delay(self):
    """
    Rate-limit new job starts so the load average has time to respond.
    @rtype: bool
    @returns: True if job scheduling should be delayed, False otherwise.
    """
    if self._jobs and self._max_load is not None:

        current_time = time.time()

        # Delay grows with the number of running jobs, capped at the max.
        delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
        if delay > self._job_delay_max:
            delay = self._job_delay_max
        if (current_time - self._previous_job_start_time) < delay:
            # NOTE(review): "return True" appears to be missing here.

    # NOTE(review): a trailing "return False" appears to be missing.
def _schedule_tasks_imp(self):
    """
    Start new jobs while limits allow.
    @rtype: bool
    @returns: True if state changed, False otherwise.
    """
    # NOTE(review): lines missing here — likely a "state_change = 0"
    # initialization and an enclosing "while True:" loop.

    if not self._keep_scheduling():
        return bool(state_change)

    # NOTE(review): the tail of the condition below (likely a
    # self._job_delay() test) is missing from this excerpt.
    if self._choose_pkg_return_early or \
        self._merge_wait_scheduled or \
        (self._jobs and self._unsatisfied_system_deps) or \
        not self._can_add_job() or \
        return bool(state_change)

    pkg = self._choose_pkg()
    # NOTE(review): a guard (likely "if pkg is None:") appears to be
    # missing before this return.
    return bool(state_change)

    # NOTE(review): state_change increments appear to be missing around
    # the lines below.
    if not pkg.installed:
        self._pkg_count.curval += 1

    task = self._task(pkg)

    # NOTE(review): a branch header (likely "if pkg.installed:") appears
    # to be missing before the direct-merge scheduling below.
    merge = PackageMerge(merge=task)
    merge.addExitListener(self._merge_exit)
    self._task_queues.merge.add(merge)

    # NOTE(review): a branch header (likely "elif pkg.built:") and a
    # job-count increment appear to be missing here.
    self._previous_job_start_time = time.time()
    self._status_display.running = self._jobs
    task.addExitListener(self._extract_exit)
    self._task_queues.jobs.add(task)

    # NOTE(review): an "else:" and a job-count increment appear to be
    # missing here.
    self._previous_job_start_time = time.time()
    self._status_display.running = self._jobs
    task.addExitListener(self._build_exit)
    self._task_queues.jobs.add(task)

    return bool(state_change)
def _task(self, pkg):
    """Build the MergeListItem composite task for *pkg*, including the
    installed package it replaces (if any)."""
    pkg_to_replace = None
    if pkg.operation != "uninstall":
        vardb = pkg.root_config.trees["vartree"].dbapi
        previous_cpv = vardb.match(pkg.slot_atom)
        # NOTE(review): a guard (likely "if previous_cpv:") appears to be
        # missing before the pop below.
        previous_cpv = previous_cpv.pop()
        pkg_to_replace = self._pkg(previous_cpv,
            "installed", pkg.root_config, installed=True)

    task = MergeListItem(args_set=self._args_set,
        background=self._background, binpkg_opts=self._binpkg_opts,
        build_opts=self._build_opts,
        config_pool=self._ConfigPool(pkg.root,
        self._allocate_config, self._deallocate_config),
        emerge_opts=self.myopts,
        find_blockers=self._find_blockers(pkg), logger=self._logger,
        mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
        pkg_to_replace=pkg_to_replace,
        prefetcher=self._prefetchers.get(pkg),
        scheduler=self._sched_iface,
        settings=self._allocate_config(pkg.root),
        statusMessage=self._status_msg,
        world_atom=self._world_atom)

    # NOTE(review): a trailing "return task" appears to be missing.
def _failed_pkg_msg(self, failed_pkg, action, preposition):
    """Emit a status message describing a failed package, followed by the
    path of its log file when one can be located."""
    pkg = failed_pkg.pkg
    msg = "%s to %s %s" % \
        (bad("Failed"), action, colorize("INFORM", pkg.cpv))
    if pkg.root != "/":
        msg = "%s %s %s" % (msg, preposition, pkg.root)

    log_path = self._locate_failure_log(failed_pkg)
    if log_path is not None:
        msg += ", Log file:"
    self._status_msg(msg)

    if log_path is not None:
        self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
11417 def _status_msg(self, msg):
11419 Display a brief status message (no newlines) in the status display.
11420 This is called by tasks to provide feedback to the user. This
11421 delegates the resposibility of generating \r and \n control characters,
11422 to guarantee that lines are created or erased when necessary and
11426 @param msg: a brief status message (no newlines allowed)
11428 if not self._background:
11429 writemsg_level("\n")
11430 self._status_display.displayMessage(msg)
def _save_resume_list(self):
    """
    Do this before verifying the ebuild Manifests since it might
    be possible for the user to use --resume --skipfirst get past
    a non-essential package with a broken digest.
    """
    mtimedb = self._mtimedb
    # Only real merges belong in the resume list.
    mtimedb["resume"]["mergelist"] = [list(x) \
        for x in self._mergelist \
        if isinstance(x, Package) and x.operation == "merge"]

    # NOTE(review): trailing lines (likely saving additional resume state
    # and a mtimedb.commit()) are missing from this excerpt.
# Scheduler._calc_resume_list: recompute the dependency graph for the
# saved resume list, dropping entries whose dependencies can no longer
# be satisfied; with --keep-going the dropped merges are logged via
# elog. Returns True on success, False otherwise (per the docstring).
# NOTE(review): excerpt has fused line numbers; several structural
# lines (try:/else:/return, blank lines, docstring quotes) are missing
# from this view.
11445 def _calc_resume_list(self):
11447 Use the current resume list to calculate a new one,
11448 dropping any packages with unsatisfied deps.
11450 @returns: True if successful, False otherwise.
11452 print colorize("GOOD", "*** Resuming merge...")
# Optionally announce the would-be merge list (see _show_list()).
11454 if self._show_list():
11455 if "--tree" in self.myopts:
11456 portage.writemsg_stdout("\n" + \
11457 darkgreen("These are the packages that " + \
11458 "would be merged, in reverse order:\n\n"))
11461 portage.writemsg_stdout("\n" + \
11462 darkgreen("These are the packages that " + \
11463 "would be merged, in order:\n\n"))
11465 show_spinner = "--quiet" not in self.myopts and \
11466 "--nodeps" not in self.myopts
11469 print "Calculating dependencies ",
# Rebuild the depgraph from the stored resume list.
11471 myparams = create_depgraph_params(self.myopts, None)
11475 success, mydepgraph, dropped_tasks = resume_depgraph(
11476 self.settings, self.trees, self._mtimedb, self.myopts,
11477 myparams, self._spinner)
11478 except depgraph.UnsatisfiedResumeDep, exc:
11479 # rename variable to avoid python-3.0 error:
11480 # SyntaxError: can not delete variable 'e' referenced in nested
11483 mydepgraph = e.depgraph
11484 dropped_tasks = set()
11487 print "\b\b... done!"
# Deferred error report shown after mod_echo output, explaining which
# resume entries are masked or have missing dependencies.
11490 def unsatisfied_resume_dep_msg():
11491 mydepgraph.display_problems()
11492 out = portage.output.EOutput()
11493 out.eerror("One or more packages are either masked or " + \
11494 "have missing dependencies:")
11497 show_parents = set()
11498 for dep in e.value:
# Report each parent package only once.
11499 if dep.parent in show_parents:
11501 show_parents.add(dep.parent)
11502 if dep.atom is None:
11503 out.eerror(indent + "Masked package:")
11504 out.eerror(2 * indent + str(dep.parent))
11507 out.eerror(indent + str(dep.atom) + " pulled in by:")
11508 out.eerror(2 * indent + str(dep.parent))
11510 msg = "The resume list contains packages " + \
11511 "that are either masked or have " + \
11512 "unsatisfied dependencies. " + \
11513 "Please restart/continue " + \
11514 "the operation manually, or use --skipfirst " + \
11515 "to skip the first package in the list and " + \
11516 "any other packages that may be " + \
11517 "masked or have missing dependencies."
11518 for line in textwrap.wrap(msg, 72):
11520 self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
# On success, optionally display the recalculated merge list.
11523 if success and self._show_list():
11524 mylist = mydepgraph.altlist()
11526 if "--tree" in self.myopts:
11528 mydepgraph.display(mylist, favorites=self._favorites)
11531 self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11533 mydepgraph.display_problems()
# Adopt the new list and break reference cycles so the depgraph can be
# garbage collected.
11535 mylist = mydepgraph.altlist()
11536 mydepgraph.break_refs(mylist)
11537 mydepgraph.break_refs(dropped_tasks)
11538 self._mergelist = mylist
11539 self._set_digraph(mydepgraph.schedulerGraph())
# --keep-going: record each dropped merge task as a failed package.
11542 for task in dropped_tasks:
11543 if not (isinstance(task, Package) and task.operation == "merge"):
11546 msg = "emerge --keep-going:" + \
11548 if pkg.root != "/":
11549 msg += " for %s" % (pkg.root,)
11550 msg += " dropped due to unsatisfied dependency."
11551 for line in textwrap.wrap(msg, msg_width):
11552 eerror(line, phase="other", key=pkg.cpv)
11553 settings = self.pkgsettings[pkg.root]
11554 # Ensure that log collection from $T is disabled inside
11555 # elog_process(), since any logs that might exist are
11557 settings.pop("T", None)
11558 portage.elog.elog_process(pkg.cpv, settings)
11559 self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
# Scheduler._show_list: predicate — should the merge list be printed?
# True when not --quiet and any of --ask/--tree/--verbose is set.
# NOTE(review): the return statements fall in lines missing from this
# excerpt; presumably `return True` inside the if and `return False`
# after it — confirm against upstream.
11563 def _show_list(self):
11564 myopts = self.myopts
11565 if "--quiet" not in myopts and \
11566 ("--ask" in myopts or "--tree" in myopts or \
11567 "--verbose" in myopts):
# Scheduler._world_atom: record a just-merged package in the "world"
# favorites file, but only when appropriate: not in pretend/oneshot/
# fetch-style modes, only for the target root, and only when the
# package was explicitly requested on the command line.
# NOTE(review): excerpt has fused line numbers; missing lines likely
# include early `return`s, the `try:`/`finally:` around the world-set
# update, and the unlock call — confirm against upstream.
11571 def _world_atom(self, pkg):
11573 Add the package to the world file, but only if
11574 it's supposed to be added. Otherwise, do nothing.
# Options that must never modify the world file.
11577 if set(("--buildpkgonly", "--fetchonly",
11579 "--oneshot", "--onlydeps",
11580 "--pretend")).intersection(self.myopts):
11583 if pkg.root != self.target_root:
# Only record packages the user actually asked for.
11586 args_set = self._args_set
11587 if not args_set.findAtomForPackage(pkg):
11590 logger = self._logger
11591 pkg_count = self._pkg_count
11592 root_config = pkg.root_config
11593 world_set = root_config.sets["world"]
# Lock the world set when the backend supports it, so concurrent
# emerge processes don't clobber each other's updates.
11594 world_locked = False
11595 if hasattr(world_set, "lock"):
11597 world_locked = True
11600 if hasattr(world_set, "load"):
11601 world_set.load() # maybe it's changed on disk
11603 atom = create_world_atom(pkg, args_set, root_config)
# create_world_atom() returns a falsy value when nothing should be
# recorded; only add when the set is editable.
11605 if hasattr(world_set, "add"):
11606 self._status_msg(('Recording %s in "world" ' + \
11607 'favorites file...') % atom)
11608 logger.log(" === (%s of %s) Updating world file (%s)" % \
11609 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11610 world_set.add(atom)
11612 writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11613 (atom,), level=logging.WARN, noiselevel=-1)
# Scheduler._pkg: fetch a Package instance, preferring one already
# present in the dependency digraph; otherwise construct it from the
# appropriate package database's cached metadata.
# NOTE(review): excerpt has fused line numbers; missing lines likely
# include `if installed:` (before the "nomerge" assignment) and the
# `return pkg` statements — confirm against upstream.
11618 def _pkg(self, cpv, type_name, root_config, installed=False):
11620 Get a package instance from the cache, or create a new
11621 one if necessary. Raises KeyError from aux_get if it
11622 failures for some reason (package does not exist or is
# Digraph keys include the operation, so pick it from installed-ness.
11625 operation = "merge"
11627 operation = "nomerge"
11629 if self._digraph is not None:
11630 # Reuse existing instance when available.
11631 pkg = self._digraph.get(
11632 (type_name, root_config.root, cpv, operation))
11633 if pkg is not None:
# Cache miss: build a new Package from the dbapi's aux cache keys.
11636 tree_type = depgraph.pkg_tree_map[type_name]
11637 db = root_config.trees[tree_type].dbapi
11638 db_keys = list(self.trees[root_config.root][
11639 tree_type].dbapi._aux_cache_keys)
11640 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11641 pkg = Package(cpv=cpv, metadata=metadata,
11642 root_config=root_config, installed=installed)
# Ebuilds need USE/CHOST resolved against the current config.
11643 if type_name == "ebuild":
11644 settings = self.pkgsettings[root_config.root]
11645 settings.setcpv(pkg)
11646 pkg.metadata["USE"] = settings["PORTAGE_USE"]
11647 pkg.metadata['CHOST'] = settings.get('CHOST', '')
# MetadataRegen: a PollScheduler that regenerates ebuild metadata cache
# entries in parallel (used by `emerge --regen`/metadata generation),
# then cleanses stale cache entries afterwards.
# NOTE(review): excerpt has fused line numbers; missing lines include
# a method header (presumably `def run(self):` before the 11720 line),
# loop/try/else lines, and blank lines — confirm against upstream.
11651 class MetadataRegen(PollScheduler):
# Set up iteration state. cp_iter=None means "every cp in the portdb",
# which also enables global cleansing of stale cache entries.
11653 def __init__(self, portdb, cp_iter=None, consumer=None,
11654 max_jobs=None, max_load=None):
11655 PollScheduler.__init__(self)
11656 self._portdb = portdb
11657 self._global_cleanse = False
11658 if cp_iter is None:
11659 cp_iter = self._iter_every_cp()
11660 # We can globally cleanse stale cache only if we
11661 # iterate over every single cp.
11662 self._global_cleanse = True
11663 self._cp_iter = cp_iter
11664 self._consumer = consumer
11666 if max_jobs is None:
11669 self._max_jobs = max_jobs
11670 self._max_load = max_load
11671 self._sched_iface = self._sched_iface_class(
11672 register=self._register,
11673 schedule=self._schedule_wait,
11674 unregister=self._unregister)
11676 self._valid_pkgs = set()
11677 self._cp_set = set()
11678 self._process_iter = self._iter_metadata_processes()
11679 self.returncode = os.EX_OK
11680 self._error_count = 0
# Generator over every category/package name in the portdb.
# Sorted in reverse and popped from the end, yielding ascending order.
11682 def _iter_every_cp(self):
11683 every_cp = self._portdb.cp_all()
11684 every_cp.sort(reverse=True)
11687 yield every_cp.pop()
# Generator yielding an EbuildMetadataPhase task for each cpv whose
# cache entry is stale; valid cache hits go straight to the consumer.
11691 def _iter_metadata_processes(self):
11692 portdb = self._portdb
11693 valid_pkgs = self._valid_pkgs
11694 cp_set = self._cp_set
11695 consumer = self._consumer
11697 for cp in self._cp_iter:
11699 portage.writemsg_stdout("Processing %s\n" % cp)
11700 cpv_list = portdb.cp_list(cp)
11701 for cpv in cpv_list:
11702 valid_pkgs.add(cpv)
11703 ebuild_path, repo_path = portdb.findname2(cpv)
11704 metadata, st, emtime = portdb._pull_valid_cache(
11705 cpv, ebuild_path, repo_path)
11706 if metadata is not None:
11707 if consumer is not None:
11708 consumer(cpv, ebuild_path,
11709 repo_path, metadata)
# Cache miss/stale: schedule an ebuild "depend" phase to regenerate.
11712 yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
11713 ebuild_mtime=emtime,
11714 metadata_callback=portdb._metadata_callback,
11715 portdb=portdb, repo_path=repo_path,
11716 settings=portdb.doebuild_settings)
# (Method header missing from this excerpt — presumably `def run(self):`
# — runs the scheduler loop, then removes dead cache entries.)
11720 portdb = self._portdb
11721 from portage.cache.cache_errors import CacheError
11724 while self._schedule():
# Determine which cache keys are candidates for removal: all of them
# when globally cleansing, otherwise only those in the processed cps.
11730 if self._global_cleanse:
11731 for mytree in portdb.porttrees:
11733 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11734 except CacheError, e:
11735 portage.writemsg("Error listing cache entries for " + \
11736 "'%s': %s, continuing...\n" % (mytree, e),
11742 cp_set = self._cp_set
11743 cpv_getkey = portage.cpv_getkey
11744 for mytree in portdb.porttrees:
11746 dead_nodes[mytree] = set(cpv for cpv in \
11747 portdb.auxdb[mytree].iterkeys() \
11748 if cpv_getkey(cpv) in cp_set)
11749 except CacheError, e:
11750 portage.writemsg("Error listing cache entries for " + \
11751 "'%s': %s, continuing...\n" % (mytree, e),
# Entries whose ebuild still exists are not dead; keep them.
11758 for y in self._valid_pkgs:
11759 for mytree in portdb.porttrees:
11760 if portdb.findname2(y, mytree=mytree)[0]:
11761 dead_nodes[mytree].discard(y)
11763 for mytree, nodes in dead_nodes.iteritems():
11764 auxdb = portdb.auxdb[mytree]
11768 except (KeyError, CacheError):
# Pull tasks from the metadata-process iterator while job slots are
# free; returns-per-docstring True while tasks may remain.
11771 def _schedule_tasks(self):
11774 @returns: True if there may be remaining tasks to schedule,
11777 while self._can_add_job():
11779 metadata_process = self._process_iter.next()
11780 except StopIteration:
11784 metadata_process.scheduler = self._sched_iface
11785 metadata_process.addExitListener(self._metadata_exit)
11786 metadata_process.start()
# Exit callback: record failures, otherwise forward regenerated
# metadata to the consumer.
11789 def _metadata_exit(self, metadata_process):
11791 if metadata_process.returncode != os.EX_OK:
11792 self.returncode = 1
11793 self._error_count += 1
11794 self._valid_pkgs.discard(metadata_process.cpv)
11795 portage.writemsg("Error processing %s, continuing...\n" % \
11796 (metadata_process.cpv,), noiselevel=-1)
11798 if self._consumer is not None:
11799 # On failure, still notify the consumer (in this case the metadata
11800 # argument is None).
11801 self._consumer(metadata_process.cpv,
11802 metadata_process.ebuild_path,
11803 metadata_process.repo_path,
11804 metadata_process.metadata)
# Exception raised by unmerge() when a package uninstallation fails;
# carries the failing exit status in .status.
# NOTE(review): missing excerpt lines likely include a class-level
# `status = 1` default and an `if pargs:` guard before the assignment
# — confirm against upstream.
11808 class UninstallFailure(portage.exception.PortageException):
11810 An instance of this class is raised by unmerge() when
11811 an uninstallation fails.
11814 def __init__(self, *pargs):
11815 portage.exception.PortageException.__init__(self, pargs)
# First positional argument, when given, is the exit status.
11817 self.status = pargs[0]
# unmerge(): top-level driver for `emerge unmerge/clean/prune/depclean`
# removal. Phases: lock the vdb; compute the system-profile atom list;
# translate command-line arguments (atoms or vdb paths) into candidate
# cat/pkg entries; build pkgmap with "selected"/"protected"/"omitted"
# sets per candidate according to unmerge_action; protect packages
# still referenced by world/package sets; order and display the plan;
# then actually call portage.unmerge() per selected package and clean
# the world file. Raises UninstallFailure on a failed uninstall when
# raise_on_error is set.
# NOTE(review): this excerpt carries fused original line numbers and is
# missing many intermediate lines (try/except/finally, early returns,
# variable initializations such as pkg_cache/global_unmerge/pkgmap
# setup) — treat the visible lines as a partial transcript.
11819 def unmerge(root_config, myopts, unmerge_action,
11820 unmerge_files, ldpath_mtimes, autoclean=0,
11821 clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11822 scheduler=None, writemsg_level=portage.util.writemsg_level):
11824 quiet = "--quiet" in myopts
11825 settings = root_config.settings
11826 sets = root_config.sets
11827 vartree = root_config.trees["vartree"]
11828 candidate_catpkgs=[]
11830 xterm_titles = "notitles" not in settings.features
11831 out = portage.output.EOutput()
# Local helper (its `def _pkg(cpv):` header falls in missing lines):
# memoized construction of installed Package instances from the vdb.
11833 db_keys = list(vartree.dbapi._aux_cache_keys)
11836 pkg = pkg_cache.get(cpv)
11838 pkg = Package(cpv=cpv, installed=True,
11839 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11840 root_config=root_config,
11841 type_name="installed")
11842 pkg_cache[cpv] = pkg
# Lock the installed-package database if we can write to it.
11845 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11847 # At least the parent needs to exist for the lock file.
11848 portage.util.ensure_dirs(vdb_path)
11849 except portage.exception.PortageException:
11853 if os.access(vdb_path, os.W_OK):
11854 vdb_lock = portage.locks.lockdir(vdb_path)
# Build syslist: system-profile atoms, with virtuals resolved to their
# single installed provider when unambiguous.
11855 realsyslist = sets["system"].getAtoms()
11857 for x in realsyslist:
11858 mycp = portage.dep_getkey(x)
11859 if mycp in settings.getvirtuals():
11861 for provider in settings.getvirtuals()[mycp]:
11862 if vartree.dbapi.match(provider):
11863 providers.append(provider)
11864 if len(providers) == 1:
11865 syslist.extend(providers)
11867 syslist.append(mycp)
11869 mysettings = portage.config(clone=settings)
11871 if not unmerge_files:
11872 if unmerge_action == "unmerge":
11874 print bold("emerge unmerge") + " can only be used with specific package names"
11880 localtree = vartree
11881 # process all arguments and add all
11882 # valid db entries to candidate_catpkgs
11884 if not unmerge_files:
11885 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11887 #we've got command-line arguments
11888 if not unmerge_files:
11889 print "\nNo packages to unmerge have been provided.\n"
11891 for x in unmerge_files:
11892 arg_parts = x.split('/')
11893 if x[0] not in [".","/"] and \
11894 arg_parts[-1][-7:] != ".ebuild":
11895 #possible cat/pkg or dep; treat as such
11896 candidate_catpkgs.append(x)
11897 elif unmerge_action in ["prune","clean"]:
11898 print "\n!!! Prune and clean do not accept individual" + \
11899 " ebuilds as arguments;\n skipping.\n"
11902 # it appears that the user is specifying an installed
11903 # ebuild and we're in "unmerge" mode, so it's ok.
11904 if not os.path.exists(x):
11905 print "\n!!! The path '"+x+"' doesn't exist.\n"
# Path argument: verify it is a valid vdb entry under $ROOT/var/db/pkg
# and convert it to an "=cat/pkg-ver" atom.
11908 absx = os.path.abspath(x)
11909 sp_absx = absx.split("/")
11910 if sp_absx[-1][-7:] == ".ebuild":
11912 absx = "/".join(sp_absx)
11914 sp_absx_len = len(sp_absx)
11916 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11917 vdb_len = len(vdb_path)
11919 sp_vdb = vdb_path.split("/")
11920 sp_vdb_len = len(sp_vdb)
11922 if not os.path.exists(absx+"/CONTENTS"):
11923 print "!!! Not a valid db dir: "+str(absx)
11926 if sp_absx_len <= sp_vdb_len:
11927 # The Path is shorter... so it can't be inside the vdb.
11930 print "\n!!!",x,"cannot be inside "+ \
11931 vdb_path+"; aborting.\n"
11934 for idx in range(0,sp_vdb_len):
11935 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11938 print "\n!!!", x, "is not inside "+\
11939 vdb_path+"; aborting.\n"
11942 print "="+"/".join(sp_absx[sp_vdb_len:])
11943 candidate_catpkgs.append(
11944 "="+"/".join(sp_absx[sp_vdb_len:]))
11947 if (not "--quiet" in myopts):
11949 if settings["ROOT"] != "/":
11950 writemsg_level(darkgreen(newline+ \
11951 ">>> Using system located in ROOT tree %s\n" % \
11954 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11955 not ("--quiet" in myopts):
11956 writemsg_level(darkgreen(newline+\
11957 ">>> These are the packages that would be unmerged:\n"))
11959 # Preservation of order is required for --depclean and --prune so
11960 # that dependencies are respected. Use all_selected to eliminate
11961 # duplicate packages since the same package may be selected by
11964 all_selected = set()
11965 for x in candidate_catpkgs:
11966 # cycle through all our candidate deps and determine
11967 # what will and will not get unmerged
11969 mymatch = vartree.dbapi.match(x)
11970 except portage.exception.AmbiguousPackageName, errpkgs:
11971 print "\n\n!!! The short ebuild name \"" + \
11972 x + "\" is ambiguous. Please specify"
11973 print "!!! one of the following fully-qualified " + \
11974 "ebuild names instead:\n"
11975 for i in errpkgs[0]:
11976 print " " + green(i)
11980 if not mymatch and x[0] not in "<>=~":
11981 mymatch = localtree.dep_match(x)
11983 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11984 (x, unmerge_action), noiselevel=-1)
# New pkgmap entry for this candidate; mykey indexes it below.
11988 {"protected": set(), "selected": set(), "omitted": set()})
11989 mykey = len(pkgmap) - 1
11990 if unmerge_action=="unmerge":
11992 if y not in all_selected:
11993 pkgmap[mykey]["selected"].add(y)
11994 all_selected.add(y)
# prune: keep only the "best" installed version per candidate,
# selecting everything else for removal.
11995 elif unmerge_action == "prune":
11996 if len(mymatch) == 1:
11998 best_version = mymatch[0]
11999 best_slot = vartree.getslot(best_version)
12000 best_counter = vartree.dbapi.cpv_counter(best_version)
12001 for mypkg in mymatch[1:]:
12002 myslot = vartree.getslot(mypkg)
12003 mycounter = vartree.dbapi.cpv_counter(mypkg)
12004 if (myslot == best_slot and mycounter > best_counter) or \
12005 mypkg == portage.best([mypkg, best_version]):
12006 if myslot == best_slot:
12007 if mycounter < best_counter:
12008 # On slot collision, keep the one with the
12009 # highest counter since it is the most
12010 # recently installed.
12012 best_version = mypkg
12014 best_counter = mycounter
12015 pkgmap[mykey]["protected"].add(best_version)
12016 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
12017 if mypkg != best_version and mypkg not in all_selected)
12018 all_selected.update(pkgmap[mykey]["selected"])
# clean: group installed versions by slot, protect the most recently
# merged entry per slot (highest counter), select the rest.
12020 # unmerge_action == "clean"
12022 for mypkg in mymatch:
12023 if unmerge_action == "clean":
12024 myslot = localtree.getslot(mypkg)
12026 # since we're pruning, we don't care about slots
12027 # and put all the pkgs in together
12029 if myslot not in slotmap:
12030 slotmap[myslot] = {}
12031 slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
12033 for mypkg in vartree.dbapi.cp_list(
12034 portage.dep_getkey(mymatch[0])):
12035 myslot = vartree.getslot(mypkg)
12036 if myslot not in slotmap:
12037 slotmap[myslot] = {}
12038 slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
12040 for myslot in slotmap:
12041 counterkeys = slotmap[myslot].keys()
12042 if not counterkeys:
12045 pkgmap[mykey]["protected"].add(
12046 slotmap[myslot][counterkeys[-1]])
12047 del counterkeys[-1]
12049 for counter in counterkeys[:]:
12050 mypkg = slotmap[myslot][counter]
12051 if mypkg not in mymatch:
12052 counterkeys.remove(counter)
12053 pkgmap[mykey]["protected"].add(
12054 slotmap[myslot][counter])
12056 #be pretty and get them in order of merge:
12057 for ckey in counterkeys:
12058 mypkg = slotmap[myslot][ckey]
12059 if mypkg not in all_selected:
12060 pkgmap[mykey]["selected"].add(mypkg)
12061 all_selected.add(mypkg)
12062 # ok, now the last-merged package
12063 # is protected, and the rest are selected
12064 numselected = len(all_selected)
12065 if global_unmerge and not numselected:
12066 portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
12069 if not numselected:
12070 portage.writemsg_stdout(
12071 "\n>>> No packages selected for removal by " + \
12072 unmerge_action + "\n")
12076 vartree.dbapi.flush_cache()
12077 portage.locks.unlockdir(vdb_lock)
12079 from portage.sets.base import EditablePackageSet
12081 # generate a list of package sets that are directly or indirectly listed in "world",
12082 # as there is no persistent list of "installed" sets
12083 installed_sets = ["world"]
12088 pos = len(installed_sets)
12089 for s in installed_sets[pos - 1:]:
12092 candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
12095 installed_sets += candidates
12096 installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
12099 # we don't want to unmerge packages that are still listed in user-editable package sets
12100 # listed in "world" as they would be remerged on the next update of "world" or the
12101 # relevant package sets.
12102 unknown_sets = set()
12103 for cp in xrange(len(pkgmap)):
12104 for cpv in pkgmap[cp]["selected"].copy():
12108 # It could have been uninstalled
12109 # by a concurrent process.
# Safety valve: never let portage unmerge itself on the live root.
12112 if unmerge_action != "clean" and \
12113 root_config.root == "/" and \
12114 portage.match_from_list(
12115 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
12116 msg = ("Not unmerging package %s since there is no valid " + \
12117 "reason for portage to unmerge itself.") % (pkg.cpv,)
12118 for line in textwrap.wrap(msg, 75):
12120 # adjust pkgmap so the display output is correct
12121 pkgmap[cp]["selected"].remove(cpv)
12122 all_selected.remove(cpv)
12123 pkgmap[cp]["protected"].add(cpv)
12127 for s in installed_sets:
12128 # skip sets that the user requested to unmerge, and skip world
12129 # unless we're unmerging a package set (as the package would be
12130 # removed from "world" later on)
12131 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
12135 if s in unknown_sets:
12137 unknown_sets.add(s)
12138 out = portage.output.EOutput()
12139 out.eerror(("Unknown set '@%s' in " + \
12140 "%svar/lib/portage/world_sets") % \
12141 (s, root_config.root))
12144 # only check instances of EditablePackageSet as other classes are generally used for
12145 # special purposes and can be ignored here (and are usually generated dynamically, so the
12146 # user can't do much about them anyway)
12147 if isinstance(sets[s], EditablePackageSet):
12149 # This is derived from a snippet of code in the
12150 # depgraph._iter_atoms_for_pkg() method.
12151 for atom in sets[s].iterAtomsForPackage(pkg):
12152 inst_matches = vartree.dbapi.match(atom)
12153 inst_matches.reverse() # descending order
12155 for inst_cpv in inst_matches:
12157 inst_pkg = _pkg(inst_cpv)
12159 # It could have been uninstalled
12160 # by a concurrent process.
12163 if inst_pkg.cp != atom.cp:
12165 if pkg >= inst_pkg:
12166 # This is descending order, and we're not
12167 # interested in any versions <= pkg given.
12169 if pkg.slot_atom != inst_pkg.slot_atom:
12170 higher_slot = inst_pkg
12172 if higher_slot is None:
# Still referenced by a set and no higher-slot replacement installed:
# protect it instead of unmerging.
12176 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
12177 #print colorize("WARN", "but still listed in the following package sets:")
12178 #print " %s\n" % ", ".join(parents)
12179 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
12180 print colorize("WARN", "still referenced by the following package sets:")
12181 print " %s\n" % ", ".join(parents)
12182 # adjust pkgmap so the display output is correct
12183 pkgmap[cp]["selected"].remove(cpv)
12184 all_selected.remove(cpv)
12185 pkgmap[cp]["protected"].add(cpv)
12189 numselected = len(all_selected)
12190 if not numselected:
12192 "\n>>> No packages selected for removal by " + \
12193 unmerge_action + "\n")
# When order doesn't matter, regroup per cp and sort for display.
12196 # Unmerge order only matters in some cases
12200 selected = d["selected"]
12203 cp = portage.cpv_getkey(iter(selected).next())
12204 cp_dict = unordered.get(cp)
12205 if cp_dict is None:
12207 unordered[cp] = cp_dict
12210 for k, v in d.iteritems():
12211 cp_dict[k].update(v)
12212 pkgmap = [unordered[cp] for cp in sorted(unordered)]
# Display phase: fill "omitted" with same-cp versions not otherwise
# listed, warn about system-profile packages, then print the table.
12214 for x in xrange(len(pkgmap)):
12215 selected = pkgmap[x]["selected"]
12218 for mytype, mylist in pkgmap[x].iteritems():
12219 if mytype == "selected":
12221 mylist.difference_update(all_selected)
12222 cp = portage.cpv_getkey(iter(selected).next())
12223 for y in localtree.dep_match(cp):
12224 if y not in pkgmap[x]["omitted"] and \
12225 y not in pkgmap[x]["selected"] and \
12226 y not in pkgmap[x]["protected"] and \
12227 y not in all_selected:
12228 pkgmap[x]["omitted"].add(y)
12229 if global_unmerge and not pkgmap[x]["selected"]:
12230 #avoid cluttering the preview printout with stuff that isn't getting unmerged
12232 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
12233 writemsg_level(colorize("BAD","\a\n\n!!! " + \
12234 "'%s' is part of your system profile.\n" % cp),
12235 level=logging.WARNING, noiselevel=-1)
12236 writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
12237 "be damaging to your system.\n\n"),
12238 level=logging.WARNING, noiselevel=-1)
12239 if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
12240 countdown(int(settings["EMERGE_WARNING_DELAY"]),
12241 colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
12243 writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
12245 writemsg_level(bold(cp) + ": ", noiselevel=-1)
12246 for mytype in ["selected","protected","omitted"]:
12248 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
12249 if pkgmap[x][mytype]:
12250 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
12251 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12252 for pn, ver, rev in sorted_pkgs:
12256 myversion = ver + "-" + rev
12257 if mytype == "selected":
12259 colorize("UNMERGE_WARN", myversion + " "),
12263 colorize("GOOD", myversion + " "), noiselevel=-1)
12265 writemsg_level("none ", noiselevel=-1)
12267 writemsg_level("\n", noiselevel=-1)
12269 writemsg_level("\n", noiselevel=-1)
12271 writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12272 " packages are slated for removal.\n")
12273 writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12274 " and " + colorize("GOOD", "'omitted'") + \
12275 " packages will not be removed.\n\n")
12277 if "--pretend" in myopts:
12278 #we're done... return
12280 if "--ask" in myopts:
12281 if userquery("Would you like to unmerge these packages?")=="No":
12282 # enter pretend mode for correct formatting of results
12283 myopts["--pretend"] = True
# Execution phase: unmerge each selected package and update "world".
12288 #the real unmerging begins, after a short delay....
12289 if clean_delay and not autoclean:
12290 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12292 for x in xrange(len(pkgmap)):
12293 for y in pkgmap[x]["selected"]:
12294 writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12295 emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12296 mysplit = y.split("/")
12298 retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12299 mysettings, unmerge_action not in ["clean","prune"],
12300 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12301 scheduler=scheduler)
12303 if retval != os.EX_OK:
12304 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12306 raise UninstallFailure(retval)
12309 if clean_world and hasattr(sets["world"], "cleanPackage"):
12310 sets["world"].cleanPackage(vartree.dbapi, y)
12311 emergelog(xterm_titles, " >>> unmerge success: "+y)
# When whole sets were unmerged, drop their @set references from world.
12312 if clean_world and hasattr(sets["world"], "remove"):
12313 for s in root_config.setconfig.active:
12314 sets["world"].remove(SETPREFIX+s)
# chk_updated_info_files: regenerate the GNU info "dir" index (via
# /usr/bin/install-info) for any info directory whose mtime changed
# since the last run, updating prev_mtimes so unchanged directories can
# be skipped next time. Known-harmless install-info warnings are
# filtered; real errors are counted and reported.
# NOTE(review): excerpt has fused line numbers; missing lines include
# counter initializations (icount/badcount/errmsg), try:/else: lines
# and `continue`/blank lines — confirm against upstream.
12317 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12319 if os.path.exists("/usr/bin/install-info"):
12320 out = portage.output.EOutput()
# Collect info dirs whose mtime differs from the cached value.
12325 inforoot=normpath(root+z)
12326 if os.path.isdir(inforoot):
12327 infomtime = long(os.stat(inforoot).st_mtime)
12328 if inforoot not in prev_mtimes or \
12329 prev_mtimes[inforoot] != infomtime:
12330 regen_infodirs.append(inforoot)
12332 if not regen_infodirs:
12333 portage.writemsg_stdout("\n")
12334 out.einfo("GNU info directory index is up-to-date.")
12336 portage.writemsg_stdout("\n")
12337 out.einfo("Regenerating GNU info directory index...")
12339 dir_extensions = ("", ".gz", ".bz2")
12343 for inforoot in regen_infodirs:
12347 if not os.path.isdir(inforoot) or \
12348 not os.access(inforoot, os.W_OK):
12351 file_list = os.listdir(inforoot)
12353 dir_file = os.path.join(inforoot, "dir")
12354 moved_old_dir = False
12355 processed_count = 0
12356 for x in file_list:
# Skip hidden entries, subdirectories, and the "dir" index itself
# (in any compressed form, including ".old" backups).
12357 if x.startswith(".") or \
12358 os.path.isdir(os.path.join(inforoot, x)):
12360 if x.startswith("dir"):
12362 for ext in dir_extensions:
12363 if x == "dir" + ext or \
12364 x == "dir" + ext + ".old":
# Before processing the first file, move any existing dir index aside
# so install-info rebuilds it from scratch.
12369 if processed_count == 0:
12370 for ext in dir_extensions:
12372 os.rename(dir_file + ext, dir_file + ext + ".old")
12373 moved_old_dir = True
12374 except EnvironmentError, e:
12375 if e.errno != errno.ENOENT:
12378 processed_count += 1
12379 myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12380 existsstr="already exists, for file `"
12382 if re.search(existsstr,myso):
12383 # Already exists... Don't increment the count for this.
12385 elif myso[:44]=="install-info: warning: no info dir entry in ":
12386 # This info file doesn't contain a DIR-header: install-info produces this
12387 # (harmless) warning (the --quiet switch doesn't seem to work).
12388 # Don't increment the count for this.
12391 badcount=badcount+1
12392 errmsg += myso + "\n"
12395 if moved_old_dir and not os.path.exists(dir_file):
12396 # We didn't generate a new dir file, so put the old file
12397 # back where it was originally found.
12398 for ext in dir_extensions:
12400 os.rename(dir_file + ext + ".old", dir_file + ext)
12401 except EnvironmentError, e:
12402 if e.errno != errno.ENOENT:
12406 # Clean dir.old cruft so that they don't prevent
12407 # unmerge of otherwise empty directories.
12408 for ext in dir_extensions:
12410 os.unlink(dir_file + ext + ".old")
12411 except EnvironmentError, e:
12412 if e.errno != errno.ENOENT:
12416 #update mtime so we can potentially avoid regenerating.
12417 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
# Final summary: errors (with captured output) or a success count.
12420 out.eerror("Processed %d info files; %d errors." % \
12421 (icount, badcount))
12422 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12425 out.einfo("Processed %d info files." % (icount,))
# display_news_notification: check every configured repository for
# unread GNU/Gentoo news items and print a reminder to run
# `eselect news`. The unread state is updated unless --pretend is set.
# NOTE(review): excerpt has fused line numbers; missing lines likely
# include the `if unreadItems:` guard and blank lines.
12428 def display_news_notification(root_config, myopts):
12429 target_root = root_config.root
12430 trees = root_config.trees
12431 settings = trees["vartree"].settings
12432 portdb = trees["porttree"].dbapi
12433 vardb = trees["vartree"].dbapi
12434 NEWS_PATH = os.path.join("metadata", "news")
12435 UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12436 newsReaderDisplay = False
# --pretend must not mark items as counted/seen.
12437 update = "--pretend" not in myopts
12439 for repo in portdb.getRepositories():
12440 unreadItems = checkUpdatedNewsItems(
12441 portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12443 if not newsReaderDisplay:
12444 newsReaderDisplay = True
12446 print colorize("WARN", " * IMPORTANT:"),
12447 print "%s news items need reading for repository '%s'." % (unreadItems, repo)
# Show the eselect hint once, only when something was unread.
12450 if newsReaderDisplay:
12451 print colorize("WARN", " *"),
12452 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
# display_preserved_libs: after a merge session, warn about preserved
# libraries still registered in the plib registry, listing for each
# package the preserved files and (via the linkmap) the consumers that
# still link against them, truncated at MAX_DISPLAY entries.
# NOTE(review): excerpt has fused line numbers; missing lines include
# MAX_DISPLAY and map initializations, try:/else: around the linkmap
# rebuild, and blank lines.
12455 def display_preserved_libs(vardbapi):
12458 # Ensure the registry is consistent with existing files.
12459 vardbapi.plib_registry.pruneNonExisting()
12461 if vardbapi.plib_registry.hasEntries():
12463 print colorize("WARN", "!!!") + " existing preserved libs:"
12464 plibdata = vardbapi.plib_registry.getPreservedLibs()
12465 linkmap = vardbapi.linkmap
# Rebuilding the linkmap can fail (e.g. scanelf missing); in that case
# consumer information is simply unavailable.
12468 linkmap_broken = False
12472 except portage.exception.CommandNotFound, e:
12473 writemsg_level("!!! Command Not Found: %s\n" % (e,),
12474 level=logging.ERROR, noiselevel=-1)
12476 linkmap_broken = True
# Map each preserved file to its external consumers (excluding other
# preserved libs of the same package).
12478 search_for_owners = set()
12479 for cpv in plibdata:
12480 internal_plib_keys = set(linkmap._obj_key(f) \
12481 for f in plibdata[cpv])
12482 for f in plibdata[cpv]:
12483 if f in consumer_map:
12486 for c in linkmap.findConsumers(f):
12487 # Filter out any consumers that are also preserved libs
12488 # belonging to the same package as the provider.
12489 if linkmap._obj_key(c) not in internal_plib_keys:
12490 consumers.append(c)
12492 consumer_map[f] = consumers
12493 search_for_owners.update(consumers[:MAX_DISPLAY+1])
12495 owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12497 for cpv in plibdata:
12498 print colorize("WARN", ">>>") + " package: %s" % cpv
# Group alternate paths that refer to the same on-disk object.
12500 for f in plibdata[cpv]:
12501 obj_key = linkmap._obj_key(f)
12502 alt_paths = samefile_map.get(obj_key)
12503 if alt_paths is None:
12505 samefile_map[obj_key] = alt_paths
12508 for alt_paths in samefile_map.itervalues():
12509 alt_paths = sorted(alt_paths)
12510 for p in alt_paths:
12511 print colorize("WARN", " * ") + " - %s" % (p,)
12513 consumers = consumer_map.get(f, [])
12514 for c in consumers[:MAX_DISPLAY]:
12515 print colorize("WARN", " * ") + " used by %s (%s)" % \
12516 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
# Exactly one extra consumer: show it; more than one: show a count.
12517 if len(consumers) == MAX_DISPLAY + 1:
12518 print colorize("WARN", " * ") + " used by %s (%s)" % \
12519 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12520 for x in owners.get(consumers[MAX_DISPLAY], [])))
12521 elif len(consumers) > MAX_DISPLAY:
12522 print colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY)
12523 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
# _flush_elog_mod_echo: flush any queued elog mod_echo output so that
# later notifications (news, preserved libs) appear last on screen.
# Returns True when messages were shown (per the docstring).
# NOTE(review): excerpt has fused line numbers; the `try:` before the
# import and the `else:` before the messages_shown assignment fall in
# missing lines — confirm against upstream.
12526 def _flush_elog_mod_echo():
12528 Dump the mod_echo output now so that our other
12529 notifications are shown last.
12531 @returns: True if messages were shown, False otherwise.
12533 messages_shown = False
12535 from portage.elog import mod_echo
12536 except ImportError:
12537 pass # happens during downgrade to a version without the module
# _items is mod_echo's internal pending-message store; non-empty means
# finalize() will actually print something.
12539 messages_shown = bool(mod_echo._items)
12540 mod_echo.finalize()
12541 return messages_shown
# post_emerge: end-of-session housekeeping — log the exit status, flush
# mod_echo, regenerate the info index (unless FEATURES=noinfo), report
# pending config-file updates, show news and preserved-libs warnings.
# Per the docstring it ends by calling sys.exit(retval) (the call
# itself falls in lines missing from this excerpt).
# NOTE(review): excerpt has fused line numbers; missing lines include
# try:/finally: around the vdb lock and the final sys.exit.
12543 def post_emerge(root_config, myopts, mtimedb, retval):
12545 Misc. things to run at the end of a merge session.
12548 Update Config Files
12551 Display preserved libs warnings
12554 @param trees: A dictionary mapping each ROOT to it's package databases
12556 @param mtimedb: The mtimeDB to store data needed across merge invocations
12557 @type mtimedb: MtimeDB class instance
12558 @param retval: Emerge's return value
12562 1. Calls sys.exit(retval)
12565 target_root = root_config.root
12566 trees = { target_root : root_config.trees }
12567 vardbapi = trees[target_root]["vartree"].dbapi
12568 settings = vardbapi.settings
12569 info_mtimes = mtimedb["info"]
12571 # Load the most current variables from ${ROOT}/etc/profile.env
12574 settings.regenerate()
12577 config_protect = settings.get("CONFIG_PROTECT","").split()
12578 infodirs = settings.get("INFOPATH","").split(":") + \
12579 settings.get("INFODIR","").split(":")
12583 if retval == os.EX_OK:
12584 exit_msg = " *** exiting successfully."
12586 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12587 emergelog("notitles" not in settings.features, exit_msg)
12589 _flush_elog_mod_echo()
# If the vdb counter hash is unchanged (or we're pretending), the vdb
# was not modified, so only the news notification is needed.
12591 counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12592 if "--pretend" in myopts or (counter_hash is not None and \
12593 counter_hash == vardbapi._counter_hash()):
12594 display_news_notification(root_config, myopts)
12595 # If vdb state has not changed then there's nothing else to do.
12598 vdb_path = os.path.join(target_root, portage.VDB_PATH)
12599 portage.util.ensure_dirs(vdb_path)
12601 if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
12602 vdb_lock = portage.locks.lockdir(vdb_path)
12606 if "noinfo" not in settings.features:
12607 chk_updated_info_files(target_root,
12608 infodirs, info_mtimes, retval)
12612 portage.locks.unlockdir(vdb_lock)
12614 chk_updated_cfg_files(target_root, config_protect)
12616 display_news_notification(root_config, myopts)
12617 if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
12618 display_preserved_libs(vardbapi)
# NOTE(review): truncated view -- counter initialisation, `continue`
# statements and some branches are missing. Code is left byte-identical;
# only comments were added.
# Scans CONFIG_PROTECT paths for pending ._cfg????_* update files and
# prints a summary telling the user to run an update tool.
12623 def chk_updated_cfg_files(target_root, config_protect):
12625 #number of directories with some protect files in them
12627 for x in config_protect:
12628 x = os.path.join(target_root, x.lstrip(os.path.sep))
12629 if not os.access(x, os.W_OK):
12630 # Avoid Permission denied errors generated
12634 mymode = os.lstat(x).st_mode
12637 if stat.S_ISLNK(mymode):
12638 # We want to treat it like a directory if it
12639 # is a symlink to an existing directory.
12641 real_mode = os.stat(x).st_mode
12642 if stat.S_ISDIR(real_mode):
# Build a find(1) command: recurse for directories, single file otherwise.
12646 if stat.S_ISDIR(mymode):
12647 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12649 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12650 os.path.split(x.rstrip(os.path.sep))
12651 mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12652 a = commands.getstatusoutput(mycommand)
12654 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12656 # Show the error message alone, sending stdout to /dev/null.
12657 os.system(mycommand + " 1>/dev/null")
# Output is NUL-delimited (-print0); split and drop the trailing empty entry.
12659 files = a[1].split('\0')
12660 # split always produces an empty string as the last element
12661 if files and not files[-1]:
12665 print "\n"+colorize("WARN", " * IMPORTANT:"),
12666 if stat.S_ISDIR(mymode):
12667 print "%d config files in '%s' need updating." % \
12670 print "config file '%s' needs updating." % x
12673 print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12674 " section of the " + bold("emerge")
12675 print " "+yellow("*")+" man page to learn how to update config files."
# NOTE(review): truncated view -- docstring delimiters and some @param lines
# are missing. Code is left byte-identical; only comments were added.
12678 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12680 Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
12681 Returns the number of unread (yet relevent) items.
12683 @param portdb: a portage tree database
12684 @type portdb: pordbapi
12685 @param vardb: an installed package database
12686 @type vardb: vardbapi
12689 @param UNREAD_PATH:
12695 1. The number of unread but relevant news items.
# Thin wrapper: delegates entirely to portage.news.NewsManager.
12698 from portage.news import NewsManager
12699 manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12700 return manager.getUnreadItems( repo_id, update=update )
# Inserts "category/" before the first alphanumeric character of an atom,
# preserving any leading operator characters (e.g. ">=").
# NOTE(review): truncated view -- the guard for a no-match `alphanum` and the
# fallback return are missing. Code is left byte-identical; comments only.
12702 def insert_category_into_atom(atom, category):
12703 alphanum = re.search(r'\w', atom)
12705 ret = atom[:alphanum.start()] + "%s/" % category + \
12706 atom[alphanum.start():]
# Returns whether `x` is a valid package atom, allowing a missing category
# by temporarily inserting a dummy "cat/" prefix before validation.
# NOTE(review): truncated view -- the guard line(s) around the insertion are
# missing. Code is left byte-identical; comments only.
12712 def is_valid_package_atom(x):
12713 alphanum = re.search(r'\w', x)
12715 x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12716 return portage.isvalidatom(x)
# Prints a pointer to the Gentoo Handbook section on blocked packages.
# NOTE(review): truncated view (some blank-print lines missing); code is
# byte-identical, comments only.
12718 def show_blocker_docs_link():
12720 print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12721 print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12723 print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
# Prints a pointer to documentation about masked packages.
12726 def show_mask_docs():
12727 print "For more information, see the MASKED PACKAGES section in the emerge"
12728 print "man page or refer to the Gentoo Handbook."
# Implements `emerge --sync` / `emerge --metadata`: updates the Portage tree
# via git, rsync or cvs depending on SYNC/tree state, then refreshes caches
# and runs post-sync hooks.
# NOTE(review): heavily truncated view -- embedded line numbers are
# non-contiguous, so many statements (returns, else-branches, try lines,
# loop headers) are missing. Code is left byte-identical; comments only.
12730 def action_sync(settings, trees, mtimedb, myopts, myaction):
12731 xterm_titles = "notitles" not in settings.features
12732 emergelog(xterm_titles, " === sync")
12733 myportdir = settings.get("PORTDIR", None)
12734 out = portage.output.EOutput()
12736 sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
# Strip a single trailing slash so path joins/compares are consistent.
12738 if myportdir[-1]=="/":
12739 myportdir=myportdir[:-1]
12741 st = os.stat(myportdir)
12745 print ">>>",myportdir,"not found, creating it."
12746 os.makedirs(myportdir,0755)
12747 st = os.stat(myportdir)
12750 spawn_kwargs["env"] = settings.environ()
# Drop privileges to the tree owner's uid/gid when FEATURES=usersync and
# the tree is owned by someone other than the current user/group.
12751 if 'usersync' in settings.features and \
12752 portage.data.secpass >= 2 and \
12753 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12754 st.st_gid != os.getgid() and st.st_mode & 0070):
12756 homedir = pwd.getpwuid(st.st_uid).pw_dir
12760 # Drop privileges when syncing, in order to match
12761 # existing uid/gid settings.
12762 spawn_kwargs["uid"] = st.st_uid
12763 spawn_kwargs["gid"] = st.st_gid
12764 spawn_kwargs["groups"] = [st.st_gid]
12765 spawn_kwargs["env"]["HOME"] = homedir
12767 if not st.st_mode & 0020:
12768 umask = umask | 0020
12769 spawn_kwargs["umask"] = umask
12771 syncuri = settings.get("SYNC", "").strip()
12773 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12774 noiselevel=-1, level=logging.ERROR)
# Detect whether PORTDIR is already under version control.
12777 vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12778 vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12781 dosyncuri = syncuri
12782 updatecache_flg = False
12783 if myaction == "metadata":
12784 print "skipping sync"
12785 updatecache_flg = True
12786 elif ".git" in vcs_dirs:
12787 # Update existing git repository, and ignore the syncuri. We are
12788 # going to trust the user and assume that the user is in the branch
12789 # that he/she wants updated. We'll let the user manage branches with
12791 if portage.process.find_binary("git") is None:
12792 msg = ["Command not found: git",
12793 "Type \"emerge dev-util/git\" to enable git support."]
12795 writemsg_level("!!! %s\n" % l,
12796 level=logging.ERROR, noiselevel=-1)
12798 msg = ">>> Starting git pull in %s..." % myportdir
12799 emergelog(xterm_titles, msg )
12800 writemsg_level(msg + "\n")
12801 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12802 (portage._shell_quote(myportdir),), **spawn_kwargs)
12803 if exitcode != os.EX_OK:
12804 msg = "!!! git pull error in %s." % myportdir
12805 emergelog(xterm_titles, msg)
12806 writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12808 msg = ">>> Git pull in %s successful" % myportdir
12809 emergelog(xterm_titles, msg)
12810 writemsg_level(msg + "\n")
# git discards mtimes, so resynchronize them from the metadata cache.
12811 exitcode = git_sync_timestamps(settings, myportdir)
12812 if exitcode == os.EX_OK:
12813 updatecache_flg = True
12814 elif syncuri[:8]=="rsync://":
# Refuse to rsync over a VCS checkout -- it would clobber it.
12815 for vcs_dir in vcs_dirs:
12816 writemsg_level(("!!! %s appears to be under revision " + \
12817 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12818 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12820 if not os.path.exists("/usr/bin/rsync"):
12821 print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12822 print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
# Build the rsync option list: hardcoded defaults unless the user set
# PORTAGE_RSYNC_OPTS, in which case required options are validated in.
12827 if settings["PORTAGE_RSYNC_OPTS"] == "":
12828 portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12829 rsync_opts.extend([
12830 "--recursive", # Recurse directories
12831 "--links", # Consider symlinks
12832 "--safe-links", # Ignore links outside of tree
12833 "--perms", # Preserve permissions
12834 "--times", # Preserive mod times
12835 "--compress", # Compress the data transmitted
12836 "--force", # Force deletion on non-empty dirs
12837 "--whole-file", # Don't do block transfers, only entire files
12838 "--delete", # Delete files that aren't in the master tree
12839 "--stats", # Show final statistics about what was transfered
12840 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12841 "--exclude=/distfiles", # Exclude distfiles from consideration
12842 "--exclude=/local", # Exclude local from consideration
12843 "--exclude=/packages", # Exclude packages from consideration
12847 # The below validation is not needed when using the above hardcoded
12850 portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12852 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12853 for opt in ("--recursive", "--times"):
12854 if opt not in rsync_opts:
12855 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12856 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12857 rsync_opts.append(opt)
12859 for exclude in ("distfiles", "local", "packages"):
12860 opt = "--exclude=/%s" % exclude
12861 if opt not in rsync_opts:
12862 portage.writemsg(yellow("WARNING:") + \
12863 " adding required option %s not included in " % opt + \
12864 "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12865 rsync_opts.append(opt)
# Official gentoo.org mirrors additionally require timeout/compress/
# whole-file options.
12867 if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12868 def rsync_opt_startswith(opt_prefix):
12869 for x in rsync_opts:
12870 if x.startswith(opt_prefix):
12874 if not rsync_opt_startswith("--timeout="):
12875 rsync_opts.append("--timeout=%d" % mytimeout)
12877 for opt in ("--compress", "--whole-file"):
12878 if opt not in rsync_opts:
12879 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12880 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12881 rsync_opts.append(opt)
12883 if "--quiet" in myopts:
12884 rsync_opts.append("--quiet") # Shut up a lot
12886 rsync_opts.append("--verbose") # Print filelist
12888 if "--verbose" in myopts:
12889 rsync_opts.append("--progress") # Progress meter for each file
12891 if "--debug" in myopts:
12892 rsync_opts.append("--checksum") # Force checksum on all files
12894 # Real local timestamp file.
12895 servertimestampfile = os.path.join(
12896 myportdir, "metadata", "timestamp.chk")
12898 content = portage.util.grabfile(servertimestampfile)
12902 mytimestamp = time.mktime(time.strptime(content[0],
12903 "%a, %d %b %Y %H:%M:%S +0000"))
12904 except (OverflowError, ValueError):
12909 rsync_initial_timeout = \
12910 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12912 rsync_initial_timeout = 15
12915 maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12916 except SystemExit, e:
12917 raise # Needed else can't exit
12919 maxretries=3 #default number of retries
# Parse user/host/port out of the rsync:// URI.
12922 user_name, hostname, port = re.split(
12923 "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12926 if user_name is None:
12928 updatecache_flg=True
12929 all_rsync_opts = set(rsync_opts)
12930 extra_rsync_opts = shlex.split(
12931 settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12932 all_rsync_opts.update(extra_rsync_opts)
# Honor -4/--ipv4 and -6/--ipv6 when resolving mirror addresses.
12933 family = socket.AF_INET
12934 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12935 family = socket.AF_INET
12936 elif socket.has_ipv6 and \
12937 ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12938 family = socket.AF_INET6
12940 SERVER_OUT_OF_DATE = -1
12941 EXCEEDED_MAX_RETRIES = -2
# Resolve the hostname and randomize the mirror order.
12947 for addrinfo in socket.getaddrinfo(
12948 hostname, None, family, socket.SOCK_STREAM):
12949 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12950 # IPv6 addresses need to be enclosed in square brackets
12951 ips.append("[%s]" % addrinfo[4][0])
12953 ips.append(addrinfo[4][0])
12954 from random import shuffle
12956 except SystemExit, e:
12957 raise # Needed else can't exit
12958 except Exception, e:
12959 print "Notice:",str(e)
12964 dosyncuri = syncuri.replace(
12965 "//" + user_name + hostname + port + "/",
12966 "//" + user_name + ips[0] + port + "/", 1)
12967 except SystemExit, e:
12968 raise # Needed else can't exit
12969 except Exception, e:
12970 print "Notice:",str(e)
12974 if "--ask" in myopts:
12975 if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12980 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12981 if "--quiet" not in myopts:
12982 print ">>> Starting rsync with "+dosyncuri+"..."
12984 emergelog(xterm_titles,
12985 ">>> Starting retry %d of %d with %s" % \
12986 (retries,maxretries,dosyncuri))
12987 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12989 if mytimestamp != 0 and "--quiet" not in myopts:
12990 print ">>> Checking server timestamp ..."
12992 rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12994 if "--debug" in myopts:
12997 exitcode = os.EX_OK
12998 servertimestamp = 0
12999 # Even if there's no timestamp available locally, fetch the
13000 # timestamp anyway as an initial probe to verify that the server is
13001 # responsive. This protects us from hanging indefinitely on a
13002 # connection attempt to an unresponsive server which rsync's
13003 # --timeout option does not prevent.
13005 # Temporary file for remote server timestamp comparison.
13006 from tempfile import mkstemp
13007 fd, tmpservertimestampfile = mkstemp()
13009 mycommand = rsynccommand[:]
13010 mycommand.append(dosyncuri.rstrip("/") + \
13011 "/metadata/timestamp.chk")
13012 mycommand.append(tmpservertimestampfile)
# SIGALRM-based timeout guards the initial connection attempt, which
# rsync's own --timeout does not cover.
13016 def timeout_handler(signum, frame):
13017 raise portage.exception.PortageException("timed out")
13018 signal.signal(signal.SIGALRM, timeout_handler)
13019 # Timeout here in case the server is unresponsive. The
13020 # --timeout rsync option doesn't apply to the initial
13021 # connection attempt.
13022 if rsync_initial_timeout:
13023 signal.alarm(rsync_initial_timeout)
13025 mypids.extend(portage.process.spawn(
13026 mycommand, env=settings.environ(), returnpid=True))
13027 exitcode = os.waitpid(mypids[0], 0)[1]
13028 content = portage.grabfile(tmpservertimestampfile)
13030 if rsync_initial_timeout:
13033 os.unlink(tmpservertimestampfile)
13036 except portage.exception.PortageException, e:
# On timeout, kill the stuck rsync child if it is still running.
13040 if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
13041 os.kill(mypids[0], signal.SIGTERM)
13042 os.waitpid(mypids[0], 0)
13043 # This is the same code rsync uses for timeout.
# Normalize the waitpid status into a conventional exit code.
13046 if exitcode != os.EX_OK:
13047 if exitcode & 0xff:
13048 exitcode = (exitcode & 0xff) << 8
13050 exitcode = exitcode >> 8
13052 portage.process.spawned_pids.remove(mypids[0])
13055 servertimestamp = time.mktime(time.strptime(
13056 content[0], "%a, %d %b %Y %H:%M:%S +0000"))
13057 except (OverflowError, ValueError):
13059 del mycommand, mypids, content
# Compare server vs local timestamps to decide: already current,
# server out of date, or proceed with the full rsync.
13060 if exitcode == os.EX_OK:
13061 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
13062 emergelog(xterm_titles,
13063 ">>> Cancelling sync -- Already current.")
13066 print ">>> Timestamps on the server and in the local repository are the same."
13067 print ">>> Cancelling all further sync action. You are already up to date."
13069 print ">>> In order to force sync, remove '%s'." % servertimestampfile
13073 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
13074 emergelog(xterm_titles,
13075 ">>> Server out of date: %s" % dosyncuri)
13078 print ">>> SERVER OUT OF DATE: %s" % dosyncuri
13080 print ">>> In order to force sync, remove '%s'." % servertimestampfile
13083 exitcode = SERVER_OUT_OF_DATE
13084 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
13086 mycommand = rsynccommand + [dosyncuri+"/", myportdir]
13087 exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
13088 if exitcode in [0,1,3,4,11,14,20,21]:
13090 elif exitcode in [1,3,4,11,14,20,21]:
13093 # Code 2 indicates protocol incompatibility, which is expected
13094 # for servers with protocol < 29 that don't support
13095 # --prune-empty-directories. Retry for a server that supports
13096 # at least rsync protocol version 29 (>=rsync-2.6.4).
13101 if retries<=maxretries:
13102 print ">>> Retrying..."
13107 updatecache_flg=False
13108 exitcode = EXCEEDED_MAX_RETRIES
13112 emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
13113 elif exitcode == SERVER_OUT_OF_DATE:
13115 elif exitcode == EXCEEDED_MAX_RETRIES:
13117 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
# Human-readable diagnostics keyed on rsync's documented exit codes
# (syntax error, file I/O error, killed, other failures).
13122 msg.append("Rsync has reported that there is a syntax error. Please ensure")
13123 msg.append("that your SYNC statement is proper.")
13124 msg.append("SYNC=" + settings["SYNC"])
13126 msg.append("Rsync has reported that there is a File IO error. Normally")
13127 msg.append("this means your disk is full, but can be caused by corruption")
13128 msg.append("on the filesystem that contains PORTDIR. Please investigate")
13129 msg.append("and try again after the problem has been fixed.")
13130 msg.append("PORTDIR=" + settings["PORTDIR"])
13132 msg.append("Rsync was killed before it finished.")
13134 msg.append("Rsync has not successfully finished. It is recommended that you keep")
13135 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
13136 msg.append("to use rsync due to firewall or other restrictions. This should be a")
13137 msg.append("temporary problem unless complications exist with your network")
13138 msg.append("(and possibly your system's filesystem) configuration.")
# CVS sync path: initial checkout of gentoo-x86 or incremental update.
13142 elif syncuri[:6]=="cvs://":
13143 if not os.path.exists("/usr/bin/cvs"):
13144 print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
13145 print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
13147 cvsroot=syncuri[6:]
13148 cvsdir=os.path.dirname(myportdir)
13149 if not os.path.exists(myportdir+"/CVS"):
13151 print ">>> Starting initial cvs checkout with "+syncuri+"..."
13152 if os.path.exists(cvsdir+"/gentoo-x86"):
13153 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
13156 os.rmdir(myportdir)
13158 if e.errno != errno.ENOENT:
13160 "!!! existing '%s' directory; exiting.\n" % myportdir)
13163 if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
13164 print "!!! cvs checkout error; exiting."
13166 os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
13169 print ">>> Starting cvs update with "+syncuri+"..."
13170 retval = portage.process.spawn_bash(
13171 "cd %s; cvs -z0 -q update -dP" % \
13172 (portage._shell_quote(myportdir),), **spawn_kwargs)
13173 if retval != os.EX_OK:
13175 dosyncuri = syncuri
13177 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
13178 noiselevel=-1, level=logging.ERROR)
# Post-sync: reload config, regenerate metadata cache, apply global
# updates, then check whether portage itself needs an update.
13181 if updatecache_flg and \
13182 myaction != "metadata" and \
13183 "metadata-transfer" not in settings.features:
13184 updatecache_flg = False
13186 # Reload the whole config from scratch.
13187 settings, trees, mtimedb = load_emerge_config(trees=trees)
13188 root_config = trees[settings["ROOT"]]["root_config"]
13189 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13191 if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
13192 action_metadata(settings, portdb, myopts)
13194 if portage._global_updates(trees, mtimedb["updates"]):
13196 # Reload the whole config from scratch.
13197 settings, trees, mtimedb = load_emerge_config(trees=trees)
13198 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13199 root_config = trees[settings["ROOT"]]["root_config"]
13201 mybestpv = portdb.xmatch("bestmatch-visible",
13202 portage.const.PORTAGE_PACKAGE_ATOM)
13203 mypvs = portage.best(
13204 trees[settings["ROOT"]]["vartree"].dbapi.match(
13205 portage.const.PORTAGE_PACKAGE_ATOM))
13207 chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
# Run the user's post_sync hook if present and executable.
13209 if myaction != "metadata":
13210 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
13211 retval = portage.process.spawn(
13212 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
13213 dosyncuri], env=settings.environ())
13214 if retval != os.EX_OK:
13215 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
13217 if(mybestpv != mypvs) and not "--quiet" in myopts:
13219 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
13220 print red(" * ")+"that you update portage now, before any other packages are updated."
13222 print red(" * ")+"To update portage, run 'emerge portage' now."
13225 display_news_notification(root_config, myopts)
# NOTE(review): truncated view -- embedded line numbers are non-contiguous;
# returns, `continue`s and try lines are missing. Code is byte-identical;
# only comments were added.
13228 def git_sync_timestamps(settings, portdir):
13230 Since git doesn't preserve timestamps, synchronize timestamps between
13231 entries and ebuilds/eclasses. Assume the cache has the correct timestamp
13232 for a given file as long as the file in the working tree is not modified
13233 (relative to HEAD).
13235 cache_dir = os.path.join(portdir, "metadata", "cache")
13236 if not os.path.isdir(cache_dir):
13238 writemsg_level(">>> Synchronizing timestamps...\n")
13240 from portage.cache.cache_errors import CacheError
13242 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
13243 portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13244 except CacheError, e:
13245 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
13246 level=logging.ERROR, noiselevel=-1)
13249 ec_dir = os.path.join(portdir, "eclass")
13251 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13252 if f.endswith(".eclass"))
13254 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13255 level=logging.ERROR, noiselevel=-1)
# Ask git which files are locally modified; those keep their current
# mtimes since the cache can no longer be trusted for them.
13258 args = [portage.const.BASH_BINARY, "-c",
13259 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13260 portage._shell_quote(portdir)]
13262 proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13263 modified_files = set(l.rstrip("\n") for l in proc.stdout)
13265 if rval != os.EX_OK:
13268 modified_eclasses = set(ec for ec in ec_names \
13269 if os.path.join("eclass", ec + ".eclass") in modified_files)
13271 updated_ec_mtimes = {}
13273 for cpv in cache_db:
13274 cpv_split = portage.catpkgsplit(cpv)
13275 if cpv_split is None:
13276 writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13277 level=logging.ERROR, noiselevel=-1)
13280 cat, pn, ver, rev = cpv_split
13281 cat, pf = portage.catsplit(cpv)
13282 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
13283 if relative_eb_path in modified_files:
13287 cache_entry = cache_db[cpv]
13288 eb_mtime = cache_entry.get("_mtime_")
13289 ec_mtimes = cache_entry.get("_eclasses_")
13291 writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13292 level=logging.ERROR, noiselevel=-1)
13294 except CacheError, e:
13295 writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13296 (cpv, e), level=logging.ERROR, noiselevel=-1)
13299 if eb_mtime is None:
13300 writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13301 level=logging.ERROR, noiselevel=-1)
13305 eb_mtime = long(eb_mtime)
13307 writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13308 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13311 if ec_mtimes is None:
13312 writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13313 level=logging.ERROR, noiselevel=-1)
# Skip entries that inherit a locally-modified eclass.
13316 if modified_eclasses.intersection(ec_mtimes):
13319 missing_eclasses = set(ec_mtimes).difference(ec_names)
13320 if missing_eclasses:
13321 writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13322 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13326 eb_path = os.path.join(portdir, relative_eb_path)
13328 current_eb_mtime = os.stat(eb_path)
13330 writemsg_level("!!! Missing ebuild: %s\n" % \
13331 (cpv,), level=logging.ERROR, noiselevel=-1)
# Detect conflicting eclass mtimes recorded by different cache entries.
13334 inconsistent = False
13335 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13336 updated_mtime = updated_ec_mtimes.get(ec)
13337 if updated_mtime is not None and updated_mtime != ec_mtime:
13338 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13339 (cpv, ec), level=logging.ERROR, noiselevel=-1)
13340 inconsistent = True
# Apply cached mtimes to the ebuild and its eclasses.
13346 if current_eb_mtime != eb_mtime:
13347 os.utime(eb_path, (eb_mtime, eb_mtime))
13349 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13350 if ec in updated_ec_mtimes:
13352 ec_path = os.path.join(ec_dir, ec + ".eclass")
13353 current_mtime = long(os.stat(ec_path).st_mtime)
13354 if current_mtime != ec_mtime:
13355 os.utime(ec_path, (ec_mtime, ec_mtime))
13356 updated_ec_mtimes[ec] = ec_mtime
# Transfers metadata from the tree's metadata/cache into the local depcache,
# with an optional percentage progress display.
# NOTE(review): truncated view -- several lines of the nested class and the
# surrounding control flow are missing. Code is byte-identical; comments only.
13360 def action_metadata(settings, portdb, myopts):
13361 portage.writemsg_stdout("\n>>> Updating Portage cache: ")
13362 old_umask = os.umask(0002)
13363 cachedir = os.path.normpath(settings.depcachedir)
# Safety valve: refuse to operate on a primary root directory, since the
# cache transfer can remove stale entries.
13364 if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
13365 "/lib", "/opt", "/proc", "/root", "/sbin",
13366 "/sys", "/tmp", "/usr", "/var"]:
13367 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13368 "ROOT DIRECTORY ON YOUR SYSTEM."
13369 print >> sys.stderr, \
13370 "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13372 if not os.path.exists(cachedir):
13375 ec = portage.eclass_cache.cache(portdb.porttree_root)
13376 myportdir = os.path.realpath(settings["PORTDIR"])
13377 cm = settings.load_best_module("portdbapi.metadbmodule")(
13378 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13380 from portage.cache import util
# Progress reporter that prints a percentage while mirroring the cache.
13382 class percentage_noise_maker(util.quiet_mirroring):
13383 def __init__(self, dbapi):
13385 self.cp_all = dbapi.cp_all()
13386 l = len(self.cp_all)
13387 self.call_update_min = 100000000
13388 self.min_cp_all = l/100.0
13392 def __iter__(self):
13393 for x in self.cp_all:
13395 if self.count > self.min_cp_all:
13396 self.call_update_min = 0
13398 for y in self.dbapi.cp_list(x):
13400 self.call_update_mine = 0
13402 def update(self, *arg):
13404 self.pstr = int(self.pstr) + 1
13407 sys.stdout.write("%s%i%%" % \
13408 ("\b" * (len(str(self.pstr))+1), self.pstr))
13410 self.call_update_min = 10000000
13412 def finish(self, *arg):
13413 sys.stdout.write("\b\b\b\b100%\n")
# In quiet mode, iterate cpvs directly with no progress output.
13416 if "--quiet" in myopts:
13417 def quicky_cpv_generator(cp_all_list):
13418 for x in cp_all_list:
13419 for y in portdb.cp_list(x):
13421 source = quicky_cpv_generator(portdb.cp_all())
13422 noise_maker = portage.cache.util.quiet_mirroring()
13424 noise_maker = source = percentage_noise_maker(portdb)
13425 portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
13426 eclass_cache=ec, verbose_instance=noise_maker)
13429 os.umask(old_umask)
# Regenerates metadata cache entries for the whole tree via MetadataRegen.
# NOTE(review): truncated view -- the `try:` matching the SystemExit handler
# is missing. Code is byte-identical; comments only.
13431 def action_regen(settings, portdb, max_jobs, max_load):
13432 xterm_titles = "notitles" not in settings.features
13433 emergelog(xterm_titles, " === regen")
13434 #regenerate cache entries
13435 portage.writemsg_stdout("Regenerating cache entries...\n")
# stdin is closed (guarded) -- presumably so spawned ebuild processes
# cannot block on terminal input; TODO confirm.
13437 os.close(sys.stdin.fileno())
13438 except SystemExit, e:
13439 raise # Needed else can't exit
13444 regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13447 portage.writemsg_stdout("done!\n")
13448 return regen.returncode
# Implements `emerge --config <atom>`: resolves a single installed package
# (interactively if ambiguous) and runs its pkg_config phase.
# NOTE(review): truncated view -- returns/sys.exit lines and parts of the
# selection loop are missing. Code is byte-identical; comments only.
13450 def action_config(settings, trees, myopts, myfiles):
13451 if len(myfiles) != 1:
13452 print red("!!! config can only take a single package atom at this time\n")
13454 if not is_valid_package_atom(myfiles[0]):
13455 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13457 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13458 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
13462 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13463 except portage.exception.AmbiguousPackageName, e:
13464 # Multiple matches thrown from cpv_expand
13467 print "No packages found.\n"
# Multiple matches: with --ask present a numbered menu, otherwise abort.
13469 elif len(pkgs) > 1:
13470 if "--ask" in myopts:
13472 print "Please select a package to configure:"
13476 options.append(str(idx))
13477 print options[-1]+") "+pkg
13479 options.append("X")
13480 idx = userquery("Selection?", options)
13483 pkg = pkgs[int(idx)-1]
13485 print "The following packages available:"
13488 print "\nPlease use a specific atom or the --ask option."
13494 if "--ask" in myopts:
13495 if userquery("Ready to configure "+pkg+"?") == "No":
13498 print "Configuring pkg..."
13500 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13501 mysettings = portage.config(clone=settings)
13502 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13503 debug = mysettings.get("PORTAGE_DEBUG") == "1"
# NOTE(review): the debug= expression below compares a string to the int 1,
# which is always False in this line -- looks like a latent bug vs. the
# `debug` variable computed above; confirm before changing.
13504 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13506 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
13507 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13508 if retval == os.EX_OK:
13509 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13510 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
13513 def action_info(settings, trees, myopts, myfiles):
13514 print getportageversion(settings["PORTDIR"], settings["ROOT"],
13515 settings.profile_path, settings["CHOST"],
13516 trees[settings["ROOT"]]["vartree"].dbapi)
13518 header_title = "System Settings"
13520 print header_width * "="
13521 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13522 print header_width * "="
13523 print "System uname: "+platform.platform(aliased=1)
13525 lastSync = portage.grabfile(os.path.join(
13526 settings["PORTDIR"], "metadata", "timestamp.chk"))
13527 print "Timestamp of tree:",
13533 output=commands.getstatusoutput("distcc --version")
13535 print str(output[1].split("\n",1)[0]),
13536 if "distcc" in settings.features:
13541 output=commands.getstatusoutput("ccache -V")
13543 print str(output[1].split("\n",1)[0]),
13544 if "ccache" in settings.features:
13549 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13550 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
13551 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13552 myvars = portage.util.unique_array(myvars)
13556 if portage.isvalidatom(x):
13557 pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13558 pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13559 pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13561 for pn, ver, rev in pkg_matches:
13563 pkgs.append(ver + "-" + rev)
13567 pkgs = ", ".join(pkgs)
13568 print "%-20s %s" % (x+":", pkgs)
13570 print "%-20s %s" % (x+":", "[NOT VALID]")
13572 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
13574 if "--verbose" in myopts:
13575 myvars=settings.keys()
13577 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13578 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13579 'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13580 'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13582 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13584 myvars = portage.util.unique_array(myvars)
13585 use_expand = settings.get('USE_EXPAND', '').split()
13587 use_expand_hidden = set(
13588 settings.get('USE_EXPAND_HIDDEN', '').upper().split())
13589 alphabetical_use = '--alphabetical' in myopts
13590 root_config = trees[settings["ROOT"]]['root_config']
13596 print '%s="%s"' % (x, settings[x])
13598 use = set(settings["USE"].split())
13599 for varname in use_expand:
13600 flag_prefix = varname.lower() + "_"
13601 for f in list(use):
13602 if f.startswith(flag_prefix):
13606 print 'USE="%s"' % " ".join(use),
13607 for varname in use_expand:
13608 myval = settings.get(varname)
13610 print '%s="%s"' % (varname, myval),
13613 unset_vars.append(x)
13615 print "Unset: "+", ".join(unset_vars)
13618 if "--debug" in myopts:
13619 for x in dir(portage):
13620 module = getattr(portage, x)
13621 if "cvs_id_string" in dir(module):
13622 print "%s: %s" % (str(x), str(module.cvs_id_string))
13624 # See if we can find any packages installed matching the strings
13625 # passed on the command line
13627 vardb = trees[settings["ROOT"]]["vartree"].dbapi
13628 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13630 mypkgs.extend(vardb.match(x))
13632 # If some packages were found...
13634 # Get our global settings (we only print stuff if it varies from
13635 # the current config)
13636 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13637 auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
13638 auxkeys.append('DEFINED_PHASES')
13640 pkgsettings = portage.config(clone=settings)
13642 for myvar in mydesiredvars:
13643 global_vals[myvar] = set(settings.get(myvar, "").split())
13645 # Loop through each package
13646 # Only print settings if they differ from global settings
13647 header_title = "Package Settings"
13648 print header_width * "="
13649 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13650 print header_width * "="
13651 from portage.output import EOutput
13654 # Get all package specific variables
13655 metadata = dict(izip(auxkeys, vardb.aux_get(cpv, auxkeys)))
13656 pkg = Package(built=True, cpv=cpv,
13657 installed=True, metadata=izip(Package.metadata_keys,
13658 (metadata.get(x, '') for x in Package.metadata_keys)),
13659 root_config=root_config, type_name='installed')
13662 valuesmap[k] = set(metadata[k].split())
13665 for myvar in mydesiredvars:
13666 # If the package variable doesn't match the
13667 # current global variable, something has changed
13668 # so set diff_found so we know to print
13669 if valuesmap[myvar] != global_vals[myvar]:
13670 diff_values[myvar] = valuesmap[myvar]
13672 print "\n%s was built with the following:" % \
13673 colorize("INFORM", str(pkg.cpv))
13675 pkgsettings.setcpv(pkg)
13676 forced_flags = set(chain(pkgsettings.useforce,
13677 pkgsettings.usemask))
13678 use = set(pkg.use.enabled)
13679 use.discard(pkgsettings.get('ARCH'))
13680 use_expand_flags = set()
13683 for varname in use_expand:
13684 flag_prefix = varname.lower() + "_"
13686 if f.startswith(flag_prefix):
13687 use_expand_flags.add(f)
13688 use_enabled.setdefault(
13689 varname.upper(), []).append(f[len(flag_prefix):])
13691 for f in pkg.iuse.all:
13692 if f.startswith(flag_prefix):
13693 use_expand_flags.add(f)
13695 use_disabled.setdefault(
13696 varname.upper(), []).append(f[len(flag_prefix):])
13698 var_order = set(use_enabled)
13699 var_order.update(use_disabled)
13700 var_order = sorted(var_order)
13701 var_order.insert(0, 'USE')
13702 use.difference_update(use_expand_flags)
13703 use_enabled['USE'] = list(use)
13704 use_disabled['USE'] = []
13706 for f in pkg.iuse.all:
13707 if f not in use and \
13708 f not in use_expand_flags:
13709 use_disabled['USE'].append(f)
13711 for varname in var_order:
13712 if varname in use_expand_hidden:
13715 for f in use_enabled.get(varname, []):
13716 flags.append(UseFlagDisplay(f, True, f in forced_flags))
13717 for f in use_disabled.get(varname, []):
13718 flags.append(UseFlagDisplay(f, False, f in forced_flags))
13719 if alphabetical_use:
13720 flags.sort(key=UseFlagDisplay.sort_combined)
13722 flags.sort(key=UseFlagDisplay.sort_separated)
13723 print '%s="%s"' % (varname, ' '.join(str(f) for f in flags)),
13726 # If a difference was found, print the info for
13729 # Print package info
13730 for myvar in mydesiredvars:
13731 if myvar in diff_values:
13732 mylist = list(diff_values[myvar])
13734 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13737 if metadata['DEFINED_PHASES']:
13738 if 'info' not in metadata['DEFINED_PHASES'].split():
13741 print ">>> Attempting to run pkg_info() for '%s'" % pkg.cpv
13742 ebuildpath = vardb.findname(pkg.cpv)
13743 if not ebuildpath or not os.path.exists(ebuildpath):
13744 out.ewarn("No ebuild found for '%s'" % pkg.cpv)
13746 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13747 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
13748 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
# action_search: implement `emerge --search` / `--searchdesc`.
# NOTE(review): this dump elides lines (internal numbering jumps) — e.g. the
# guard before 13753 (presumably `if not myfiles:`) and the `try:` at 13760
# that pairs with the `except re.error` below. Annotations describe only
# what is visible.
13751 def action_search(root_config, myopts, myfiles, spinner):
# Elided early-exit path: printed when no search terms were supplied.
13753 print "emerge: no search terms provided."
# One search instance for all terms; the booleans toggle description
# search (--searchdesc), non-quiet output, and binary-package awareness
# (--usepkg / --usepkgonly).
13755 searchinstance = search(root_config,
13756 spinner, "--searchdesc" in myopts,
13757 "--quiet" not in myopts, "--usepkg" in myopts,
13758 "--usepkgonly" in myopts)
# Execute each term independently; a malformed regex in a term is reported
# rather than propagated (Python 2 `except ExcType, name` syntax).
13759 for mysearch in myfiles:
13761 searchinstance.execute(mysearch)
13762 except re.error, comment:
13763 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
# Elided: likely a sys.exit on regex error. Results are printed here.
13765 searchinstance.output()
# action_depclean: driver for `emerge --depclean` and `emerge --prune`.
# Builds a dependency graph rooted at the system and world sets, selects
# installed packages that nothing in the graph requires, protects packages
# whose shared libraries still have outside consumers, then unmerges the
# remainder in reverse-dependency (topological) order.
# NOTE(review): this dump elides many lines (internal numbering jumps), so
# several names used below (msg, set_args, consumer_map, soname_cache,
# ordered, node, ...) are bound on lines not visible here.
13767 def action_depclean(settings, trees, ldpath_mtimes,
13768 myopts, action, myfiles, spinner):
13769 # Kill packages that aren't explicitly merged or are required as a
13770 # dependency of another package. World file is explicit.
13772 # Global depclean or prune operations are not very safe when there are
13773 # missing dependencies since it's unknown how badly incomplete
13774 # the dependency graph is, and we might accidentally remove packages
13775 # that should have been pulled into the graph. On the other hand, it's
13776 # relatively safe to ignore missing deps when only asked to remove
13777 # specific packages.
13778 allow_missing_deps = len(myfiles) > 0
# Safety banner shown for a full (argument-less) depclean; `msg` is
# initialized on an elided line.
13781 msg.append("Always study the list of packages to be cleaned for any obvious\n")
13782 msg.append("mistakes. Packages that are part of the world set will always\n")
13783 msg.append("be kept. They can be manually added to this set with\n")
13784 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
13785 msg.append("package.provided (see portage(5)) will be removed by\n")
13786 msg.append("depclean, even if they are part of the world set.\n")
13788 msg.append("As a safety measure, depclean will not remove any packages\n")
13789 msg.append("unless *all* required dependencies have been resolved. As a\n")
13790 msg.append("consequence, it is often necessary to run %s\n" % \
13791 good("`emerge --update"))
13792 msg.append(good("--newuse --deep @system @world`") + \
13793 " prior to depclean.\n")
13795 if action == "depclean" and "--quiet" not in myopts and not myfiles:
13796 portage.writemsg_stdout("\n")
13798 portage.writemsg_stdout(colorize("WARN", " * ") + x)
# Bind frequently-used config/tree handles for ROOT.
13800 xterm_titles = "notitles" not in settings.features
13801 myroot = settings["ROOT"]
13802 root_config = trees[myroot]["root_config"]
13803 getSetAtoms = root_config.setconfig.getSetAtoms
13804 vardb = trees[myroot]["vartree"].dbapi
# Resolve the protected sets; `required_sets` dict is created on an
# elided line.
13806 required_set_names = ("system", "world")
13810 for s in required_set_names:
13811 required_sets[s] = InternalPackageSet(
13812 initial_atoms=getSetAtoms(s))
13815 # When removing packages, use a temporary version of world
13816 # which excludes packages that are intended to be eligible for
13818 world_temp_set = required_sets["world"]
13819 system_set = required_sets["system"]
# Warn loudly if either protected set is empty — cleaning without them
# would likely gut the installation.
13821 if not system_set or not world_temp_set:
13824 writemsg_level("!!! You have no system list.\n",
13825 level=logging.ERROR, noiselevel=-1)
13827 if not world_temp_set:
13828 writemsg_level("!!! You have no world file.\n",
13829 level=logging.WARNING, noiselevel=-1)
13831 writemsg_level("!!! Proceeding is likely to " + \
13832 "break your installation.\n",
13833 level=logging.WARNING, noiselevel=-1)
13834 if "--pretend" not in myopts:
13835 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13837 if action == "depclean":
13838 emergelog(xterm_titles, " >>> depclean")
# Validate and expand command-line atoms into args_set; the loop header
# over myfiles is elided.
13841 args_set = InternalPackageSet()
13844 if not is_valid_package_atom(x):
13845 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13846 level=logging.ERROR, noiselevel=-1)
13847 writemsg_level("!!! Please check ebuild(5) for full details.\n")
13850 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13851 except portage.exception.AmbiguousPackageName, e:
13852 msg = "The short ebuild name \"" + x + \
13853 "\" is ambiguous. Please specify " + \
13854 "one of the following " + \
13855 "fully-qualified ebuild names instead:"
13856 for line in textwrap.wrap(msg, 70):
13857 writemsg_level("!!! %s\n" % (line,),
13858 level=logging.ERROR, noiselevel=-1)
13860 writemsg_level(" %s\n" % colorize("INFORM", i),
13861 level=logging.ERROR, noiselevel=-1)
13862 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
# Bail out early when none of the given atoms matches anything installed.
13865 matched_packages = False
13868 matched_packages = True
13870 if not matched_packages:
13871 writemsg_level(">>> No packages selected for removal by %s\n" % \
# Build the removal resolver ("remove" depgraph parameters).
13875 writemsg_level("\nCalculating dependencies ")
13876 resolver_params = create_depgraph_params(myopts, "remove")
13877 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13878 vardb = resolver.trees[myroot]["vartree"].dbapi
13880 if action == "depclean":
13883 # Pull in everything that's installed but not matched
13884 # by an argument atom since we don't want to clean any
13885 # package if something depends on it.
13887 world_temp_set.clear()
# Iteration over installed packages is elided; packages not matched by an
# argument atom are pinned into the temporary world set.
13892 if args_set.findAtomForPackage(pkg) is None:
13893 world_temp_set.add("=" + pkg.cpv)
13895 except portage.exception.InvalidDependString, e:
13896 show_invalid_depstring_notice(pkg,
13897 pkg.metadata["PROVIDE"], str(e))
13899 world_temp_set.add("=" + pkg.cpv)
13902 elif action == "prune":
13904 # Pull in everything that's installed since we don't want
13905 # to prune a package if something depends on it.
13906 world_temp_set.clear()
13907 world_temp_set.update(vardb.cp_all())
13911 # Try to prune everything that's slotted.
13912 for cp in vardb.cp_all():
13913 if len(vardb.cp_list(cp)) > 1:
13916 # Remove atoms from world that match installed packages
13917 # that are also matched by argument atoms, but do not remove
13918 # them if they match the highest installed version.
13921 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13922 if not pkgs_for_cp or pkg not in pkgs_for_cp:
13923 raise AssertionError("package expected in matches: " + \
13924 "cp = %s, cpv = %s matches = %s" % \
13925 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
# match_pkgs returns version-sorted results, so [-1] is the highest
# installed version — TODO confirm ordering against the dbapi contract.
13927 highest_version = pkgs_for_cp[-1]
13928 if pkg == highest_version:
13929 # pkg is the highest version
13930 world_temp_set.add("=" + pkg.cpv)
13933 if len(pkgs_for_cp) <= 1:
13934 raise AssertionError("more packages expected: " + \
13935 "cp = %s, cpv = %s matches = %s" % \
13936 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13939 if args_set.findAtomForPackage(pkg) is None:
13940 world_temp_set.add("=" + pkg.cpv)
13942 except portage.exception.InvalidDependString, e:
13943 show_invalid_depstring_notice(pkg,
13944 pkg.metadata["PROVIDE"], str(e))
13946 world_temp_set.add("=" + pkg.cpv)
# Seed the resolver with the protected sets as SetArg roots, then let it
# complete the dependency graph; `set_args` is created on an elided line.
13950 for s, package_set in required_sets.iteritems():
13951 set_atom = SETPREFIX + s
13952 set_arg = SetArg(arg=set_atom, set=package_set,
13953 root_config=resolver.roots[myroot])
13954 set_args[s] = set_arg
13955 for atom in set_arg.set:
13956 resolver._dep_stack.append(
13957 Dependency(atom=atom, root=myroot, parent=set_arg))
13958 resolver.digraph.add(set_arg, None)
13960 success = resolver._complete_graph()
13961 writemsg_level("\b\b... done!\n")
13963 resolver.display_problems()
# Nested helper: report hard (non-SOFT) unsatisfied deps; returns truthy
# when cleaning must be aborted (unless allow_missing_deps).
13968 def unresolved_deps():
13970 unresolvable = set()
13971 for dep in resolver._initially_unsatisfied_deps:
13972 if isinstance(dep.parent, Package) and \
13973 (dep.priority > UnmergeDepPriority.SOFT):
13974 unresolvable.add((dep.atom, dep.parent.cpv))
13976 if not unresolvable:
13979 if unresolvable and not allow_missing_deps:
13980 prefix = bad(" * ")
13982 msg.append("Dependencies could not be completely resolved due to")
13983 msg.append("the following required packages not being installed:")
13985 for atom, parent in unresolvable:
13986 msg.append(" %s pulled in by:" % (atom,))
13987 msg.append(" %s" % (parent,))
13989 msg.append("Have you forgotten to run " + \
13990 good("`emerge --update --newuse --deep @system @world`") + " prior")
13991 msg.append(("to %s? It may be necessary to manually " + \
13992 "uninstall packages that no longer") % action)
13993 msg.append("exist in the portage tree since " + \
13994 "it may not be possible to satisfy their")
13995 msg.append("dependencies. Also, be aware of " + \
13996 "the --with-bdeps option that is documented")
13997 msg.append("in " + good("`man emerge`") + ".")
13998 if action == "prune":
14000 msg.append("If you would like to ignore " + \
14001 "dependencies then use %s." % good("--nodeps"))
14002 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
14003 level=logging.ERROR, noiselevel=-1)
14007 if unresolved_deps():
# Count required (kept) packages from a graph snapshot; loop header over
# graph nodes is elided.
14010 graph = resolver.digraph.copy()
14011 required_pkgs_total = 0
14013 if isinstance(node, Package):
14014 required_pkgs_total += 1
# Nested helper: print which graph parents keep child_node installed
# (used by the --verbose reverse-dependency display).
14016 def show_parents(child_node):
14017 parent_nodes = graph.parent_nodes(child_node)
14018 if not parent_nodes:
14019 # With --prune, the highest version can be pulled in without any
14020 # real parent since all installed packages are pulled in. In that
14021 # case there's nothing to show here.
14024 for node in parent_nodes:
14025 parent_strs.append(str(getattr(node, "cpv", node)))
14028 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
14029 for parent_str in parent_strs:
14030 msg.append(" %s\n" % (parent_str,))
14032 portage.writemsg_stdout("".join(msg), noiselevel=-1)
# Nested helper: cmp-style comparator (used via cmp_sort_key below).
14034 def cmp_pkg_cpv(pkg1, pkg2):
14035 """Sort Package instances by cpv."""
14036 if pkg1.cpv > pkg2.cpv:
14038 elif pkg1.cpv == pkg2.cpv:
# Nested helper: compute the candidate removal list — installed packages
# absent from the dependency graph (and, for prune/argument runs, matched
# by the argument atoms).
14043 def create_cleanlist():
14044 pkgs_to_remove = []
14046 if action == "depclean":
14049 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
14052 arg_atom = args_set.findAtomForPackage(pkg)
14053 except portage.exception.InvalidDependString:
14054 # this error has already been displayed by now
14058 if pkg not in graph:
14059 pkgs_to_remove.append(pkg)
14060 elif "--verbose" in myopts:
14064 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
14065 if pkg not in graph:
14066 pkgs_to_remove.append(pkg)
14067 elif "--verbose" in myopts:
14070 elif action == "prune":
14071 # Prune really uses all installed instead of world. It's not
14072 # a real reverse dependency so don't display it as such.
14073 graph.remove(set_args["world"])
14075 for atom in args_set:
14076 for pkg in vardb.match_pkgs(atom):
14077 if pkg not in graph:
14078 pkgs_to_remove.append(pkg)
14079 elif "--verbose" in myopts:
14082 if not pkgs_to_remove:
14084 ">>> No packages selected for removal by %s\n" % action)
14085 if "--verbose" not in myopts:
14087 ">>> To see reverse dependencies, use %s\n" % \
14089 if action == "prune":
14091 ">>> To ignore dependencies, use %s\n" % \
14094 return pkgs_to_remove
14096 cleanlist = create_cleanlist()
14099 clean_set = set(cleanlist)
14101 # Check if any of these packages are the sole providers of libraries
14102 # with consumers that have not been selected for removal. If so, these
14103 # packages and any dependencies need to be added to the graph.
14104 real_vardb = trees[myroot]["vartree"].dbapi
14105 linkmap = real_vardb.linkmap
14106 liblist = linkmap.listLibraryObjects()
14107 consumer_cache = {}
14108 provider_cache = {}
# Phase 1: for each removal candidate, map the shared libraries it owns
# to their outside consumers. `consumer_map`, `consumers` and
# `soname_cache` are initialized on elided lines.
14112 writemsg_level(">>> Checking for lib consumers...\n")
14114 for pkg in cleanlist:
14115 pkg_dblink = real_vardb._dblink(pkg.cpv)
14116 provided_libs = set()
14118 for lib in liblist:
14119 if pkg_dblink.isowner(lib, myroot):
14120 provided_libs.add(lib)
14122 if not provided_libs:
14126 for lib in provided_libs:
14127 lib_consumers = consumer_cache.get(lib)
14128 if lib_consumers is None:
14129 lib_consumers = linkmap.findConsumers(lib)
14130 consumer_cache[lib] = lib_consumers
14132 consumers[lib] = lib_consumers
# Drop consumers owned by the providing package itself — they disappear
# together with it.
14137 for lib, lib_consumers in consumers.items():
14138 for consumer_file in list(lib_consumers):
14139 if pkg_dblink.isowner(consumer_file, myroot):
14140 lib_consumers.remove(consumer_file)
14141 if not lib_consumers:
14147 for lib, lib_consumers in consumers.iteritems():
14149 soname = soname_cache.get(lib)
14151 soname = linkmap.getSoname(lib)
14152 soname_cache[lib] = soname
14154 consumer_providers = []
14155 for lib_consumer in lib_consumers:
14156 providers = provider_cache.get(lib)
14157 if providers is None:
14158 providers = linkmap.findProviders(lib_consumer)
14159 provider_cache[lib_consumer] = providers
14160 if soname not in providers:
14161 # Why does this happen?
14163 consumer_providers.append(
14164 (lib_consumer, providers[soname]))
14166 consumers[lib] = consumer_providers
14168 consumer_map[pkg] = consumers
# Phase 2: attribute consumer/provider files back to owning packages in
# one batched pass (getFileOwnerMap), then discard consumers that are
# themselves scheduled for removal or that have an alternative provider.
14172 search_files = set()
14173 for consumers in consumer_map.itervalues():
14174 for lib, consumer_providers in consumers.iteritems():
14175 for lib_consumer, providers in consumer_providers:
14176 search_files.add(lib_consumer)
14177 search_files.update(providers)
14179 writemsg_level(">>> Assigning files to packages...\n")
14180 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
14182 for pkg, consumers in consumer_map.items():
14183 for lib, consumer_providers in consumers.items():
14184 lib_consumers = set()
14186 for lib_consumer, providers in consumer_providers:
14187 owner_set = file_owners.get(lib_consumer)
14188 provider_dblinks = set()
14189 provider_pkgs = set()
14191 if len(providers) > 1:
14192 for provider in providers:
14193 provider_set = file_owners.get(provider)
14194 if provider_set is not None:
14195 provider_dblinks.update(provider_set)
14197 if len(provider_dblinks) > 1:
14198 for provider_dblink in provider_dblinks:
14199 pkg_key = ("installed", myroot,
14200 provider_dblink.mycpv, "nomerge")
14201 if pkg_key not in clean_set:
14202 provider_pkgs.add(vardb.get(pkg_key))
14207 if owner_set is not None:
14208 lib_consumers.update(owner_set)
14210 for consumer_dblink in list(lib_consumers):
14211 if ("installed", myroot, consumer_dblink.mycpv,
14212 "nomerge") in clean_set:
14213 lib_consumers.remove(consumer_dblink)
14217 consumers[lib] = lib_consumers
14221 del consumer_map[pkg]
# Phase 3: warn about packages kept back to protect link-level deps.
14224 # TODO: Implement a package set for rebuilding consumer packages.
14226 msg = "In order to avoid breakage of link level " + \
14227 "dependencies, one or more packages will not be removed. " + \
14228 "This can be solved by rebuilding " + \
14229 "the packages that pulled them in."
14231 prefix = bad(" * ")
14232 from textwrap import wrap
14233 writemsg_level("".join(prefix + "%s\n" % line for \
14234 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
14237 for pkg, consumers in consumer_map.iteritems():
14238 unique_consumers = set(chain(*consumers.values()))
14239 unique_consumers = sorted(consumer.mycpv \
14240 for consumer in unique_consumers)
14242 msg.append(" %s pulled in by:" % (pkg.cpv,))
14243 for consumer in unique_consumers:
14244 msg.append(" %s" % (consumer,))
14246 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
14247 level=logging.WARNING, noiselevel=-1)
14249 # Add lib providers to the graph as children of lib consumers,
14250 # and also add any dependencies pulled in by the provider.
14251 writemsg_level(">>> Adding lib providers to graph...\n")
14253 for pkg, consumers in consumer_map.iteritems():
14254 for consumer_dblink in set(chain(*consumers.values())):
14255 consumer_pkg = vardb.get(("installed", myroot,
14256 consumer_dblink.mycpv, "nomerge"))
14257 if not resolver._add_pkg(pkg,
14258 Dependency(parent=consumer_pkg,
14259 priority=UnmergeDepPriority(runtime=True),
14261 resolver.display_problems()
# Re-complete the graph after adding the protected providers, then
# recompute the clean list against the updated graph.
14264 writemsg_level("\nCalculating dependencies ")
14265 success = resolver._complete_graph()
14266 writemsg_level("\b\b... done!\n")
14267 resolver.display_problems()
14270 if unresolved_deps():
14273 graph = resolver.digraph.copy()
14274 required_pkgs_total = 0
14276 if isinstance(node, Package):
14277 required_pkgs_total += 1
14278 cleanlist = create_cleanlist()
14281 clean_set = set(cleanlist)
14283 # Use a topological sort to create an unmerge order such that
14284 # each package is unmerged before its dependencies. This is
14285 # necessary to avoid breaking things that may need to run
14286 # during pkg_prerm or pkg_postrm phases.
14288 # Create a new graph to account for dependencies between the
14289 # packages being unmerged.
# Edge priorities mirror merge-time semantics: RDEPEND strongest,
# PDEPEND next, DEPEND (build-time) weakest; `priority_map` literal opens
# on an elided line.
14293 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
14294 runtime = UnmergeDepPriority(runtime=True)
14295 runtime_post = UnmergeDepPriority(runtime_post=True)
14296 buildtime = UnmergeDepPriority(buildtime=True)
14298 "RDEPEND": runtime,
14299 "PDEPEND": runtime_post,
14300 "DEPEND": buildtime,
14303 for node in clean_set:
14304 graph.add(node, None)
14306 node_use = node.metadata["USE"].split()
14307 for dep_type in dep_keys:
14308 depstr = node.metadata[dep_type]
# _dep_check_strict is a module-global toggle; restored immediately after
# the call (elided try/finally presumed around it — TODO confirm).
14312 portage.dep._dep_check_strict = False
14313 success, atoms = portage.dep_check(depstr, None, settings,
14314 myuse=node_use, trees=resolver._graph_trees,
14317 portage.dep._dep_check_strict = True
14319 # Ignore invalid deps of packages that will
14320 # be uninstalled anyway.
14323 priority = priority_map[dep_type]
14325 if not isinstance(atom, portage.dep.Atom):
14326 # Ignore invalid atoms returned from dep_check().
14330 matches = vardb.match_pkgs(atom)
14333 for child_node in matches:
14334 if child_node in clean_set:
14335 graph.add(child_node, node, priority=priority)
14338 if len(graph.order) == len(graph.root_nodes()):
14339 # If there are no dependencies between packages
14340 # let unmerge() group them by cat/pn.
14342 cleanlist = [pkg.cpv for pkg in graph.order]
14344 # Order nodes from lowest to highest overall reference count for
14345 # optimal root node selection.
14346 node_refcounts = {}
14347 for node in graph.order:
14348 node_refcounts[node] = len(graph.parent_nodes(node))
14349 def cmp_reference_count(node1, node2):
14350 return node_refcounts[node1] - node_refcounts[node2]
14351 graph.order.sort(key=cmp_sort_key(cmp_reference_count))
# Peel root nodes off the graph repeatedly; progressively ignore weaker
# edge priorities to break circular dependencies.
14353 ignore_priority_range = [None]
14354 ignore_priority_range.extend(
14355 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14356 while not graph.empty():
14357 for ignore_priority in ignore_priority_range:
14358 nodes = graph.root_nodes(ignore_priority=ignore_priority)
14362 raise AssertionError("no root nodes")
14363 if ignore_priority is not None:
14364 # Some deps have been dropped due to circular dependencies,
14365 # so only pop one node in order to minimize the number that
14370 cleanlist.append(node.cpv)
# Hand the ordered list to unmerge(); `ordered` is set on an elided line.
14372 unmerge(root_config, myopts, "unmerge", cleanlist,
14373 ldpath_mtimes, ordered=ordered)
14375 if action == "prune":
# Final summary (suppressed for quiet no-op runs).
14378 if not cleanlist and "--quiet" in myopts:
14381 print "Packages installed: "+str(len(vardb.cpv_all()))
14382 print "Packages in world: " + \
14383 str(len(root_config.sets["world"].getAtoms()))
14384 print "Packages in system: " + \
14385 str(len(root_config.sets["system"].getAtoms()))
14386 print "Required packages: "+str(required_pkgs_total)
14387 if "--pretend" in myopts:
14388 print "Number to remove: "+str(len(cleanlist))
14390 print "Number removed: "+str(len(cleanlist))
# resume_depgraph: rebuild a depgraph from the saved resume list in
# mtimedb, pruning merge-list entries whose dependencies can no longer be
# satisfied (when skip_unsatisfied is set).
# NOTE(review): elided lines include the docstring delimiters around
# 14394-14397, the binding of `skip_masked`, the `try:` paired with the
# `except` at 14409, and the retry loop this body appears to run inside
# (mergelist is mutated and the flow continues) — confirm against the
# full source.
14392 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14394 Construct a depgraph for the given resume list. This will raise
14395 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14397 @returns: (success, depgraph, dropped_tasks)
14400 skip_unsatisfied = True
14401 mergelist = mtimedb["resume"]["mergelist"]
14402 dropped_tasks = set()
14404 mydepgraph = depgraph(settings, trees,
14405 myopts, myparams, spinner)
14407 success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14408 skip_masked=skip_masked)
14409 except depgraph.UnsatisfiedResumeDep, e:
14410 if not skip_unsatisfied:
# Collect every package whose removal from the resume list would leave a
# parent unsatisfied; dict-as-set keyed and valued by the same object.
14413 graph = mydepgraph.digraph
14414 unsatisfied_parents = dict((dep.parent, dep.parent) \
14415 for dep in e.value)
14416 traversed_nodes = set()
14417 unsatisfied_stack = list(unsatisfied_parents)
# DFS upward through the graph: dropping a package can in turn make its
# own parents unsatisfied, so propagate transitively.
14418 while unsatisfied_stack:
14419 pkg = unsatisfied_stack.pop()
14420 if pkg in traversed_nodes:
14422 traversed_nodes.add(pkg)
14424 # If this package was pulled in by a parent
14425 # package scheduled for merge, removing this
14426 # package may cause the parent package's
14427 # dependency to become unsatisfied.
14428 for parent_node in graph.parent_nodes(pkg):
14429 if not isinstance(parent_node, Package) \
14430 or parent_node.operation not in ("merge", "nomerge"):
# Elided: `unsatisfied = set(...)` presumably binds the non-soft children
# returned below — TODO confirm.
14433 graph.child_nodes(parent_node,
14434 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14435 if pkg in unsatisfied:
14436 unsatisfied_parents[parent_node] = parent_node
14437 unsatisfied_stack.append(parent_node)
# Rebuild the mergelist without the unsatisfied entries (mergelist items
# are 4-element lists; tuple(x) makes them hashable for the lookup).
14439 pruned_mergelist = []
14440 for x in mergelist:
14441 if isinstance(x, list) and \
14442 tuple(x) not in unsatisfied_parents:
14443 pruned_mergelist.append(x)
14445 # If the mergelist doesn't shrink then this loop is infinite.
14446 if len(pruned_mergelist) == len(mergelist):
14447 # This happens if a package can't be dropped because
14448 # it's already installed, but it has unsatisfied PDEPEND.
14450 mergelist[:] = pruned_mergelist
14452 # Exclude installed packages that have been removed from the graph due
14453 # to failure to build/install runtime dependencies after the dependent
14454 # package has already been installed.
14455 dropped_tasks.update(pkg for pkg in \
14456 unsatisfied_parents if pkg.operation != "nomerge")
# Allow the discarded depgraph and its Package objects to be collected.
14457 mydepgraph.break_refs(unsatisfied_parents)
14459 del e, graph, traversed_nodes, \
14460 unsatisfied_parents, unsatisfied_stack
14464 return (success, mydepgraph, dropped_tasks)
14466 def action_build(settings, trees, mtimedb,
14467 myopts, myaction, myfiles, spinner):
14469 # validate the state of the resume data
14470 # so that we can make assumptions later.
14471 for k in ("resume", "resume_backup"):
14472 if k not in mtimedb:
14474 resume_data = mtimedb[k]
14475 if not isinstance(resume_data, dict):
14478 mergelist = resume_data.get("mergelist")
14479 if not isinstance(mergelist, list):
14482 for x in mergelist:
14483 if not (isinstance(x, list) and len(x) == 4):
14485 pkg_type, pkg_root, pkg_key, pkg_action = x
14486 if pkg_root not in trees:
14487 # Current $ROOT setting differs,
14488 # so the list must be stale.
14494 resume_opts = resume_data.get("myopts")
14495 if not isinstance(resume_opts, (dict, list)):
14498 favorites = resume_data.get("favorites")
14499 if not isinstance(favorites, list):
14504 if "--resume" in myopts and \
14505 ("resume" in mtimedb or
14506 "resume_backup" in mtimedb):
14508 if "resume" not in mtimedb:
14509 mtimedb["resume"] = mtimedb["resume_backup"]
14510 del mtimedb["resume_backup"]
14512 # "myopts" is a list for backward compatibility.
14513 resume_opts = mtimedb["resume"].get("myopts", [])
14514 if isinstance(resume_opts, list):
14515 resume_opts = dict((k,True) for k in resume_opts)
14516 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14517 resume_opts.pop(opt, None)
14519 # Current options always override resume_opts.
14520 resume_opts.update(myopts)
14522 myopts.update(resume_opts)
14524 if "--debug" in myopts:
14525 writemsg_level("myopts %s\n" % (myopts,))
14527 # Adjust config according to options of the command being resumed.
14528 for myroot in trees:
14529 mysettings = trees[myroot]["vartree"].settings
14530 mysettings.unlock()
14531 adjust_config(myopts, mysettings)
14533 del myroot, mysettings
14535 ldpath_mtimes = mtimedb["ldpath"]
14538 buildpkgonly = "--buildpkgonly" in myopts
14539 pretend = "--pretend" in myopts
14540 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14541 ask = "--ask" in myopts
14542 nodeps = "--nodeps" in myopts
14543 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14544 tree = "--tree" in myopts
14545 if nodeps and tree:
14547 del myopts["--tree"]
14548 portage.writemsg(colorize("WARN", " * ") + \
14549 "--tree is broken with --nodeps. Disabling...\n")
14550 debug = "--debug" in myopts
14551 verbose = "--verbose" in myopts
14552 quiet = "--quiet" in myopts
14553 if pretend or fetchonly:
14554 # make the mtimedb readonly
14555 mtimedb.filename = None
14556 if '--digest' in myopts or 'digest' in settings.features:
14557 if '--digest' in myopts:
14558 msg = "The --digest option"
14560 msg = "The FEATURES=digest setting"
14562 msg += " can prevent corruption from being" + \
14563 " noticed. The `repoman manifest` command is the preferred" + \
14564 " way to generate manifests and it is capable of doing an" + \
14565 " entire repository or category at once."
14566 prefix = bad(" * ")
14567 writemsg(prefix + "\n")
14568 from textwrap import wrap
14569 for line in wrap(msg, 72):
14570 writemsg("%s%s\n" % (prefix, line))
14571 writemsg(prefix + "\n")
14573 if "--quiet" not in myopts and \
14574 ("--pretend" in myopts or "--ask" in myopts or \
14575 "--tree" in myopts or "--verbose" in myopts):
14577 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14579 elif "--buildpkgonly" in myopts:
14583 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14585 print darkgreen("These are the packages that would be %s, in reverse order:") % action
14589 print darkgreen("These are the packages that would be %s, in order:") % action
14592 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14593 if not show_spinner:
14594 spinner.update = spinner.update_quiet
14597 favorites = mtimedb["resume"].get("favorites")
14598 if not isinstance(favorites, list):
14602 print "Calculating dependencies ",
14603 myparams = create_depgraph_params(myopts, myaction)
14605 resume_data = mtimedb["resume"]
14606 mergelist = resume_data["mergelist"]
14607 if mergelist and "--skipfirst" in myopts:
14608 for i, task in enumerate(mergelist):
14609 if isinstance(task, list) and \
14610 task and task[-1] == "merge":
14617 success, mydepgraph, dropped_tasks = resume_depgraph(
14618 settings, trees, mtimedb, myopts, myparams, spinner)
14619 except (portage.exception.PackageNotFound,
14620 depgraph.UnsatisfiedResumeDep), e:
14621 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14622 mydepgraph = e.depgraph
14625 from textwrap import wrap
14626 from portage.output import EOutput
14629 resume_data = mtimedb["resume"]
14630 mergelist = resume_data.get("mergelist")
14631 if not isinstance(mergelist, list):
14633 if mergelist and debug or (verbose and not quiet):
14634 out.eerror("Invalid resume list:")
14637 for task in mergelist:
14638 if isinstance(task, list):
14639 out.eerror(indent + str(tuple(task)))
14642 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14643 out.eerror("One or more packages are either masked or " + \
14644 "have missing dependencies:")
14647 for dep in e.value:
14648 if dep.atom is None:
14649 out.eerror(indent + "Masked package:")
14650 out.eerror(2 * indent + str(dep.parent))
14653 out.eerror(indent + str(dep.atom) + " pulled in by:")
14654 out.eerror(2 * indent + str(dep.parent))
14656 msg = "The resume list contains packages " + \
14657 "that are either masked or have " + \
14658 "unsatisfied dependencies. " + \
14659 "Please restart/continue " + \
14660 "the operation manually, or use --skipfirst " + \
14661 "to skip the first package in the list and " + \
14662 "any other packages that may be " + \
14663 "masked or have missing dependencies."
14664 for line in wrap(msg, 72):
14666 elif isinstance(e, portage.exception.PackageNotFound):
14667 out.eerror("An expected package is " + \
14668 "not available: %s" % str(e))
14670 msg = "The resume list contains one or more " + \
14671 "packages that are no longer " + \
14672 "available. Please restart/continue " + \
14673 "the operation manually."
14674 for line in wrap(msg, 72):
14678 print "\b\b... done!"
14682 portage.writemsg("!!! One or more packages have been " + \
14683 "dropped due to\n" + \
14684 "!!! masking or unsatisfied dependencies:\n\n",
14686 for task in dropped_tasks:
14687 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
14688 portage.writemsg("\n", noiselevel=-1)
14691 if mydepgraph is not None:
14692 mydepgraph.display_problems()
14693 if not (ask or pretend):
14694 # delete the current list and also the backup
14695 # since it's probably stale too.
14696 for k in ("resume", "resume_backup"):
14697 mtimedb.pop(k, None)
14702 if ("--resume" in myopts):
14703 print darkgreen("emerge: It seems we have nothing to resume...")
14706 myparams = create_depgraph_params(myopts, myaction)
14707 if "--quiet" not in myopts and "--nodeps" not in myopts:
14708 print "Calculating dependencies ",
14710 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14712 retval, favorites = mydepgraph.select_files(myfiles)
14713 except portage.exception.PackageNotFound, e:
14714 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14716 except portage.exception.PackageSetNotFound, e:
14717 root_config = trees[settings["ROOT"]]["root_config"]
14718 display_missing_pkg_set(root_config, e.value)
14721 print "\b\b... done!"
14723 mydepgraph.display_problems()
14726 if "--pretend" not in myopts and \
14727 ("--ask" in myopts or "--tree" in myopts or \
14728 "--verbose" in myopts) and \
14729 not ("--quiet" in myopts and "--ask" not in myopts):
14730 if "--resume" in myopts:
14731 mymergelist = mydepgraph.altlist()
14732 if len(mymergelist) == 0:
14733 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14735 favorites = mtimedb["resume"]["favorites"]
14736 retval = mydepgraph.display(
14737 mydepgraph.altlist(reversed=tree),
14738 favorites=favorites)
14739 mydepgraph.display_problems()
14740 if retval != os.EX_OK:
14742 prompt="Would you like to resume merging these packages?"
14744 retval = mydepgraph.display(
14745 mydepgraph.altlist(reversed=("--tree" in myopts)),
14746 favorites=favorites)
14747 mydepgraph.display_problems()
14748 if retval != os.EX_OK:
14751 for x in mydepgraph.altlist():
14752 if isinstance(x, Package) and x.operation == "merge":
14756 sets = trees[settings["ROOT"]]["root_config"].sets
14757 world_candidates = None
14758 if "--noreplace" in myopts and \
14759 not oneshot and favorites:
14760 # Sets that are not world candidates are filtered
14761 # out here since the favorites list needs to be
14762 # complete for depgraph.loadResumeCommand() to
14763 # operate correctly.
14764 world_candidates = [x for x in favorites \
14765 if not (x.startswith(SETPREFIX) and \
14766 not sets[x[1:]].world_candidate)]
14767 if "--noreplace" in myopts and \
14768 not oneshot and world_candidates:
14770 for x in world_candidates:
14771 print " %s %s" % (good("*"), x)
14772 prompt="Would you like to add these packages to your world favorites?"
14773 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14774 prompt="Nothing to merge; would you like to auto-clean packages?"
14777 print "Nothing to merge; quitting."
14780 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14781 prompt="Would you like to fetch the source files for these packages?"
14783 prompt="Would you like to merge these packages?"
14785 if "--ask" in myopts and userquery(prompt) == "No":
14790 # Don't ask again (e.g. when auto-cleaning packages after merge)
14791 myopts.pop("--ask", None)
14793 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14794 if ("--resume" in myopts):
14795 mymergelist = mydepgraph.altlist()
14796 if len(mymergelist) == 0:
14797 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14799 favorites = mtimedb["resume"]["favorites"]
14800 retval = mydepgraph.display(
14801 mydepgraph.altlist(reversed=tree),
14802 favorites=favorites)
14803 mydepgraph.display_problems()
14804 if retval != os.EX_OK:
14807 retval = mydepgraph.display(
14808 mydepgraph.altlist(reversed=("--tree" in myopts)),
14809 favorites=favorites)
14810 mydepgraph.display_problems()
14811 if retval != os.EX_OK:
14813 if "--buildpkgonly" in myopts:
14814 graph_copy = mydepgraph.digraph.clone()
14815 removed_nodes = set()
14816 for node in graph_copy:
14817 if not isinstance(node, Package) or \
14818 node.operation == "nomerge":
14819 removed_nodes.add(node)
14820 graph_copy.difference_update(removed_nodes)
14821 if not graph_copy.hasallzeros(ignore_priority = \
14822 DepPrioritySatisfiedRange.ignore_medium):
14823 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14824 print "!!! You have to merge the dependencies before you can build this package.\n"
14827 if "--buildpkgonly" in myopts:
14828 graph_copy = mydepgraph.digraph.clone()
14829 removed_nodes = set()
14830 for node in graph_copy:
14831 if not isinstance(node, Package) or \
14832 node.operation == "nomerge":
14833 removed_nodes.add(node)
14834 graph_copy.difference_update(removed_nodes)
14835 if not graph_copy.hasallzeros(ignore_priority = \
14836 DepPrioritySatisfiedRange.ignore_medium):
14837 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14838 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
14841 if ("--resume" in myopts):
14842 favorites=mtimedb["resume"]["favorites"]
14843 mymergelist = mydepgraph.altlist()
14844 mydepgraph.break_refs(mymergelist)
14845 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14846 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14847 del mydepgraph, mymergelist
14848 clear_caches(trees)
14850 retval = mergetask.merge()
14851 merge_count = mergetask.curval
14853 if "resume" in mtimedb and \
14854 "mergelist" in mtimedb["resume"] and \
14855 len(mtimedb["resume"]["mergelist"]) > 1:
14856 mtimedb["resume_backup"] = mtimedb["resume"]
14857 del mtimedb["resume"]
14859 mtimedb["resume"]={}
14860 # Stored as a dict starting with portage-2.1.6_rc1, and supported
14861 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14862 # a list type for options.
14863 mtimedb["resume"]["myopts"] = myopts.copy()
14865 # Convert Atom instances to plain str.
14866 mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14868 pkglist = mydepgraph.altlist()
14869 mydepgraph.saveNomergeFavorites()
14870 mydepgraph.break_refs(pkglist)
14871 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14872 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14873 del mydepgraph, pkglist
14874 clear_caches(trees)
14876 retval = mergetask.merge()
14877 merge_count = mergetask.curval
14879 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14880 if "yes" == settings.get("AUTOCLEAN"):
14881 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14882 unmerge(trees[settings["ROOT"]]["root_config"],
14883 myopts, "clean", [],
14884 ldpath_mtimes, autoclean=1)
14886 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14887 + " AUTOCLEAN is disabled. This can cause serious"
14888 + " problems due to overlapping packages.\n")
14889 trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
def multiple_actions(action1, action2):
	# Two mutually exclusive top-level actions (e.g. --sync and --depclean)
	# were requested on the command line; report the conflict on stderr.
	# NOTE(review): the excerpt ends here; the original presumably exits
	# after reporting -- confirm against the full file.
	sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
	sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
def insert_optional_args(args):
	"""
	Parse optional arguments and insert a value if one has
	not been provided. This is done before feeding the args
	to the optparse parser since that parser does not support
	this feature natively.

	NOTE(review): several scaffolding lines (result-list initialization,
	the argument loop, and try/except wrappers) are elided in this
	excerpt; only the visible lines are documented below.
	"""
	# Options that accept an optional value.
	jobs_opts = ("-j", "--jobs")
	root_deps_opt = '--root-deps'
	root_deps_choices = ('True', 'rdeps')
	# Work on a reversed copy so pop() yields arguments left to right.
	arg_stack = args[:]
	arg_stack.reverse()
	arg = arg_stack.pop()
	if arg == root_deps_opt:
		new_args.append(arg)
		# Consume an explicit choice if one immediately follows;
		# otherwise supply the default value expected by optparse.
		if arg_stack and arg_stack[-1] in root_deps_choices:
			new_args.append(arg_stack.pop())
		# insert default argument
		new_args.append('True')
	# True for a clustered short option containing "j" (e.g. "-aj"),
	# never for long "--..." options.
	short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
	if not (short_job_opt or arg in jobs_opts):
		# Not a jobs-style option: pass it through untouched.
		new_args.append(arg)
	# Insert an empty placeholder in order to
	# satisfy the requirements of optparse.
	new_args.append("--jobs")
	if short_job_opt and len(arg) > 2:
		if arg[:2] == "-j":
			# "-jN" form: the characters after -j are the job count.
			job_count = int(arg[2:])
			saved_opts = arg[2:]
		# Clustered form: remember the sibling short options so they
		# can be re-appended after --jobs is handled.
		saved_opts = arg[1:].replace("j", "")
	if job_count is None and arg_stack:
		# A separate following argument may carry the job count.
		job_count = int(arg_stack[-1])
	# Discard the job count from the stack
	# since we're consuming it here.
	if job_count is None:
		# unlimited number of jobs
		new_args.append("True")
	new_args.append(str(job_count))
	if saved_opts is not None:
		# Re-insert the short options that were clustered with "j".
		new_args.append("-" + saved_opts)
def parse_opts(tmpcmdline, silent=False):
	"""
	Parse the emerge command line into (myaction, myopts, myfiles)
	using optparse.

	NOTE(review): this excerpt elides many lines (most option-table
	keys, try/except wrappers, loop scaffolding and return-value
	bookkeeping); only the visible lines are documented below.
	"""
	global actions, options, shortmapping
	# Alternate spellings mapped onto their canonical long option.
	longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
	# Options that take an argument; maps "--opt" -> optparse kwargs.
	# NOTE(review): the option-name keys for most entries are elided here.
	argument_options = {
			"help":"specify the location for portage configuration files",
			"help":"enable or disable color output",
			"choices":("y", "n")
			"help" : "Specifies the number of packages to build " + \
		"--load-average": {
			"help" :"Specifies that no new builds should be started " + \
				"if there are other builds running and the load average " + \
				"is at least LOAD (a floating-point number).",
			"help":"include unnecessary build time dependencies",
			"choices":("y", "n")
			"help":"specify conditions to trigger package reinstallation",
			"choices":["changed-use"]
			"help" : "specify the target root filesystem for merging packages",
			"help" : "modify interpretation of depedencies",
			"choices" :("True", "rdeps")
	from optparse import OptionParser
	parser = OptionParser()
	# emerge implements its own --help handling.
	if parser.has_option("--help"):
		parser.remove_option("--help")
	# Boolean action flags (--sync, --depclean, ...).
	for action_opt in actions:
		parser.add_option("--" + action_opt, action="store_true",
			dest=action_opt.replace("-", "_"), default=False)
	# Boolean option flags.
	for myopt in options:
		parser.add_option(myopt, action="store_true",
			dest=myopt.lstrip("--").replace("-", "_"), default=False)
	# Single-letter aliases for long options.
	for shortopt, longopt in shortmapping.iteritems():
		parser.add_option("-" + shortopt, action="store_true",
			dest=longopt.lstrip("--").replace("-", "_"), default=False)
	# Alternate long-option spellings.
	for myalias, myopt in longopt_aliases.iteritems():
		parser.add_option(myalias, action="store_true",
			dest=myopt.lstrip("--").replace("-", "_"), default=False)
	# Value-taking options defined in argument_options above.
	for myopt, kwargs in argument_options.iteritems():
		parser.add_option(myopt,
			dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
	# Fill in implicit values for options with optional arguments
	# (optparse cannot do this natively).
	tmpcmdline = insert_optional_args(tmpcmdline)
	myoptions, myargs = parser.parse_args(args=tmpcmdline)
	# Normalize --root-deps: the string "True" means boolean True.
	if myoptions.root_deps == "True":
		myoptions.root_deps = True
	# Normalize --jobs: "True" means unlimited, otherwise an integer.
	if myoptions.jobs == "True":
		jobs = int(myoptions.jobs)
	if jobs is not True and \
		writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
			(myoptions.jobs,), noiselevel=-1)
	myoptions.jobs = jobs
	# Normalize --load-average to a positive float (invalid -> None).
	if myoptions.load_average:
		load_average = float(myoptions.load_average)
		if load_average <= 0.0:
			load_average = None
			writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
				(myoptions.load_average,), noiselevel=-1)
		myoptions.load_average = load_average
	# Copy parsed flags into the myopts dict keyed by "--option".
	for myopt in options:
		v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
		myopts[myopt] = True
	for myopt in argument_options:
		v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
	# --searchdesc implies --search.
	if myoptions.searchdesc:
		myoptions.search = True
	# Determine the single requested action; conflicts are fatal.
	for action_opt in actions:
		v = getattr(myoptions, action_opt.replace("-", "_"))
		multiple_actions(myaction, action_opt)
		myaction = action_opt
	return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
	"""Validate the configuration of every root in *trees*.

	Asks each root's vartree settings object to validate itself,
	which may emit configuration warnings.
	"""
	for target_root in trees:
		trees[target_root]["vartree"].settings.validate()
def clear_caches(trees):
	"""Flush per-tree dbapi caches and the global portage directory cache.

	Called after large graph calculations to release memory that the
	various database layers have accumulated.
	"""
	for root_trees in trees.itervalues():
		porttree_db = root_trees["porttree"].dbapi
		porttree_db.melt()
		porttree_db._aux_cache.clear()
		bintree_db = root_trees["bintree"].dbapi
		bintree_db._aux_cache.clear()
		bintree_db._clear_cache()
		root_trees["vartree"].dbapi.linkmap._clear_cache()
	portage.dircache.clear()
def load_emerge_config(trees=None):
	"""
	Build and return (settings, trees, mtimedb) for emerge, honoring
	PORTAGE_CONFIGROOT and ROOT environment overrides.

	NOTE(review): a few lines (kwargs initialization, conditional
	bodies) are elided in this excerpt.
	"""
	# Forward relevant environment overrides to portage.create_trees().
	for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
		v = os.environ.get(envvar, None)
		if v and v.strip():
	trees = portage.create_trees(trees=trees, **kwargs)
	# Attach a RootConfig (set configuration etc.) to every root.
	for root, root_trees in trees.iteritems():
		settings = root_trees["vartree"].settings
		setconfig = load_default_config(settings, root_trees)
		root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
	settings = trees["/"]["vartree"].settings
	# Presumably selects the settings of a non-"/" root when one exists;
	# the loop's condition lines are elided in this excerpt -- confirm.
	for myroot in trees:
		settings = trees[myroot]["vartree"].settings
	# The mtimedb persists resume lists and update timestamps.
	mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
	mtimedb = portage.MtimeDB(mtimedbfile)
	return settings, trees, mtimedb
def adjust_config(myopts, settings):
	"""Make emerge specific adjustments to the config."""
	# NOTE(review): several try: lines, default initializations and
	# else-branches are elided in this excerpt.

	# To enhance usability, make some vars case insensitive by forcing them to
	for myvar in ("AUTOCLEAN", "NOCOLOR"):
		if myvar in settings:
			settings[myvar] = settings[myvar].lower()
			settings.backup_changes(myvar)

	# Kill noauto as it will break merges otherwise.
	if "noauto" in settings.features:
		settings.features.remove('noauto')
		settings['FEATURES'] = ' '.join(sorted(settings.features))
		settings.backup_changes("FEATURES")

	# Parse CLEAN_DELAY, falling back to the compiled-in default on error.
		CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
	except ValueError, e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
			settings["CLEAN_DELAY"], noiselevel=-1)
	settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
	settings.backup_changes("CLEAN_DELAY")

	# Parse EMERGE_WARNING_DELAY the same way (default 10 seconds).
	EMERGE_WARNING_DELAY = 10
		EMERGE_WARNING_DELAY = int(settings.get(
			"EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
	except ValueError, e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
			settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
	settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
	settings.backup_changes("EMERGE_WARNING_DELAY")

	# Propagate --quiet / --verbose into the ebuild environment.
	if "--quiet" in myopts:
		settings["PORTAGE_QUIET"]="1"
		settings.backup_changes("PORTAGE_QUIET")

	if "--verbose" in myopts:
		settings["PORTAGE_VERBOSE"] = "1"
		settings.backup_changes("PORTAGE_VERBOSE")

	# Set so that configs will be merged regardless of remembered status
	if ("--noconfmem" in myopts):
		settings["NOCONFMEM"]="1"
		settings.backup_changes("NOCONFMEM")

	# Set various debug markers... They should be merged somehow.
		PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
		if PORTAGE_DEBUG not in (0, 1):
			portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
				PORTAGE_DEBUG, noiselevel=-1)
			portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
	except ValueError, e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
			settings["PORTAGE_DEBUG"], noiselevel=-1)
	# --debug forces PORTAGE_DEBUG on (assignment body elided here).
	if "--debug" in myopts:
	settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
	settings.backup_changes("PORTAGE_DEBUG")

	# Enable color output unless NOCOLOR explicitly disables it.
	if settings.get("NOCOLOR") not in ("yes","true"):
		portage.output.havecolor = 1

	"""The explicit --color < y | n > option overrides the NOCOLOR environment
	variable and stdout auto-detection."""
	if "--color" in myopts:
		if "y" == myopts["--color"]:
			portage.output.havecolor = 1
			settings["NOCOLOR"] = "false"
			# else-branch (color disabled) elided in this excerpt:
			portage.output.havecolor = 0
			settings["NOCOLOR"] = "true"
		settings.backup_changes("NOCOLOR")
	elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
		# Auto-disable color when stdout is not a terminal.
		portage.output.havecolor = 0
		settings["NOCOLOR"] = "true"
		settings.backup_changes("NOCOLOR")
def apply_priorities(settings):
	# Apply process scheduling priorities (CPU nice and I/O priority)
	# for the emerge process.
	# NOTE(review): the body is elided in this excerpt; presumably it
	# calls nice(settings) and ionice(settings) below -- confirm.
def nice(settings):
	# Renice the current process according to PORTAGE_NICENESS (default "0").
	# NOTE(review): the enclosing try: line is elided in this excerpt.
	os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
	except (OSError, ValueError), e:
		# Report (but do not propagate) a rejected or unparsable value.
		out = portage.output.EOutput()
		out.eerror("Failed to change nice value to '%s'" % \
			settings["PORTAGE_NICENESS"])
		out.eerror("%s\n" % str(e))
def ionice(settings):
	# Run the user-configured PORTAGE_IONICE_COMMAND to set this
	# process's I/O priority.
	# NOTE(review): several guard lines (empty-command early return,
	# try: wrapper) are elided in this excerpt.
	ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
	ionice_cmd = shlex.split(ionice_cmd)
	# Substitute ${PID} in the command with the current process id.
	from portage.util import varexpand
	variables = {"PID" : str(os.getpid())}
	cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
	rval = portage.process.spawn(cmd, env=os.environ)
	except portage.exception.CommandNotFound:
		# The OS kernel probably doesn't support ionice,
		# so return silently.
	if rval != os.EX_OK:
		# Non-zero exit status: point the user at the documentation.
		out = portage.output.EOutput()
		out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
		out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
def display_missing_pkg_set(root_config, set_name):
	# Tell the user that *set_name* does not exist and list the sets
	# that are actually configured for this root.
	# NOTE(review): the msg-list initialization line is elided here.
	msg.append(("emerge: There are no sets to satisfy '%s'. " + \
		"The following sets exist:") % \
		colorize("INFORM", set_name))
	for s in sorted(root_config.sets):
		msg.append(" %s" % s)
	writemsg_level("".join("%s\n" % l for l in msg),
		level=logging.ERROR, noiselevel=-1)
def expand_set_arguments(myfiles, myaction, root_config):
	"""
	Expand @set arguments on the emerge command line into their member
	atoms (including simple set-expression syntax), returning the
	expanded argument list together with a return code.

	NOTE(review): many lines (loop headers, variable initialization,
	try/except and else branches) are elided in this excerpt.
	"""
	setconfig = root_config.setconfig

	sets = setconfig.getSets()

	# In order to know exactly which atoms/sets should be added to the
	# world file, the depgraph performs set expansion later. It will get
	# confused about where the atoms came from if it's not allowed to
	# expand them itself.
	do_not_expand = (None, )
	# Bare "system"/"world" arguments are rewritten as set references.
	if a in ("system", "world"):
		newargs.append(SETPREFIX+a)

	# separators for set arguments
	# WARNING: all operators must be of equal length
	DIFF_OPERATOR = "-@"
	UNION_OPERATOR = "+@"

	# First pass: handle "@setname[arg1,k=v,...]" style per-set options.
	for i in range(0, len(myfiles)):
		if myfiles[i].startswith(SETPREFIX):
			x = myfiles[i][len(SETPREFIX):]
			start = x.find(ARG_START)
			end = x.find(ARG_END)
			if start > 0 and start < end:
				namepart = x[:start]
				argpart = x[start+1:end]

				# TODO: implement proper quoting
				args = argpart.split(",")
				# "k=v" entries become options; bare entries mean "True".
				k, v = a.split("=", 1)
				options[a] = "True"
				setconfig.update(namepart, options)
				newset += (x[:start-len(namepart)]+namepart)
				x = x[end+len(ARG_END):]
			myfiles[i] = SETPREFIX+newset

	sets = setconfig.getSets()

	# display errors that occured while loading the SetConfig instance
	for e in setconfig.errors:
		print colorize("BAD", "Error during set creation: %s" % e)

	# emerge relies on the existance of sets with names "world" and "system"
	required_sets = ("world", "system")

	for s in required_sets:
		missing_sets.append(s)

	# Build a human-readable list of the missing required sets.
	if len(missing_sets) > 2:
		missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
		missing_sets_str += ', and "%s"' % missing_sets[-1]
	elif len(missing_sets) == 2:
		missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
		missing_sets_str = '"%s"' % missing_sets[-1]
	msg = ["emerge: incomplete set configuration, " + \
		"missing set(s): %s" % missing_sets_str]
	msg.append("  sets defined: %s" % ", ".join(sets))
	msg.append("  This usually means that '%s'" % \
		(os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
	msg.append("  is missing or corrupt.")
	writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)

	unmerge_actions = ("unmerge", "prune", "clean", "depclean")

	# Second pass: expand each @set argument into its member atoms.
	if a.startswith(SETPREFIX):
		# support simple set operations (intersection, difference and union)
		# on the commandline. Expressions are evaluated strictly left-to-right
		if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
			expression = a[len(SETPREFIX):]
			# Peel operators off the right end until only a set name
			# remains; operands/operators are collected left-to-right.
			while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
				is_pos = expression.rfind(IS_OPERATOR)
				diff_pos = expression.rfind(DIFF_OPERATOR)
				union_pos = expression.rfind(UNION_OPERATOR)
				op_pos = max(is_pos, diff_pos, union_pos)
				s1 = expression[:op_pos]
				s2 = expression[op_pos+len(IS_OPERATOR):]
				op = expression[op_pos:op_pos+len(IS_OPERATOR)]
				display_missing_pkg_set(root_config, s2)
				expr_sets.insert(0, s2)
				expr_ops.insert(0, op)
			if not expression in sets:
				display_missing_pkg_set(root_config, expression)
			expr_sets.insert(0, expression)
			# Evaluate the collected operators left-to-right.
			result = set(setconfig.getSetAtoms(expression))
			for i in range(0, len(expr_ops)):
				s2 = setconfig.getSetAtoms(expr_sets[i+1])
				if expr_ops[i] == IS_OPERATOR:
					result.intersection_update(s2)
				elif expr_ops[i] == DIFF_OPERATOR:
					result.difference_update(s2)
				elif expr_ops[i] == UNION_OPERATOR:
					# union branch body elided in this excerpt
					raise NotImplementedError("unknown set operator %s" % expr_ops[i])
			newargs.extend(result)
			# Plain "@setname" argument (no operators).
			s = a[len(SETPREFIX):]
			display_missing_pkg_set(root_config, s)
			setconfig.active.append(s)
			set_atoms = setconfig.getSetAtoms(s)
		except portage.exception.PackageSetNotFound, e:
			writemsg_level(("emerge: the given set '%s' " + \
				"contains a non-existent set named '%s'.\n") % \
				(s, e), level=logging.ERROR, noiselevel=-1)
		# Unmerge-style actions require sets that support unmerging.
		if myaction in unmerge_actions and \
			not sets[s].supportsOperation("unmerge"):
			sys.stderr.write("emerge: the given set '%s' does " % s + \
				"not support unmerge operations\n")
		elif not set_atoms:
			print "emerge: '%s' is an empty set" % s
		elif myaction not in do_not_expand:
			newargs.extend(set_atoms)
			# Leave the set reference intact so the depgraph can
			# expand it itself (see do_not_expand above).
			newargs.append(SETPREFIX+s)
		# Report any errors collected while loading this set
		# (loop body elided in this excerpt).
		for e in sets[s].errors:
	return (newargs, retval)
def repo_name_check(trees):
	# Warn about repositories that lack a profiles/repo_name entry.
	# Returns True when at least one repository is missing one.
	# NOTE(review): the loop header over repos and the msg-list
	# initialization are elided in this excerpt.
	missing_repo_names = set()
	for root, root_trees in trees.iteritems():
		if "porttree" in root_trees:
			portdb = root_trees["porttree"].dbapi
			# Start with every tree, then discard those with a name.
			missing_repo_names.update(portdb.porttrees)
			repos = portdb.getRepositories()
				missing_repo_names.discard(portdb.getRepositoryPath(r))
			if portdb.porttree_root in missing_repo_names and \
				not os.path.exists(os.path.join(
				portdb.porttree_root, "profiles")):
				# This is normal if $PORTDIR happens to be empty,
				# so don't warn about it.
				missing_repo_names.remove(portdb.porttree_root)

	if missing_repo_names:
		msg.append("WARNING: One or more repositories " + \
			"have missing repo_name entries:")
		for p in missing_repo_names:
			msg.append("\t%s/profiles/repo_name" % (p,))
		msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
			"should be a plain text file containing a unique " + \
			"name for the repository on the first line.", 70))
		writemsg_level("".join("%s\n" % l for l in msg),
			level=logging.WARNING, noiselevel=-1)

	return bool(missing_repo_names)
def config_protect_check(trees):
	# Warn when CONFIG_PROTECT is empty for any root, since that can
	# let updates clobber configuration files.
	# NOTE(review): one guard line (probably selecting non-"/" roots
	# for the message suffix) is elided in this excerpt.
	for root, root_trees in trees.iteritems():
		if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
			msg = "!!! CONFIG_PROTECT is empty"
			msg += " for '%s'" % root
			writemsg_level(msg, level=logging.WARN, noiselevel=-1)
def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
	# An unqualified package name matched packages in several categories;
	# list the fully-qualified alternatives (with search output unless
	# --quiet is in effect).
	# NOTE(review): several lines, including a call continuation for
	# insert_category_into_atom(), are elided in this excerpt.
	if "--quiet" in myopts:
		print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
		print "!!! one of the following fully-qualified ebuild names instead:\n"
		for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
			print " " + colorize("INFORM", cp)

	# Verbose path: run a search for each matching category/package.
	s = search(root_config, spinner, "--searchdesc" in myopts,
		"--quiet" not in myopts, "--usepkg" in myopts,
		"--usepkgonly" in myopts)
	null_cp = portage.dep_getkey(insert_category_into_atom(
	cat, atom_pn = portage.catsplit(null_cp)
	s.searchkey = atom_pn
	for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
	print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
	print "!!! one of the above fully-qualified ebuild names instead.\n"
def profile_check(trees, myaction, myopts):
	# Verify that every root has a valid profile; actions that work
	# without one (info, sync, --version, --help) are exempted.
	# NOTE(review): the return statements and the loop's failure branch
	# are elided in this excerpt.
	if myaction in ("info", "sync"):
	elif "--version" in myopts or "--help" in myopts:
	for root, root_trees in trees.iteritems():
		if root_trees["root_config"].settings.profiles:
		# generate some profile related warning messages
		validate_ebuild_environment(trees)
		msg = "If you have just changed your profile configuration, you " + \
			"should revert back to the previous configuration. Due to " + \
			"your current profile being invalid, allowed actions are " + \
			"limited to --help, --info, --sync, and --version."
		writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
			level=logging.ERROR, noiselevel=-1)
15535 global portage # NFC why this is necessary now - genone
15536 portage._disable_legacy_globals()
15537 # Disable color until we're sure that it should be enabled (after
15538 # EMERGE_DEFAULT_OPTS has been parsed).
15539 portage.output.havecolor = 0
15540 # This first pass is just for options that need to be known as early as
15541 # possible, such as --config-root. They will be parsed again later,
15542 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15543 # the value of --config-root).
15544 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15545 if "--debug" in myopts:
15546 os.environ["PORTAGE_DEBUG"] = "1"
15547 if "--config-root" in myopts:
15548 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15549 if "--root" in myopts:
15550 os.environ["ROOT"] = myopts["--root"]
15552 # Portage needs to ensure a sane umask for the files it creates.
15554 settings, trees, mtimedb = load_emerge_config()
15555 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15556 rval = profile_check(trees, myaction, myopts)
15557 if rval != os.EX_OK:
15560 if portage._global_updates(trees, mtimedb["updates"]):
15562 # Reload the whole config from scratch.
15563 settings, trees, mtimedb = load_emerge_config(trees=trees)
15564 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15566 xterm_titles = "notitles" not in settings.features
15569 if "--ignore-default-opts" not in myopts:
15570 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15571 tmpcmdline.extend(sys.argv[1:])
15572 myaction, myopts, myfiles = parse_opts(tmpcmdline)
15574 if "--digest" in myopts:
15575 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15576 # Reload the whole config from scratch so that the portdbapi internal
15577 # config is updated with new FEATURES.
15578 settings, trees, mtimedb = load_emerge_config(trees=trees)
15579 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15581 for myroot in trees:
15582 mysettings = trees[myroot]["vartree"].settings
15583 mysettings.unlock()
15584 adjust_config(myopts, mysettings)
15585 if '--pretend' not in myopts and myaction in \
15586 (None, 'clean', 'depclean', 'prune', 'unmerge'):
15587 mysettings["PORTAGE_COUNTER_HASH"] = \
15588 trees[myroot]["vartree"].dbapi._counter_hash()
15589 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15591 del myroot, mysettings
15593 apply_priorities(settings)
15595 spinner = stdout_spinner()
15596 if "candy" in settings.features:
15597 spinner.update = spinner.update_scroll
15599 if "--quiet" not in myopts:
15600 portage.deprecated_profile_check(settings=settings)
15601 repo_name_check(trees)
15602 config_protect_check(trees)
15604 for mytrees in trees.itervalues():
15605 mydb = mytrees["porttree"].dbapi
15606 # Freeze the portdbapi for performance (memoize all xmatch results).
15610 if "moo" in myfiles:
15613 Larry loves Gentoo (""" + platform.system() + """)
15615 _______________________
15616 < Have you mooed today? >
15617 -----------------------
15627 ext = os.path.splitext(x)[1]
15628 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15629 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15632 root_config = trees[settings["ROOT"]]["root_config"]
15633 if myaction == "list-sets":
15634 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15638 # only expand sets for actions taking package arguments
15639 oldargs = myfiles[:]
15640 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15641 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15642 if retval != os.EX_OK:
15645 # Need to handle empty sets specially, otherwise emerge will react
15646 # with the help message for empty argument lists
15647 if oldargs and not myfiles:
15648 print "emerge: no targets left after set expansion"
15651 if ("--tree" in myopts) and ("--columns" in myopts):
15652 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15655 if ("--quiet" in myopts):
15656 spinner.update = spinner.update_quiet
15657 portage.util.noiselimit = -1
15659 # Always create packages if FEATURES=buildpkg
15660 # Imply --buildpkg if --buildpkgonly
15661 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15662 if "--buildpkg" not in myopts:
15663 myopts["--buildpkg"] = True
15665 # Always try and fetch binary packages if FEATURES=getbinpkg
15666 if ("getbinpkg" in settings.features):
15667 myopts["--getbinpkg"] = True
15669 if "--buildpkgonly" in myopts:
15670 # --buildpkgonly will not merge anything, so
15671 # it cancels all binary package options.
15672 for opt in ("--getbinpkg", "--getbinpkgonly",
15673 "--usepkg", "--usepkgonly"):
15674 myopts.pop(opt, None)
15676 if "--fetch-all-uri" in myopts:
15677 myopts["--fetchonly"] = True
15679 if "--skipfirst" in myopts and "--resume" not in myopts:
15680 myopts["--resume"] = True
15682 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15683 myopts["--usepkgonly"] = True
15685 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15686 myopts["--getbinpkg"] = True
15688 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15689 myopts["--usepkg"] = True
15691 # Also allow -K to apply --usepkg/-k
15692 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15693 myopts["--usepkg"] = True
15695 # Allow -p to remove --ask
15696 if ("--pretend" in myopts) and ("--ask" in myopts):
15697 print ">>> --pretend disables --ask... removing --ask from options."
15698 del myopts["--ask"]
15700 # forbid --ask when not in a terminal
15701 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15702 if ("--ask" in myopts) and (not sys.stdin.isatty()):
15703 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15707 if settings.get("PORTAGE_DEBUG", "") == "1":
15708 spinner.update = spinner.update_quiet
15710 if "python-trace" in settings.features:
15711 import portage.debug
15712 portage.debug.set_trace(True)
15714 if not ("--quiet" in myopts):
15715 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15716 spinner.update = spinner.update_basic
15718 if myaction == 'version':
15719 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15720 settings.profile_path, settings["CHOST"],
15721 trees[settings["ROOT"]]["vartree"].dbapi)
15723 elif "--help" in myopts:
15724 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15727 if "--debug" in myopts:
15728 print "myaction", myaction
15729 print "myopts", myopts
15731 if not myaction and not myfiles and "--resume" not in myopts:
15732 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15735 pretend = "--pretend" in myopts
15736 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15737 buildpkgonly = "--buildpkgonly" in myopts
15739 # check if root user is the current user for the actions where emerge needs this
15740 if portage.secpass < 2:
15741 # We've already allowed "--version" and "--help" above.
15742 if "--pretend" not in myopts and myaction not in ("search","info"):
15743 need_superuser = not \
15745 (buildpkgonly and secpass >= 1) or \
15746 myaction in ("metadata", "regen") or \
15747 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15748 if portage.secpass < 1 or \
15751 access_desc = "superuser"
15753 access_desc = "portage group"
15754 # Always show portage_group_warning() when only portage group
15755 # access is required but the user is not in the portage group.
15756 from portage.data import portage_group_warning
15757 if "--ask" in myopts:
15758 myopts["--pretend"] = True
15759 del myopts["--ask"]
15760 print ("%s access is required... " + \
15761 "adding --pretend to options.\n") % access_desc
15762 if portage.secpass < 1 and not need_superuser:
15763 portage_group_warning()
15765 sys.stderr.write(("emerge: %s access is " + \
15766 "required.\n\n") % access_desc)
15767 if portage.secpass < 1 and not need_superuser:
15768 portage_group_warning()
15771 disable_emergelog = False
15772 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15774 disable_emergelog = True
15776 if myaction in ("search", "info"):
15777 disable_emergelog = True
15778 if disable_emergelog:
15779 """ Disable emergelog for everything except build or unmerge
15780 operations. This helps minimize parallel emerge.log entries that can
15781 confuse log parsers. We especially want it disabled during
15782 parallel-fetch, which uses --resume --fetchonly."""
15784 def emergelog(*pargs, **kargs):
15787 if not "--pretend" in myopts:
15788 emergelog(xterm_titles, "Started emerge on: "+\
15789 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15792 myelogstr=" ".join(myopts)
15794 myelogstr+=" "+myaction
15796 myelogstr += " " + " ".join(oldargs)
15797 emergelog(xterm_titles, " *** emerge " + myelogstr)
15800 def emergeexitsig(signum, frame):
# Termination-signal handler installed for SIGINT and SIGTERM below.
# First ignore further SIGINT/SIGTERM so a repeated Ctrl-C cannot
# interrupt the shutdown path (including any atexit-registered
# cleanup), then report which signal triggered the exit and leave
# with status 100+signum so signal exits are distinguishable from
# ordinary emerge exit codes.
15801 signal.signal(signal.SIGINT, signal.SIG_IGN)
15802 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15803 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15804 sys.exit(100+signum)
15805 signal.signal(signal.SIGINT, emergeexitsig)
15806 signal.signal(signal.SIGTERM, emergeexitsig)
15809 """This gets out final log message in before we quit."""
15810 if "--pretend" not in myopts:
15811 emergelog(xterm_titles, " *** terminating.")
15812 if "notitles" not in settings.features:
15814 portage.atexit_register(emergeexit)
15816 if myaction in ("config", "metadata", "regen", "sync"):
15817 if "--pretend" in myopts:
15818 sys.stderr.write(("emerge: The '%s' action does " + \
15819 "not support '--pretend'.\n") % myaction)
15822 if "sync" == myaction:
15823 return action_sync(settings, trees, mtimedb, myopts, myaction)
15824 elif "metadata" == myaction:
15825 action_metadata(settings, portdb, myopts)
15826 elif myaction=="regen":
15827 validate_ebuild_environment(trees)
15828 return action_regen(settings, portdb, myopts.get("--jobs"),
15829 myopts.get("--load-average"))
15831 elif "config"==myaction:
15832 validate_ebuild_environment(trees)
15833 action_config(settings, trees, myopts, myfiles)
15836 elif "search"==myaction:
15837 validate_ebuild_environment(trees)
15838 action_search(trees[settings["ROOT"]]["root_config"],
15839 myopts, myfiles, spinner)
15840 elif myaction in ("clean", "unmerge") or \
15841 (myaction == "prune" and "--nodeps" in myopts):
15842 validate_ebuild_environment(trees)
15844 # Ensure atoms are valid before calling unmerge().
15845 # For backward compat, leading '=' is not required.
15847 if is_valid_package_atom(x) or \
15848 is_valid_package_atom("=" + x):
15851 msg.append("'%s' is not a valid package atom." % (x,))
15852 msg.append("Please check ebuild(5) for full details.")
15853 writemsg_level("".join("!!! %s\n" % line for line in msg),
15854 level=logging.ERROR, noiselevel=-1)
15857 # When given a list of atoms, unmerge
15858 # them in the order given.
15859 ordered = myaction == "unmerge"
15860 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15861 mtimedb["ldpath"], ordered=ordered):
15862 if not (buildpkgonly or fetchonly or pretend):
15863 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15865 elif myaction in ("depclean", "info", "prune"):
15867 # Ensure atoms are valid before calling unmerge().
15868 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15871 if is_valid_package_atom(x):
15873 valid_atoms.append(
15874 portage.dep_expand(x, mydb=vardb, settings=settings))
15875 except portage.exception.AmbiguousPackageName, e:
15876 msg = "The short ebuild name \"" + x + \
15877 "\" is ambiguous. Please specify " + \
15878 "one of the following " + \
15879 "fully-qualified ebuild names instead:"
15880 for line in textwrap.wrap(msg, 70):
15881 writemsg_level("!!! %s\n" % (line,),
15882 level=logging.ERROR, noiselevel=-1)
15884 writemsg_level(" %s\n" % colorize("INFORM", i),
15885 level=logging.ERROR, noiselevel=-1)
15886 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15890 msg.append("'%s' is not a valid package atom." % (x,))
15891 msg.append("Please check ebuild(5) for full details.")
15892 writemsg_level("".join("!!! %s\n" % line for line in msg),
15893 level=logging.ERROR, noiselevel=-1)
15896 if myaction == "info":
15897 return action_info(settings, trees, myopts, valid_atoms)
15899 validate_ebuild_environment(trees)
15900 action_depclean(settings, trees, mtimedb["ldpath"],
15901 myopts, myaction, valid_atoms, spinner)
15902 if not (buildpkgonly or fetchonly or pretend):
15903 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15904 # "update", "system", or just process files:
15906 validate_ebuild_environment(trees)
15909 if x.startswith(SETPREFIX) or \
15910 is_valid_package_atom(x):
15912 if x[:1] == os.sep:
15920 msg.append("'%s' is not a valid package atom." % (x,))
15921 msg.append("Please check ebuild(5) for full details.")
15922 writemsg_level("".join("!!! %s\n" % line for line in msg),
15923 level=logging.ERROR, noiselevel=-1)
15926 if "--pretend" not in myopts:
15927 display_news_notification(root_config, myopts)
15928 retval = action_build(settings, trees, mtimedb,
15929 myopts, myaction, myfiles, spinner)
15930 root_config = trees[settings["ROOT"]]["root_config"]
15931 post_emerge(root_config, myopts, mtimedb, retval)