2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
8 from collections import deque
28 from os import path as osp
29 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
32 from portage import digraph
33 from portage.const import NEWS_LIB_PATH
36 import portage.xpak, commands, errno, re, socket, time
37 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
38 nc_len, red, teal, turquoise, xtermTitle, \
39 xtermTitleReset, yellow
40 from portage.output import create_color_func
41 good = create_color_func("GOOD")
42 bad = create_color_func("BAD")
43 # white looks bad on terminals with white background
44 from portage.output import bold as white
48 portage.dep._dep_check_strict = True
51 import portage.exception
52 from portage.data import secpass
53 from portage.elog.messages import eerror
54 from portage.util import normalize_path as normpath
55 from portage.util import cmp_sort_key, writemsg, writemsg_level
56 from portage.sets import load_default_config, SETPREFIX
57 from portage.sets.base import InternalPackageSet
59 from itertools import chain, izip
62 import cPickle as pickle
67 from cStringIO import StringIO
69 from StringIO import StringIO
class stdout_spinner(object):
	# NOTE(review): the opening of the scroll_msgs list literal is not
	# visible in this view; these strings are the messages displayed by
	# update_scroll() below.
		"Gentoo Rocks ("+platform.system()+")",
		"Thank you for using Gentoo. :)",
		"Are you actually trying to read this?",
		"How many times have you stared at this?",
		"We are generating the cache right now",
		"You are paying too much attention.",
		"A theory is better than its explanation.",
		"Phasers locked on target, Captain.",
		"Thrashing is just virtual crashing.",
		"To be is to program.",
		"Real Users hate Real Programmers.",
		"When all else fails, read the instructions.",
		"Functionality breeds Contempt.",
		"The future lies ahead.",
		"3.1415926535897932384626433832795028841971694",
		"Sometimes insanity is the only alternative.",
		"Inaccuracy saves a world of explanation.",

	# Characters cycled through by update_twirl().
	twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"

		# Default update style is the twirl spinner; the scroll message is
		# chosen pseudo-randomly from the current time.
		self.update = self.update_twirl
		self.scroll_sequence = self.scroll_msgs[
			int(time.time() * 100) % len(self.scroll_msgs)]
		# Minimum seconds between terminal writes (see _return_early()).
		self.min_display_latency = 0.05

	def _return_early(self):
		"""
		Flushing output to the tty too frequently wastes cpu time. Therefore,
		each update* method should return without doing any output when this
		method returns True.
		"""
		cur_time = time.time()
		if cur_time - self.last_update < self.min_display_latency:
		self.last_update = cur_time

	def update_basic(self):
		# Emit a dot on every 100th call; position wraps at 500.
		self.spinpos = (self.spinpos + 1) % 500
		if self._return_early():
		if (self.spinpos % 100) == 0:
			if self.spinpos == 0:
				sys.stdout.write(". ")
				sys.stdout.write(".")

	def update_scroll(self):
		if self._return_early():
		# First half of the cycle scrolls the message forward, the second
		# half scrolls it back.
		if(self.spinpos >= len(self.scroll_sequence)):
			sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
				len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
			sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
		self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))

	def update_twirl(self):
		# Rotate through the twirl characters, overwriting in place.
		self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
		if self._return_early():
		sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])

	def update_quiet(self):
146 def userquery(prompt, responses=None, colours=None):
147 """Displays a prompt and a set of responses, then waits for a response
148 which is checked against the responses and the first to match is
149 returned. An empty response will match the first value in responses. The
150 input buffer is *not* cleared prior to the prompt!
153 responses: a List of Strings.
154 colours: a List of Functions taking and returning a String, used to
155 process the responses for display. Typically these will be functions
156 like red() but could be e.g. lambda x: "DisplayString".
157 If responses is omitted, defaults to ["Yes", "No"], [green, red].
158 If only colours is omitted, defaults to [bold, ...].
160 Returns a member of the List responses. (If called without optional
161 arguments, returns "Yes" or "No".)
162 KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
164 if responses is None:
165 responses = ["Yes", "No"]
167 create_color_func("PROMPT_CHOICE_DEFAULT"),
168 create_color_func("PROMPT_CHOICE_OTHER")
170 elif colours is None:
172 colours=(colours*len(responses))[:len(responses)]
176 response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
177 for key in responses:
178 # An empty response will match the first value in responses.
179 if response.upper()==key[:len(response)].upper():
181 print "Sorry, response '%s' not understood." % response,
182 except (EOFError, KeyboardInterrupt):
# Top-level emerge actions (mutually exclusive command verbs).
actions = frozenset([
"clean", "config", "depclean",
"info", "list-sets", "metadata",
"prune", "regen", "search",
"sync", "unmerge", "version",
# Long (boolean) command line options accepted by emerge.
"--ask", "--alphabetical",
"--buildpkg", "--buildpkgonly",
"--changelog", "--columns",
"--fetchonly", "--fetch-all-uri",
"--getbinpkg", "--getbinpkgonly",
"--help", "--ignore-default-opts",
"--newuse", "--nocolor",
"--nodeps", "--noreplace",
"--nospinner", "--oneshot",
"--onlydeps", "--pretend",
"--quiet", "--resume",
"--searchdesc", "--selective",
"--usepkg", "--usepkgonly",
# Mapping of single-letter short options to their long equivalents.
"b":"--buildpkg", "B":"--buildpkgonly",
"c":"--clean", "C":"--unmerge",
"d":"--debug", "D":"--deep",
"f":"--fetchonly", "F":"--fetch-all-uri",
"g":"--getbinpkg", "G":"--getbinpkgonly",
"k":"--usepkg", "K":"--usepkgonly",
"n":"--noreplace", "N":"--newuse",
"o":"--onlydeps", "O":"--nodeps",
"p":"--pretend", "P":"--prune",
"s":"--search", "S":"--searchdesc",
"v":"--verbose", "V":"--version"
def emergelog(xterm_titles, mystr, short_msg=None):
	# Mirror the short message into the xterm title bar when enabled.
	if xterm_titles and short_msg:
		if "HOSTNAME" in os.environ:
			short_msg = os.environ["HOSTNAME"]+": "+short_msg
		xtermTitle(short_msg)
		# Append a timestamped entry to the emerge log, holding a lock so
		# that concurrent emerge processes do not interleave writes.
		file_path = "/var/log/emerge.log"
		mylogfile = open(file_path, "a")
		portage.util.apply_secpass_permissions(file_path,
			uid=portage.portage_uid, gid=portage.portage_gid,
		mylock = portage.locks.lockfile(mylogfile)
		# seek because we may have gotten held up by the lock.
		# if so, we may not be positioned at the end of the file.
		mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
		portage.locks.unlockfile(mylock)
	except (IOError,OSError,portage.exception.PortageException), e:
		# Logging is best-effort; report the failure instead of raising.
		print >> sys.stderr, "emergelog():",e
267 def countdown(secs=5, doing="Starting"):
269 print ">>> Waiting",secs,"seconds before starting..."
270 print ">>> (Control-C to abort)...\n"+doing+" in: ",
274 sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
# formats a size given in bytes nicely
def format_size(mysize):
	# Pre-formatted strings are passed through unchanged.
	if isinstance(mysize, basestring):
	if 0 != mysize % 1024:
		# Always round up to the next kB so that it doesn't show 0 kB when
		# some small file still needs to be fetched.
		mysize += 1024 - mysize % 1024
	mystr=str(mysize/1024)
		# Insert thousands separators into the kB figure.
		mystr=mystr[:mycount]+","+mystr[mycount:]
def getgccversion(chost):
	"""
	rtype: C{str}
	return: the current in-use gcc version
	"""
	gcc_ver_command = 'gcc -dumpversion'
	gcc_ver_prefix = 'gcc-'

	gcc_not_found_error = red(
	"!!! No gcc found. You probably need to 'source /etc/profile'\n" +
	"!!! to update the environment of this terminal and possibly\n" +
	"!!! other terminals also.\n"

	# Preferred source: gcc-config reports the active compiler profile.
	mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
	if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
		return myoutput.replace(chost + "-", gcc_ver_prefix, 1)

	# Fall back to asking the CHOST-prefixed gcc binary directly.
	mystatus, myoutput = commands.getstatusoutput(
		chost + "-" + gcc_ver_command)
	if mystatus == os.EX_OK:
		return gcc_ver_prefix + myoutput

	# Last resort: plain "gcc" from PATH.
	mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
	if mystatus == os.EX_OK:
		return gcc_ver_prefix + myoutput

	portage.writemsg(gcc_not_found_error, noiselevel=-1)
	return "[unavailable]"
def getportageversion(portdir, target_root, profile, chost, vardb):
	"""Build the "Portage x.y.z (profile, gcc, libc, kernel arch)" banner
	string shown by emerge --version / --info."""
	profilever = "unavailable"
		realpath = os.path.realpath(profile)
		basepath = os.path.realpath(os.path.join(portdir, "profiles"))
		if realpath.startswith(basepath):
			# Display the profile path relative to $PORTDIR/profiles.
			profilever = realpath[1 + len(basepath):]
			# Not under profiles/: show the raw symlink target, marked "!".
			profilever = "!" + os.readlink(profile)
		del realpath, basepath

	# Determine the installed libc version from the virtual providers.
	libclist = vardb.match("virtual/libc")
	libclist += vardb.match("virtual/glibc")
	libclist = portage.util.unique_array(libclist)
		xs=portage.catpkgsplit(x)
			libcver+=","+"-".join(xs[1:])
			libcver="-".join(xs[1:])
		libcver="unavailable"

	gccver = getgccversion(chost)
	unameout=platform.release()+" "+platform.machine()

	return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
def create_depgraph_params(myopts, myaction):
	#configure emerge engine parameters
	#
	# self: include _this_ package regardless of if it is merged.
	# selective: exclude the package if it is merged
	# recurse: go into the dependencies
	# deep: go into the dependencies of already merged packages
	# empty: pretend nothing is merged
	# complete: completely account for all known dependencies
	# remove: build graph for use in removing packages
	myparams = set(["recurse"])

	if myaction == "remove":
		myparams.add("remove")
		myparams.add("complete")

	# Options that imply keeping already-merged packages untouched.
	if "--update" in myopts or \
		"--newuse" in myopts or \
		"--reinstall" in myopts or \
		"--noreplace" in myopts:
		myparams.add("selective")
	if "--emptytree" in myopts:
		myparams.add("empty")
		# --emptytree re-merges everything, so "selective" is contradictory.
		myparams.discard("selective")
	if "--nodeps" in myopts:
		myparams.discard("recurse")
	if "--deep" in myopts:
	if "--complete-graph" in myopts:
		myparams.add("complete")
391 # search functionality
class search(object):

	def __init__(self, root_config, spinner, searchdesc,
		verbose, usepkg, usepkgonly):
		"""Searches the available and installed packages for the supplied search key.
		The list of available and installed packages is created at object instantiation.
		This makes successive searches faster."""
		self.settings = root_config.settings
		self.vartree = root_config.trees["vartree"]
		self.spinner = spinner
		self.verbose = verbose
		self.searchdesc = searchdesc
		self.root_config = root_config
		self.setconfig = root_config.setconfig
		self.matches = {"pkg" : []}

		# fake_portdb delegates the portdb interface to this object's
		# _-prefixed methods, which aggregate over several databases.
		self.portdb = fake_portdb
		for attrib in ("aux_get", "cp_all",
			"xmatch", "findname", "getFetchMap"):
			setattr(fake_portdb, attrib, getattr(self, "_"+attrib))

		portdb = root_config.trees["porttree"].dbapi
		bindb = root_config.trees["bintree"].dbapi
		vardb = root_config.trees["vartree"].dbapi

		if not usepkgonly and portdb._have_root_eclass_dir:
			self._dbs.append(portdb)

		if (usepkg or usepkgonly) and bindb.cp_all():
			self._dbs.append(bindb)

		self._dbs.append(vardb)
		self._portdb = portdb

			# Union of category/pn names across all searched databases.
			cp_all.update(db.cp_all())
		return list(sorted(cp_all))

	def _aux_get(self, *args, **kwargs):
		# Return metadata from the first database that knows the cpv.
				return db.aux_get(*args, **kwargs)

	def _findname(self, *args, **kwargs):
			if db is not self._portdb:
				# We don't want findname to return anything
				# unless it's an ebuild in a portage tree.
				# Otherwise, it's already built and we don't
			func = getattr(db, "findname", None)
				value = func(*args, **kwargs)

	def _getFetchMap(self, *args, **kwargs):
			func = getattr(db, "getFetchMap", None)
				value = func(*args, **kwargs)

	def _visible(self, db, cpv, metadata):
		# Wrap the cpv in a Package instance so the shared visibility
		# logic can be applied uniformly across db types.
		installed = db is self.vartree.dbapi
		built = installed or db is not self._portdb
			pkg_type = "installed"
		return visible(self.settings,
			Package(type_name=pkg_type, root_config=self.root_config,
			cpv=cpv, built=built, installed=installed, metadata=metadata))

	def _xmatch(self, level, atom):
		"""
		This method does not expand old-style virtuals because it
		is restricted to returning matches for a single ${CATEGORY}/${PN}
		and old-style virtual matches unreliable for that when querying
		multiple package databases. If necessary, old-style virtuals
		can be performed on atoms prior to calling this method.
		"""
		cp = portage.dep_getkey(atom)
		if level == "match-all":
				if hasattr(db, "xmatch"):
					matches.update(db.xmatch(level, atom))
					matches.update(db.match(atom))
			result = list(x for x in matches if portage.cpv_getkey(x) == cp)
			db._cpv_sort_ascending(result)
		elif level == "match-visible":
				if hasattr(db, "xmatch"):
					matches.update(db.xmatch(level, atom))
					# No xmatch support: filter matches through _visible().
					db_keys = list(db._aux_cache_keys)
					for cpv in db.match(atom):
						metadata = izip(db_keys,
							db.aux_get(cpv, db_keys))
						if not self._visible(db, cpv, metadata):
			result = list(x for x in matches if portage.cpv_getkey(x) == cp)
			db._cpv_sort_ascending(result)
		elif level == "bestmatch-visible":
				if hasattr(db, "xmatch"):
					cpv = db.xmatch("bestmatch-visible", atom)
					if not cpv or portage.cpv_getkey(cpv) != cp:
					if not result or cpv == portage.best([cpv, result]):
					db_keys = Package.metadata_keys
					# break out of this loop with highest visible
					# match, checked in descending order
					for cpv in reversed(db.match(atom)):
						if portage.cpv_getkey(cpv) != cp:
						metadata = izip(db_keys,
							db.aux_get(cpv, db_keys))
						if not self._visible(db, cpv, metadata):
						if not result or cpv == portage.best([cpv, result]):
			raise NotImplementedError(level)

	def execute(self,searchkey):
		"""Performs the search for the supplied search key"""
		self.searchkey=searchkey
		self.packagematches = []
			self.matches = {"pkg":[], "desc":[], "set":[]}
			self.matches = {"pkg":[], "set":[]}
		print "Searching... ",

		# A leading '%' selects regex search; a leading '@' makes the
		# match include the category part of the name.
		if self.searchkey.startswith('%'):
			self.searchkey = self.searchkey[1:]
		if self.searchkey.startswith('@'):
			self.searchkey = self.searchkey[1:]
			self.searchre=re.compile(self.searchkey,re.I)
			self.searchre=re.compile(re.escape(self.searchkey), re.I)
		for package in self.portdb.cp_all():
			self.spinner.update()
				match_string = package[:]
				match_string = package.split("/")[-1]
			if self.searchre.search(match_string):
				if not self.portdb.xmatch("match-visible", package):
				self.matches["pkg"].append([package,masked])
			elif self.searchdesc: # DESCRIPTION searching
				full_package = self.portdb.xmatch("bestmatch-visible", package)
					#no match found; we don't want to query description
					full_package = portage.best(
						self.portdb.xmatch("match-all", package))
					full_desc = self.portdb.aux_get(
						full_package, ["DESCRIPTION"])[0]
					print "emerge: search: aux_get() failed, skipping"
				if self.searchre.search(full_desc):
					self.matches["desc"].append([full_package,masked])

		# Package sets are searched by name and (optionally) description.
		self.sdict = self.setconfig.getSets()
		for setname in self.sdict:
			self.spinner.update()
				match_string = setname
				match_string = setname.split("/")[-1]
			if self.searchre.search(match_string):
				self.matches["set"].append([setname, False])
			elif self.searchdesc:
				if self.searchre.search(
					self.sdict[setname].getMetadata("DESCRIPTION")):
					self.matches["set"].append([setname, False])

		for mtype in self.matches:
			self.matches[mtype].sort()
			self.mlen += len(self.matches[mtype])

		# addCP: record a single category/pn result directly.
		if not self.portdb.xmatch("match-all", cp):
		if not self.portdb.xmatch("bestmatch-visible", cp):
		self.matches["pkg"].append([cp, masked])

		"""Outputs the results of the search."""
		print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
		print "[ Applications found : "+white(str(self.mlen))+" ]"
		vardb = self.vartree.dbapi
		for mtype in self.matches:
			for match,masked in self.matches[mtype]:
					full_package = self.portdb.xmatch(
						"bestmatch-visible", match)
						#no match found; we don't want to query description
						full_package = portage.best(
							self.portdb.xmatch("match-all",match))
				elif mtype == "desc":
					match = portage.cpv_getkey(match)
					print green("*")+" "+white(match)
					print " ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION")
						desc, homepage, license = self.portdb.aux_get(
							full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
						print "emerge: search: aux_get() failed, skipping"
						print green("*")+" "+white(match)+" "+red("[ Masked ]")
						print green("*")+" "+white(match)
					myversion = self.getVersion(full_package, search.VERSION_RELEASE)
						# Verbose mode: compute download size via the Manifest.
						mycat = match.split("/")[0]
						mypkg = match.split("/")[1]
						mycpv = match + "-" + myversion
						myebuild = self.portdb.findname(mycpv)
							pkgdir = os.path.dirname(myebuild)
							from portage import manifest
							mf = manifest.Manifest(
								pkgdir, self.settings["DISTDIR"])
								uri_map = self.portdb.getFetchMap(mycpv)
							except portage.exception.InvalidDependString, e:
								file_size_str = "Unknown (%s)" % (e,)
									mysum[0] = mf.getDistfilesSize(uri_map)
									file_size_str = "Unknown (missing " + \
										"digest for %s)" % (e,)
								# No ebuild: fall back to the binary package size.
								if db is not vardb and \
									db.cpv_exists(mycpv):
									if not myebuild and hasattr(db, "bintree"):
										myebuild = db.bintree.getname(mycpv)
										mysum[0] = os.stat(myebuild).st_size
						if myebuild and file_size_str is None:
							# Render size in kB with thousands separators.
							mystr = str(mysum[0] / 1024)
								mystr = mystr[:mycount] + "," + mystr[mycount:]
							file_size_str = mystr + " kB"

						print " ", darkgreen("Latest version available:"),myversion
						print " ", self.getInstallationStatus(mycat+'/'+mypkg)
							(darkgreen("Size of files:"), file_size_str)
						print " ", darkgreen("Homepage:")+" ",homepage
						print " ", darkgreen("Description:")+" ",desc
						print " ", darkgreen("License:")+" ",license

	def getInstallationStatus(self,package):
		# Report the best installed version, or "[ Not Installed ]".
		installed_package = self.vartree.dep_bestmatch(package)
			version = self.getVersion(installed_package,search.VERSION_RELEASE)
			result = darkgreen("Latest version installed:")+" "+version
			result = darkgreen("Latest version installed:")+" [ Not Installed ]"

	def getVersion(self,full_package,detail):
		# Extract the version (optionally with -rN revision) from a cpv.
		if len(full_package) > 1:
			package_parts = portage.catpkgsplit(full_package)
			if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
				result = package_parts[2]+ "-" + package_parts[3]
				result = package_parts[2]
class RootConfig(object):
	"""This is used internally by depgraph to track information about a
	particular $ROOT."""

	# Maps Package.type_name to the tree that packages of that type
	# come from; tree_pkg_map (built below) is the inverse mapping.
		"ebuild" : "porttree",
		"binary" : "bintree",
		"installed" : "vartree"

	for k, v in pkg_tree_map.iteritems():

	def __init__(self, settings, trees, setconfig):
		self.settings = settings
		self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
		self.root = self.settings["ROOT"]
		self.setconfig = setconfig
		if setconfig is None:
		self.sets = self.setconfig.getSets()
		self.visible_pkgs = PackageVirtualDbapi(self.settings)
def create_world_atom(pkg, args_set, root_config):
	"""Create a new atom for the world file if one does not exist. If the
	argument atom is precise enough to identify a specific slot then a slot
	atom will be returned. Atoms that are in the system set may also be stored
	in world since system atoms can only match one slot while world atoms can
	be greedy with respect to slots. Unslotted system packages will not be
	stored in world."""

	arg_atom = args_set.findAtomForPackage(pkg)
	cp = portage.dep_getkey(arg_atom)
	sets = root_config.sets
	portdb = root_config.trees["porttree"].dbapi
	vardb = root_config.trees["vartree"].dbapi
	available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
		for cpv in portdb.match(cp))
	slotted = len(available_slots) > 1 or \
		(len(available_slots) == 1 and "0" not in available_slots)
		# check the vdb in case this is multislot
		available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
			for cpv in vardb.match(cp))
		slotted = len(available_slots) > 1 or \
			(len(available_slots) == 1 and "0" not in available_slots)
	if slotted and arg_atom != cp:
		# If the user gave a specific atom, store it as a
		# slot atom in the world file.
		slot_atom = pkg.slot_atom

		# For USE=multislot, there are a couple of cases to
		#
		# 1) SLOT="0", but the real SLOT spontaneously changed to some
		#    unknown value, so just record an unslotted atom.
		#
		# 2) SLOT comes from an installed package and there is no
		#    matching SLOT in the portage tree.
		#
		# Make sure that the slot atom is available in either the
		# portdb or the vardb, since otherwise the user certainly
		# doesn't want the SLOT atom recorded in the world file
		# (case 1 above). If it's only available in the vardb,
		# the user may be trying to prevent a USE=multislot
		# package from being removed by --depclean (case 2 above).

		if not portdb.match(slot_atom):
			# SLOT seems to come from an installed multislot package
		# If there is no installed package matching the SLOT atom,
		# it probably changed SLOT spontaneously due to USE=multislot,
		# so just record an unslotted atom.
		if vardb.match(slot_atom):
			# Now verify that the argument is precise
			# enough to identify a specific slot.
			matches = mydb.match(arg_atom)
			matched_slots = set()
				matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
			if len(matched_slots) == 1:
				new_world_atom = slot_atom

	if new_world_atom == sets["world"].findAtomForPackage(pkg):
		# Both atoms would be identical, so there's nothing to add.
		# Unlike world atoms, system atoms are not greedy for slots, so they
		# can't be safely excluded from world if they are slotted.
		system_atom = sets["system"].findAtomForPackage(pkg)
			if not portage.dep_getkey(system_atom).startswith("virtual/"):
			# System virtuals aren't safe to exclude from world since they can
			# match multiple old-style virtuals but only one of them will be
			# pulled in by update or depclean.
			providers = portdb.mysettings.getvirtuals().get(
				portage.dep_getkey(system_atom))
			if providers and len(providers) == 1 and providers[0] == cp:
	return new_world_atom
def filter_iuse_defaults(iuse):
	# Strip the +/- default-enable markers from IUSE flags.
		if flag.startswith("+") or flag.startswith("-"):
class SlotObject(object):
	__slots__ = ("__weakref__",)

	def __init__(self, **kwargs):
		# Walk the class hierarchy so that __slots__ declared on every
		# base class are initialized from the given keyword arguments.
		classes = [self.__class__]
			classes.extend(c.__bases__)
			slots = getattr(c, "__slots__", None)
				myvalue = kwargs.get(myattr, None)
				setattr(self, myattr, myvalue)

		"""
		Create a new instance and copy all attributes
		defined from __slots__ (including those from
		inherited classes).
		"""
		obj = self.__class__()

		classes = [self.__class__]
			classes.extend(c.__bases__)
			slots = getattr(c, "__slots__", None)
				setattr(obj, myattr, getattr(self, myattr))
class AbstractDepPriority(SlotObject):
	__slots__ = ("buildtime", "runtime", "runtime_post")

	# All rich comparisons delegate to __int__(), so priority instances
	# compare like plain integers.
	def __lt__(self, other):
		return self.__int__() < other

	def __le__(self, other):
		return self.__int__() <= other

	def __eq__(self, other):
		return self.__int__() == other

	def __ne__(self, other):
		return self.__int__() != other

	def __gt__(self, other):
		return self.__int__() > other

	def __ge__(self, other):
		return self.__int__() >= other

		# Shallow copy is sufficient since all slot values are immutable.
		return copy.copy(self)
class DepPriority(AbstractDepPriority):

	__slots__ = ("satisfied", "optional", "rebuild")

		if self.runtime_post:
			return "runtime_post"
class BlockerDepPriority(DepPriority):

# A single shared instance suffices for all blockers.
BlockerDepPriority.instance = BlockerDepPriority()
class UnmergeDepPriority(AbstractDepPriority):
	__slots__ = ("optional", "satisfied",)
	"""
	Combination of properties Priority Category

	(none of the above) -2 SOFT
	"""

		if self.runtime_post:

		myvalue = self.__int__()
		if myvalue > self.SOFT:
class DepPriorityNormalRange(object):
	"""
	DepPriority properties Index Category

	runtime_post 2 MEDIUM_SOFT
	(none of the above) 0 NONE
	"""

	# Predicates below return True when the given dependency edge should
	# be ignored at the corresponding hardness level. Non-DepPriority
	# instances are never ignored.
	def _ignore_optional(cls, priority):
		if priority.__class__ is not DepPriority:
		return bool(priority.optional)

	def _ignore_runtime_post(cls, priority):
		if priority.__class__ is not DepPriority:
		return bool(priority.optional or priority.runtime_post)

	def _ignore_runtime(cls, priority):
		if priority.__class__ is not DepPriority:
		return not priority.buildtime

	ignore_medium = _ignore_runtime
	ignore_medium_soft = _ignore_runtime_post
	ignore_soft = _ignore_optional
# Predicates ordered from least to most tolerant; index into this tuple
# to progressively relax which dependency edges are ignored.
DepPriorityNormalRange.ignore_priority = (
	DepPriorityNormalRange._ignore_optional,
	DepPriorityNormalRange._ignore_runtime_post,
	DepPriorityNormalRange._ignore_runtime
class DepPrioritySatisfiedRange(object):
	"""
	DepPriority Index Category

	not satisfied and buildtime HARD
	not satisfied and runtime 7 MEDIUM
	not satisfied and runtime_post 6 MEDIUM_SOFT
	satisfied and buildtime and rebuild 5 SOFT
	satisfied and buildtime 4 SOFT
	satisfied and runtime 3 SOFT
	satisfied and runtime_post 2 SOFT
	(none of the above) 0 NONE
	"""

	# Like DepPriorityNormalRange, but "satisfied" edges (dependency
	# already installed) are treated as softer than unsatisfied ones.
	def _ignore_optional(cls, priority):
		if priority.__class__ is not DepPriority:
		return bool(priority.optional)

	def _ignore_satisfied_runtime_post(cls, priority):
		if priority.__class__ is not DepPriority:
		if priority.optional:
		if not priority.satisfied:
		return bool(priority.runtime_post)

	def _ignore_satisfied_runtime(cls, priority):
		if priority.__class__ is not DepPriority:
		if priority.optional:
		if not priority.satisfied:
		return not priority.buildtime

	def _ignore_satisfied_buildtime(cls, priority):
		if priority.__class__ is not DepPriority:
		if priority.optional:
		if not priority.satisfied:
		if priority.buildtime:
			return not priority.rebuild

	def _ignore_satisfied_buildtime_rebuild(cls, priority):
		if priority.__class__ is not DepPriority:
		if priority.optional:
		return bool(priority.satisfied)

	def _ignore_runtime_post(cls, priority):
		if priority.__class__ is not DepPriority:
		return bool(priority.optional or \
			priority.satisfied or \
			priority.runtime_post)

	def _ignore_runtime(cls, priority):
		if priority.__class__ is not DepPriority:
		return bool(priority.satisfied or \
			not priority.buildtime)

	ignore_medium = _ignore_runtime
	ignore_medium_soft = _ignore_runtime_post
	ignore_soft = _ignore_satisfied_buildtime_rebuild
# Predicates ordered from least to most tolerant (see class docstring).
DepPrioritySatisfiedRange.ignore_priority = (
	DepPrioritySatisfiedRange._ignore_optional,
	DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
	DepPrioritySatisfiedRange._ignore_satisfied_runtime,
	DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
	DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
	DepPrioritySatisfiedRange._ignore_runtime_post,
	DepPrioritySatisfiedRange._ignore_runtime
def _find_deep_system_runtime_deps(graph):
	"""Collect the set of Package nodes reachable from system-set packages
	via runtime (or runtime_post) dependency edges in the given graph."""
	deep_system_deps = set()
		if not isinstance(node, Package) or \
			node.operation == 'uninstall':
		# Seed the traversal with system-set packages.
		if node.root_config.sets['system'].findAtomForPackage(node):
			node_stack.append(node)

	def ignore_priority(priority):
		"""
		Ignore non-runtime priorities.
		"""
		if isinstance(priority, DepPriority) and \
			(priority.runtime or priority.runtime_post):

	# Depth-first traversal over runtime edges only.
		node = node_stack.pop()
		if node in deep_system_deps:
		deep_system_deps.add(node)
		for child in graph.child_nodes(node, ignore_priority=ignore_priority):
			if not isinstance(child, Package) or \
				child.operation == 'uninstall':
			node_stack.append(child)

	return deep_system_deps
1162 class FakeVartree(portage.vartree):
1163 """This is implements an in-memory copy of a vartree instance that provides
1164 all the interfaces required for use by the depgraph. The vardb is locked
1165 during the constructor call just long enough to read a copy of the
1166 installed package information. This allows the depgraph to do it's
1167 dependency calculations without holding a lock on the vardb. It also
1168 allows things like vardb global updates to be done in memory so that the
1169 user doesn't necessarily need write access to the vardb in cases where
1170 global updates are necessary (updates are performed when necessary if there
1171 is not a matching ebuild in the tree)."""
1172 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1173 self._root_config = root_config
1174 if pkg_cache is None:
1176 real_vartree = root_config.trees["vartree"]
1177 portdb = root_config.trees["porttree"].dbapi
1178 self.root = real_vartree.root
1179 self.settings = real_vartree.settings
1180 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1181 if "_mtime_" not in mykeys:
1182 mykeys.append("_mtime_")
1183 self._db_keys = mykeys
1184 self._pkg_cache = pkg_cache
1185 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1186 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1188 # At least the parent needs to exist for the lock file.
1189 portage.util.ensure_dirs(vdb_path)
1190 except portage.exception.PortageException:
1194 if acquire_lock and os.access(vdb_path, os.W_OK):
1195 vdb_lock = portage.locks.lockdir(vdb_path)
1196 real_dbapi = real_vartree.dbapi
1198 for cpv in real_dbapi.cpv_all():
1199 cache_key = ("installed", self.root, cpv, "nomerge")
1200 pkg = self._pkg_cache.get(cache_key)
1202 metadata = pkg.metadata
1204 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1205 myslot = metadata["SLOT"]
1206 mycp = portage.dep_getkey(cpv)
1207 myslot_atom = "%s:%s" % (mycp, myslot)
1209 mycounter = long(metadata["COUNTER"])
1212 metadata["COUNTER"] = str(mycounter)
1213 other_counter = slot_counters.get(myslot_atom, None)
1214 if other_counter is not None:
1215 if other_counter > mycounter:
1217 slot_counters[myslot_atom] = mycounter
1219 pkg = Package(built=True, cpv=cpv,
1220 installed=True, metadata=metadata,
1221 root_config=root_config, type_name="installed")
1222 self._pkg_cache[pkg] = pkg
1223 self.dbapi.cpv_inject(pkg)
1224 real_dbapi.flush_cache()
1227 portage.locks.unlockdir(vdb_lock)
1228 # Populate the old-style virtuals using the cached values.
1229 if not self.settings.treeVirtuals:
1230 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1231 portage.getCPFromCPV, self.get_all_provides())
1233 # Intialize variables needed for lazy cache pulls of the live ebuild
1234 # metadata. This ensures that the vardb lock is released ASAP, without
1235 # being delayed in case cache generation is triggered.
1236 self._aux_get = self.dbapi.aux_get
1237 self.dbapi.aux_get = self._aux_get_wrapper
1238 self._match = self.dbapi.match
1239 self.dbapi.match = self._match_wrapper
1240 self._aux_get_history = set()
1241 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1242 self._portdb = portdb
1243 self._global_updates = None
1245 def _match_wrapper(self, cpv, use_cache=1):
1247 Make sure the metadata in Package instances gets updated for any
1248 cpv that is returned from a match() call, since the metadata can
1249 be accessed directly from the Package instance instead of via
1252 matches = self._match(cpv, use_cache=use_cache)
1254 if cpv in self._aux_get_history:
1256 self._aux_get_wrapper(cpv, [])
1259 def _aux_get_wrapper(self, pkg, wants):
1260 if pkg in self._aux_get_history:
1261 return self._aux_get(pkg, wants)
1262 self._aux_get_history.add(pkg)
1264 # Use the live ebuild metadata if possible.
1265 live_metadata = dict(izip(self._portdb_keys,
1266 self._portdb.aux_get(pkg, self._portdb_keys)))
1267 if not portage.eapi_is_supported(live_metadata["EAPI"]):
1269 self.dbapi.aux_update(pkg, live_metadata)
1270 except (KeyError, portage.exception.PortageException):
1271 if self._global_updates is None:
1272 self._global_updates = \
1273 grab_global_updates(self._portdb.porttree_root)
1274 perform_global_updates(
1275 pkg, self.dbapi, self._global_updates)
1276 return self._aux_get(pkg, wants)
def sync(self, acquire_lock=1):
    """
    Call this method to synchronize state with the real vardb
    after one or more packages may have been installed or
    uninstalled.

    @param acquire_lock: when true (and the vdb is writable), hold the
        vdb directory lock for the duration of the synchronization
    """
    vdb_path = os.path.join(self.root, portage.VDB_PATH)
    try:
        # At least the parent needs to exist for the lock file.
        portage.util.ensure_dirs(vdb_path)
    except portage.exception.PortageException:
        pass
    vdb_lock = None
    try:
        if acquire_lock and os.access(vdb_path, os.W_OK):
            vdb_lock = portage.locks.lockdir(vdb_path)

        real_vardb = self._root_config.trees["vartree"].dbapi
        current_cpv_set = frozenset(real_vardb.cpv_all())
        pkg_vardb = self.dbapi
        aux_get_history = self._aux_get_history

        # Remove any packages that have been uninstalled.
        for pkg in list(pkg_vardb):
            if pkg.cpv not in current_cpv_set:
                pkg_vardb.cpv_remove(pkg)
                aux_get_history.discard(pkg.cpv)

        # Validate counters and timestamps.
        slot_counters = {}
        root = self.root
        validation_keys = ["COUNTER", "_mtime_"]
        for cpv in current_cpv_set:
            pkg_hash_key = ("installed", root, cpv, "nomerge")
            pkg = pkg_vardb.get(pkg_hash_key)
            if pkg is not None:
                counter, mtime = real_vardb.aux_get(cpv, validation_keys)
                try:
                    counter = long(counter)
                except ValueError:
                    counter = 0

                if counter != pkg.counter or \
                    mtime != pkg.mtime:
                    # Stale cache entry; drop it and rebuild below.
                    pkg_vardb.cpv_remove(pkg)
                    aux_get_history.discard(pkg.cpv)
                    pkg = None

            if pkg is None:
                pkg = self._pkg(cpv)

            other_counter = slot_counters.get(pkg.slot_atom)
            if other_counter is not None:
                if other_counter > pkg.counter:
                    # Keep only the instance with the highest COUNTER
                    # (most recently merged) for each slot atom.
                    continue

            slot_counters[pkg.slot_atom] = pkg.counter
            pkg_vardb.cpv_inject(pkg)

        real_vardb.flush_cache()
    finally:
        if vdb_lock:
            portage.locks.unlockdir(vdb_lock)
def _pkg(self, cpv):
    """
    Create an "installed" Package instance for the given cpv, pulling
    metadata from the real vardb and normalizing a non-numeric COUNTER
    value to "0" so that later counter comparisons are safe.
    """
    root_config = self._root_config
    real_vardb = root_config.trees["vartree"].dbapi
    pkg = Package(cpv=cpv, installed=True,
        metadata=izip(self._db_keys,
            real_vardb.aux_get(cpv, self._db_keys)),
        root_config=root_config,
        type_name="installed")
    try:
        mycounter = long(pkg.metadata["COUNTER"])
    except ValueError:
        # Corrupt or missing COUNTER; normalize to zero.
        mycounter = 0
        pkg.metadata["COUNTER"] = str(mycounter)
    return pkg
def grab_global_updates(portdir):
    """
    Parse all update files under $PORTDIR/profiles/updates and return
    the accumulated list of update commands (package moves/slotmoves).

    @param portdir: path to the portage tree root
    @returns: list of parsed update commands (empty when the updates
        directory does not exist)
    """
    from portage.update import grab_updates, parse_updates
    updpath = os.path.join(portdir, "profiles", "updates")
    try:
        rawupdates = grab_updates(updpath)
    except portage.exception.DirectoryNotFound:
        rawupdates = []
    upd_commands = []
    for mykey, mystat, mycontent in rawupdates:
        # Named `parsed_commands` to avoid shadowing the module-level
        # `commands` import.  Parse errors are intentionally ignored
        # here; they are reported through other code paths.
        parsed_commands, errors = parse_updates(mycontent)
        upd_commands.extend(parsed_commands)
    return upd_commands
def perform_global_updates(mycpv, mydb, mycommands):
    """
    Apply global update commands (package moves/slotmoves) to the
    dependency metadata of a single package in the given dbapi.

    @param mycpv: the package whose metadata should be updated
    @param mydb: a dbapi supporting aux_get()/aux_update()
    @param mycommands: update commands from grab_global_updates()
    """
    from portage.update import update_dbentries
    aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
    aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
    updates = update_dbentries(mycommands, aux_dict)
    # Only write back when something actually changed.
    if updates:
        mydb.aux_update(mycpv, updates)
def visible(pkgsettings, pkg):
    """
    Check if a package is visible. This can raise an InvalidDependString
    exception if LICENSE is invalid.
    TODO: optionally generate a list of masking reasons
    @rtype: Boolean
    @returns: True if the package is visible, False otherwise.
    """
    if not pkg.metadata["SLOT"]:
        return False
    if not pkg.installed:
        if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
            return False
    eapi = pkg.metadata["EAPI"]
    if not portage.eapi_is_supported(eapi):
        return False
    if not pkg.installed:
        if portage._eapi_is_deprecated(eapi):
            return False
        # NOTE(review): keyword masking is only applied to packages that
        # are not yet installed — confirm nesting against upstream.
        if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
            return False
    if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
        return False
    if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
        return False
    try:
        if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
            return False
    except portage.exception.InvalidDependString:
        # Treat an unparseable LICENSE as not visible.
        return False
    return True
def get_masking_status(pkg, pkgsettings, root_config):
    """
    Return the list of masking-reason strings for a package, extending
    portage.getmaskingstatus() with CHOST-acceptance and missing-SLOT
    checks that apply to Package instances.
    """
    mreasons = portage.getmaskingstatus(
        pkg, settings=pkgsettings,
        portdb=root_config.trees["porttree"].dbapi)

    if not pkg.installed:
        # CHOST acceptance only matters for packages not yet installed.
        if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
            mreasons.append("CHOST: %s" % \
                pkg.metadata["CHOST"])

    if not pkg.metadata["SLOT"]:
        mreasons.append("invalid: SLOT is undefined")

    return mreasons
def get_mask_info(root_config, cpv, pkgsettings,
    db, pkg_type, built, installed, db_keys):
    """
    Collect the metadata and masking reasons for a single cpv.

    @returns: tuple (metadata, mreasons); metadata is None when the
        cpv's metadata cannot be fetched ("corruption"), and mreasons
        is a list of human-readable masking reasons
    """
    try:
        metadata = dict(izip(db_keys,
            db.aux_get(cpv, db_keys)))
    except KeyError:
        metadata = None

    if metadata and not built:
        # For unbuilt ebuilds, compute USE/CHOST from current settings.
        pkgsettings.setcpv(cpv, mydb=metadata)
        metadata["USE"] = pkgsettings["PORTAGE_USE"]
        metadata['CHOST'] = pkgsettings.get('CHOST', '')

    if metadata is None:
        mreasons = ["corruption"]
    else:
        eapi = metadata['EAPI']
        # NOTE(review): some upstream versions strip a leading marker
        # from EAPI here before the support check — confirm upstream.
        if not portage.eapi_is_supported(eapi):
            mreasons = ['EAPI %s' % eapi]
        else:
            pkg = Package(type_name=pkg_type, root_config=root_config,
                cpv=cpv, built=built, installed=installed, metadata=metadata)
            mreasons = get_masking_status(pkg, pkgsettings, root_config)

    return metadata, mreasons
1458 def show_masked_packages(masked_packages):
1459 shown_licenses = set()
1460 shown_comments = set()
1461 # Maybe there is both an ebuild and a binary. Only
1462 # show one of them to avoid redundant appearance.
1464 have_eapi_mask = False
1465 for (root_config, pkgsettings, cpv,
1466 metadata, mreasons) in masked_packages:
1467 if cpv in shown_cpvs:
1470 comment, filename = None, None
1471 if "package.mask" in mreasons:
1472 comment, filename = \
1473 portage.getmaskingreason(
1474 cpv, metadata=metadata,
1475 settings=pkgsettings,
1476 portdb=root_config.trees["porttree"].dbapi,
1477 return_location=True)
1478 missing_licenses = []
1480 if not portage.eapi_is_supported(metadata["EAPI"]):
1481 have_eapi_mask = True
1483 missing_licenses = \
1484 pkgsettings._getMissingLicenses(
1486 except portage.exception.InvalidDependString:
1487 # This will have already been reported
1488 # above via mreasons.
1491 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1492 if comment and comment not in shown_comments:
1495 shown_comments.add(comment)
1496 portdb = root_config.trees["porttree"].dbapi
1497 for l in missing_licenses:
1498 l_path = portdb.findLicensePath(l)
1499 if l in shown_licenses:
1501 msg = ("A copy of the '%s' license" + \
1502 " is located at '%s'.") % (l, l_path)
1505 shown_licenses.add(l)
1506 return have_eapi_mask
class Task(SlotObject):
    """
    Base class for hashable scheduler tasks.  Identity is defined by a
    lazily-computed hash key (a tuple supplied by subclasses), and the
    sequence protocol methods all delegate to that key so a Task can
    be compared with, and used interchangeably with, plain tuples.
    """

    __slots__ = ("_hash_key", "_hash_value")

    def _get_hash_key(self):
        # Subclasses must populate self._hash_key (or override this).
        hash_key = getattr(self, "_hash_key", None)
        if hash_key is None:
            raise NotImplementedError(self)
        return hash_key

    def __eq__(self, other):
        return self._get_hash_key() == other

    def __ne__(self, other):
        return self._get_hash_key() != other

    def __hash__(self):
        # Cache the hash since the key never changes once computed.
        hash_value = getattr(self, "_hash_value", None)
        if hash_value is None:
            self._hash_value = hash(self._get_hash_key())
        return self._hash_value

    def __len__(self):
        return len(self._get_hash_key())

    def __getitem__(self, key):
        return self._get_hash_key()[key]

    def __iter__(self):
        return iter(self._get_hash_key())

    def __contains__(self, key):
        return key in self._get_hash_key()

    def __str__(self):
        return str(self._get_hash_key())
class Blocker(Task):
    """
    Represents a blocker atom (!cat/pkg) encountered during dependency
    resolution.  Hash identity is ("blocks", root, atom, eapi).
    """

    __hash__ = Task.__hash__
    __slots__ = ("root", "atom", "cp", "eapi", "satisfied")

    def __init__(self, **kwargs):
        Task.__init__(self, **kwargs)
        # Cache the category/package key of the blocked atom.
        self.cp = portage.dep_getkey(self.atom)

    def _get_hash_key(self):
        hash_key = getattr(self, "_hash_key", None)
        if hash_key is None:
            self._hash_key = \
                ("blocks", self.root, self.atom, self.eapi)
        return self._hash_key
class Package(Task):
    """
    Represents one ebuild/binary/installed package instance.  Hash
    identity is (type_name, root, cpv, operation).  Metadata access is
    wrapped so that attribute mirrors (slot, use, iuse, counter, ...)
    stay synchronized with the metadata dict.
    """

    __hash__ = Task.__hash__
    __slots__ = ("built", "cpv", "depth",
        "installed", "metadata", "onlydeps", "operation",
        "root_config", "type_name",
        "category", "counter", "cp", "cpv_split",
        "inherited", "iuse", "mtime",
        "pf", "pv_split", "root", "slot", "slot_atom", "use")

    # Metadata keys required for a Package instance to function.
    metadata_keys = [
        "CHOST", "COUNTER", "DEPEND", "EAPI",
        "INHERITED", "IUSE", "KEYWORDS",
        "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
        "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]

    def __init__(self, **kwargs):
        Task.__init__(self, **kwargs)
        self.root = self.root_config.root
        # Wrapping the metadata also populates mirror attributes such
        # as self.slot via the _set_* hooks.
        self.metadata = _PackageMetadataWrapper(self, self.metadata)
        self.cp = portage.cpv_getkey(self.cpv)
        slot = self.slot
        if not slot:
            # Avoid an InvalidAtom exception when creating slot_atom.
            # This package instance will be masked due to empty SLOT.
            slot = '0'
        self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
        self.category, self.pf = portage.catsplit(self.cpv)
        self.cpv_split = portage.catpkgsplit(self.cpv)
        self.pv_split = self.cpv_split[1:]

    class _use(object):
        """Immutable view of the enabled USE flags."""

        __slots__ = ("__weakref__", "enabled")

        def __init__(self, use):
            self.enabled = frozenset(use)

    class _iuse(object):
        """Parsed IUSE: +/- defaults separated, plus a lazy match regex."""

        __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")

        def __init__(self, tokens, iuse_implicit):
            self.tokens = tuple(tokens)
            self.iuse_implicit = iuse_implicit
            enabled = []
            disabled = []
            other = []
            for x in tokens:
                prefix = x[:1]
                if prefix == "+":
                    enabled.append(x[1:])
                elif prefix == "-":
                    disabled.append(x[1:])
                else:
                    other.append(x)
            self.enabled = frozenset(enabled)
            self.disabled = frozenset(disabled)
            self.all = frozenset(chain(enabled, disabled, other))

        def __getattribute__(self, name):
            if name == "regex":
                # Build the regex lazily, on first access only.
                try:
                    return object.__getattribute__(self, "regex")
                except AttributeError:
                    all = object.__getattribute__(self, "all")
                    iuse_implicit = object.__getattribute__(self, "iuse_implicit")
                    # Escape anything except ".*" which is supposed
                    # to pass through from _get_implicit_iuse()
                    regex = (re.escape(x) for x in chain(all, iuse_implicit))
                    regex = "^(%s)$" % "|".join(regex)
                    regex = regex.replace("\\.\\*", ".*")
                    self.regex = re.compile(regex)
            return object.__getattribute__(self, name)

    def _get_hash_key(self):
        hash_key = getattr(self, "_hash_key", None)
        if hash_key is None:
            if self.operation is None:
                self.operation = "merge"
                if self.onlydeps or self.installed:
                    self.operation = "nomerge"
            self._hash_key = \
                (self.type_name, self.root, self.cpv, self.operation)
        return self._hash_key

    # Ordering is only defined between instances with the same cp;
    # all comparisons between different cp values return False.

    def __lt__(self, other):
        if other.cp != self.cp:
            return False
        if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
            return True
        return False

    def __le__(self, other):
        if other.cp != self.cp:
            return False
        if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
            return True
        return False

    def __gt__(self, other):
        if other.cp != self.cp:
            return False
        if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
            return True
        return False

    def __ge__(self, other):
        if other.cp != self.cp:
            return False
        if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
            return True
        return False
# Full set of metadata keys tracked per package: every auxdb key
# except the UNUSED_* placeholders and the obsolete CDEPEND, plus the
# keys Package itself requires.
_all_metadata_keys = set(x for x in portage.auxdbkeys \
    if not x.startswith("UNUSED_"))
_all_metadata_keys.discard("CDEPEND")
_all_metadata_keys.update(Package.metadata_keys)

from portage.cache.mappings import slot_dict_class
# Slot-optimized dict base class (one slot per metadata key).
_PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
    """
    Detect metadata updates and synchronize Package attributes.

    Writes to any key in _wrapped_keys are forwarded to the matching
    _set_<key>() hook, which mirrors the value onto the owning Package.
    """

    __slots__ = ("_pkg",)
    _wrapped_keys = frozenset(
        ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])

    def __init__(self, pkg, metadata):
        _PackageMetadataWrapperBase.__init__(self)
        self._pkg = pkg
        # update() goes through __setitem__, triggering the hooks.
        self.update(metadata)

    def __setitem__(self, k, v):
        _PackageMetadataWrapperBase.__setitem__(self, k, v)
        if k in self._wrapped_keys:
            getattr(self, "_set_" + k.lower())(k, v)

    def _set_inherited(self, k, v):
        if isinstance(v, basestring):
            v = frozenset(v.split())
        self._pkg.inherited = v

    def _set_iuse(self, k, v):
        self._pkg.iuse = self._pkg._iuse(
            v.split(), self._pkg.root_config.iuse_implicit)

    def _set_slot(self, k, v):
        self._pkg.slot = v

    def _set_use(self, k, v):
        self._pkg.use = self._pkg._use(v.split())

    def _set_counter(self, k, v):
        if isinstance(v, basestring):
            try:
                v = long(v.strip())
            except ValueError:
                # Corrupt COUNTER; normalize to zero.
                v = 0
        self._pkg.counter = v

    def _set__mtime_(self, k, v):
        if isinstance(v, basestring):
            try:
                v = long(v.strip())
            except ValueError:
                # Corrupt mtime; normalize to zero.
                v = 0
        self._pkg.mtime = v
class EbuildFetchonly(SlotObject):
    """
    Synchronously fetch (or pretend-fetch) the source files for a
    single ebuild, honoring RESTRICT=fetch by running the fetch phase
    inside a private build directory so pkg_nofetch can be spawned.
    """

    __slots__ = ("fetch_all", "pkg", "pretend", "settings")

    def execute(self):
        settings = self.settings
        pkg = self.pkg
        portdb = pkg.root_config.trees["porttree"].dbapi
        ebuild_path = portdb.findname(pkg.cpv)
        settings.setcpv(pkg)
        debug = settings.get("PORTAGE_DEBUG") == "1"
        use_cache = 1 # always true
        portage.doebuild_environment(ebuild_path, "fetch",
            settings["ROOT"], settings, debug, use_cache, portdb)
        restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()

        if restrict_fetch:
            # RESTRICT=fetch: pkg_nofetch may need to run, which
            # requires a build directory.
            rval = self._execute_with_builddir()
        else:
            rval = portage.doebuild(ebuild_path, "fetch",
                settings["ROOT"], settings, debug=debug,
                listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
                mydbapi=portdb, tree="porttree")

            if rval != os.EX_OK:
                msg = "Fetch failed for '%s'" % (pkg.cpv,)
                eerror(msg, phase="unpack", key=pkg.cpv)

        return rval

    def _execute_with_builddir(self):
        # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
        # ensuring sane $PWD (bug #239560) and storing elog
        # messages. Use a private temp directory, in order
        # to avoid locking the main one.
        settings = self.settings
        global_tmpdir = settings["PORTAGE_TMPDIR"]
        from tempfile import mkdtemp
        try:
            private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
        except OSError as e:
            if e.errno != portage.exception.PermissionDenied.errno:
                raise
            raise portage.exception.PermissionDenied(global_tmpdir)
        settings["PORTAGE_TMPDIR"] = private_tmpdir
        settings.backup_changes("PORTAGE_TMPDIR")
        try:
            retval = self._execute()
        finally:
            # Always restore the global tmpdir and discard the
            # private one, even when the fetch raises.
            settings["PORTAGE_TMPDIR"] = global_tmpdir
            settings.backup_changes("PORTAGE_TMPDIR")
            shutil.rmtree(private_tmpdir)
        return retval

    def _execute(self):
        settings = self.settings
        pkg = self.pkg
        root_config = pkg.root_config
        portdb = root_config.trees["porttree"].dbapi
        ebuild_path = portdb.findname(pkg.cpv)
        debug = settings.get("PORTAGE_DEBUG") == "1"
        retval = portage.doebuild(ebuild_path, "fetch",
            self.settings["ROOT"], self.settings, debug=debug,
            listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
            mydbapi=portdb, tree="porttree")

        if retval != os.EX_OK:
            msg = "Fetch failed for '%s'" % (pkg.cpv,)
            eerror(msg, phase="unpack", key=pkg.cpv)

        # Flush any elog messages produced by pkg_nofetch.
        portage.elog.elog_process(self.pkg.cpv, self.settings)
        return retval
class PollConstants(object):

    """
    Provides POLL* constants that are equivalent to those from the
    select module, for use by PollSelectAdapter.
    """

    names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
    v = 1
    for k in names:
        # Prefer the platform's real value; fall back to a unique
        # power-of-two bit when the select module lacks the constant.
        locals()[k] = getattr(select, k, v)
        v *= 2
    del k, v
class AsynchronousTask(SlotObject):
    """
    Subclasses override _wait() and _poll() so that calls
    to public methods can be wrapped for implementing
    hooks such as exit listener notification.

    Sublasses should call self.wait() to notify exit listeners after
    the task is complete and self.returncode has been set.
    """

    __slots__ = ("background", "cancelled", "returncode") + \
        ("_exit_listeners", "_exit_listener_stack", "_start_listeners")

    def start(self):
        """
        Start an asynchronous task and then return as soon as possible.
        """
        self._start_hook()
        self._start()

    def _start(self):
        raise NotImplementedError(self)

    def isAlive(self):
        return self.returncode is None

    def poll(self):
        if self.returncode is not None:
            return self.returncode
        self._poll()
        self._wait_hook()
        return self.returncode

    def _poll(self):
        # Default no-op poll; subclasses that can detect completion
        # without blocking override this.
        return self.returncode

    def wait(self):
        if self.returncode is None:
            self._wait()
        self._wait_hook()
        return self.returncode

    def _wait(self):
        return self.returncode

    def cancel(self):
        self.cancelled = True
        self.wait()

    def addStartListener(self, f):
        """
        The function will be called with one argument, a reference to self.
        """
        if self._start_listeners is None:
            self._start_listeners = []
        self._start_listeners.append(f)

    def removeStartListener(self, f):
        if self._start_listeners is None:
            return
        self._start_listeners.remove(f)

    def _start_hook(self):
        if self._start_listeners is not None:
            # Consume the list before calling, so a listener that adds
            # another listener does not affect this invocation.
            start_listeners = self._start_listeners
            self._start_listeners = None

            for f in start_listeners:
                f(self)

    def addExitListener(self, f):
        """
        The function will be called with one argument, a reference to self.
        """
        if self._exit_listeners is None:
            self._exit_listeners = []
        self._exit_listeners.append(f)

    def removeExitListener(self, f):
        if self._exit_listeners is None:
            if self._exit_listener_stack is not None:
                self._exit_listener_stack.remove(f)
            return
        self._exit_listeners.remove(f)

    def _wait_hook(self):
        """
        Call this method after the task completes, just before returning
        the returncode from wait() or poll(). This hook is
        used to trigger exit listeners when the returncode first
        becomes available.
        """
        if self.returncode is not None and \
            self._exit_listeners is not None:

            # This prevents recursion, in case one of the
            # exit handlers triggers this method again by
            # calling wait(). Use a stack that gives
            # removeExitListener() an opportunity to consume
            # listeners from the stack, before they can get
            # called below. This is necessary because a call
            # to one exit listener may result in a call to
            # removeExitListener() for another listener on
            # the stack. That listener needs to be removed
            # from the stack since it would be inconsistent
            # to call it after it has been been passed into
            # removeExitListener().
            self._exit_listener_stack = self._exit_listeners
            self._exit_listeners = None

            self._exit_listener_stack.reverse()
            while self._exit_listener_stack:
                self._exit_listener_stack.pop()(self)
class AbstractPollTask(AsynchronousTask):
    """
    Base for tasks driven by a scheduler poll() loop.  Provides the
    event masks used for registration and the shared policy for
    unregistering on error/hangup events.
    """

    __slots__ = ("scheduler",) + \
        ("_registered",)

    # Read size used by subclasses when draining pipes.
    _bufsize = 4096
    _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
    _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
        _exceptional_events

    def _unregister(self):
        raise NotImplementedError(self)

    def _unregister_if_appropriate(self, event):
        if self._registered:
            if event & self._exceptional_events:
                # POLLERR/POLLNVAL: abort the task.
                self._unregister()
                self.cancel()
            elif event & PollConstants.POLLHUP:
                # Peer closed; finish normally.
                self._unregister()
                self.wait()
class PipeReader(AbstractPollTask):

    """
    Reads output from one or more files and saves it in memory,
    for retrieval via the getvalue() method. This is driven by
    the scheduler's poll() loop, so it runs entirely within the
    main thread.
    """

    __slots__ = ("input_files",) + \
        ("_read_data", "_reg_ids")

    def _start(self):
        self._reg_ids = set()
        self._read_data = []
        for k, f in self.input_files.iteritems():
            # Non-blocking reads are required inside a poll() loop.
            fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
                fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
            self._reg_ids.add(self.scheduler.register(f.fileno(),
                self._registered_events, self._output_handler))
        self._registered = True

    def isAlive(self):
        return self._registered

    def cancel(self):
        if self.returncode is None:
            self.returncode = 1
        self.cancelled = True
        self.wait()

    def _wait(self):
        if self.returncode is not None:
            return self.returncode

        if self._registered:
            # Drive the scheduler until all registrations are consumed.
            self.scheduler.schedule(self._reg_ids)
            self._unregister()

        self.returncode = os.EX_OK
        return self.returncode

    def getvalue(self):
        """Retrieve the entire contents"""
        if sys.hexversion >= 0x3000000:
            return bytes().join(self._read_data)
        return "".join(self._read_data)

    def close(self):
        """Free the memory buffer."""
        self._read_data = None

    def _output_handler(self, fd, event):

        if event & PollConstants.POLLIN:

            # Locate the file object that corresponds to this fd.
            for f in self.input_files.itervalues():
                if fd == f.fileno():
                    break

            buf = array.array('B')
            try:
                buf.fromfile(f, self._bufsize)
            except EOFError:
                pass

            if buf:
                self._read_data.append(buf.tostring())
            else:
                # Empty read: EOF on this stream.
                self._unregister()
                self.wait()

        self._unregister_if_appropriate(event)
        return self._registered

    def _unregister(self):
        """
        Unregister from the scheduler and close open files.
        """

        self._registered = False

        if self._reg_ids is not None:
            for reg_id in self._reg_ids:
                self.scheduler.unregister(reg_id)
            self._reg_ids = None

        if self.input_files is not None:
            for f in self.input_files.itervalues():
                f.close()
            self.input_files = None
class CompositeTask(AsynchronousTask):
    """
    A task that is implemented as a sequence of subtasks, exactly one
    of which (self._current_task) is active at a time.  Subclasses
    drive progression via _start_task() and the *_exit() callbacks.
    """

    __slots__ = ("scheduler",) + ("_current_task",)

    def isAlive(self):
        return self._current_task is not None

    def cancel(self):
        self.cancelled = True
        if self._current_task is not None:
            self._current_task.cancel()

    def _poll(self):
        """
        This does a loop calling self._current_task.poll()
        repeatedly as long as the value of self._current_task
        keeps changing. It calls poll() a maximum of one time
        for a given self._current_task instance. This is useful
        since calling poll() on a task can trigger advance to
        the next task could eventually lead to the returncode
        being set in cases when polling only a single task would
        not have the same effect.
        """

        prev = None
        while True:
            task = self._current_task
            if task is None or task is prev:
                # don't poll the same task more than once
                break
            task.poll()
            prev = task

        return self.returncode

    def _wait(self):

        prev = None
        while True:
            task = self._current_task
            if task is None:
                break
            if task is prev:
                # Before the task.wait() method returned, an exit
                # listener should have set self._current_task to either
                # a different task or None. Something is wrong.
                raise AssertionError("self._current_task has not " + \
                    "changed since calling wait", self, task)
            task.wait()
            prev = task

        return self.returncode

    def _assert_current(self, task):
        """
        Raises an AssertionError if the given task is not the
        same one as self._current_task. This can be useful
        for detecting bugs.
        """
        if task is not self._current_task:
            raise AssertionError("Unrecognized task: %s" % (task,))

    def _default_exit(self, task):
        """
        Calls _assert_current() on the given task and then sets the
        composite returncode attribute if task.returncode != os.EX_OK.
        If the task failed then self._current_task will be set to None.
        Subclasses can use this as a generic task exit callback.

        @rtype: int
        @returns: The task.returncode attribute.
        """
        self._assert_current(task)
        if task.returncode != os.EX_OK:
            self.returncode = task.returncode
            self._current_task = None
        return task.returncode

    def _final_exit(self, task):
        """
        Assumes that task is the final task of this composite task.
        Calls _default_exit() and sets self.returncode to the task's
        returncode and sets self._current_task to None.
        """
        self._default_exit(task)
        self._current_task = None
        self.returncode = task.returncode
        return self.returncode

    def _default_final_exit(self, task):
        """
        This calls _final_exit() and then wait().

        Subclasses can use this as a generic final task exit callback.
        """
        self._final_exit(task)
        return self.wait()

    def _start_task(self, task, exit_handler):
        """
        Register exit handler for the given task, set it
        as self._current_task, and call task.start().

        Subclasses can use this as a generic way to start
        a task.
        """
        task.addExitListener(exit_handler)
        self._current_task = task
        task.start()
class TaskSequence(CompositeTask):
    """
    A collection of tasks that executes sequentially. Each task
    must have a addExitListener() method that can be used as
    a means to trigger movement from one task to the next.
    """

    __slots__ = ("_task_queue",)

    def __init__(self, **kwargs):
        AsynchronousTask.__init__(self, **kwargs)
        self._task_queue = deque()

    def add(self, task):
        self._task_queue.append(task)

    def _start(self):
        self._start_next_task()

    def cancel(self):
        # Drop all pending tasks before cancelling the current one.
        self._task_queue.clear()
        CompositeTask.cancel(self)

    def _start_next_task(self):
        self._start_task(self._task_queue.popleft(),
            self._task_exit_handler)

    def _task_exit_handler(self, task):
        if self._default_exit(task) != os.EX_OK:
            # A task failed; stop the sequence.
            self.wait()
        elif self._task_queue:
            self._start_next_task()
        else:
            self._final_exit(task)
            self.wait()
class SubProcess(AbstractPollTask):
    """
    A poll-driven task that wraps a forked child process, tracking its
    pid and translating waitpid() status into self.returncode.
    """

    __slots__ = ("pid",) + \
        ("_files", "_reg_id")

    # A file descriptor is required for the scheduler to monitor changes from
    # inside a poll() loop. When logging is not enabled, create a pipe just to
    # serve this purpose alone.
    _dummy_pipe_fd = 9

    def _poll(self):
        if self.returncode is not None:
            return self.returncode
        if self.pid is None:
            return self.returncode
        if self._registered:
            # The scheduler is still monitoring us; do not reap here.
            return self.returncode

        try:
            retval = os.waitpid(self.pid, os.WNOHANG)
        except OSError as e:
            if e.errno != errno.ECHILD:
                raise
            # Child already reaped elsewhere; synthesize a failure.
            retval = (self.pid, 1)

        if retval == (0, 0):
            # Still running.
            return None
        self._set_returncode(retval)
        return self.returncode

    def cancel(self):
        if self.isAlive():
            try:
                os.kill(self.pid, signal.SIGTERM)
            except OSError as e:
                if e.errno != errno.ESRCH:
                    raise

        self.cancelled = True
        if self.pid is not None:
            self.wait()
        return self.returncode

    def isAlive(self):
        return self.pid is not None and \
            self.returncode is None

    def _wait(self):

        if self.returncode is not None:
            return self.returncode

        if self._registered:
            # Let the scheduler run until our registration is consumed.
            self.scheduler.schedule(self._reg_id)
            self._unregister()
            if self.returncode is not None:
                return self.returncode

        try:
            wait_retval = os.waitpid(self.pid, 0)
        except OSError as e:
            if e.errno != errno.ECHILD:
                raise
            # Child already reaped elsewhere; synthesize a failure.
            self._set_returncode((self.pid, 1))
        else:
            self._set_returncode(wait_retval)

        return self.returncode

    def _unregister(self):
        """
        Unregister from the scheduler and close open files.
        """

        self._registered = False

        if self._reg_id is not None:
            self.scheduler.unregister(self._reg_id)
            self._reg_id = None

        if self._files is not None:
            for f in self._files.itervalues():
                f.close()
            self._files = None

    def _set_returncode(self, wait_retval):

        retval = wait_retval[1]

        if retval != os.EX_OK:

            if retval & 0xff:
                # Killed by a signal: encode like os.system() would.
                retval = (retval & 0xff) << 8
            else:
                # Normal exit: extract the exit status byte.
                retval = retval >> 8

        self.returncode = retval
class SpawnProcess(SubProcess):

    """
    Constructor keyword args are passed into portage.process.spawn().
    The required "args" keyword argument will be passed as the first
    argument of portage.process.spawn().
    """

    # Attributes forwarded to portage.process.spawn() when set.
    _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
        "uid", "gid", "groups", "umask", "logfile",
        "path_lookup", "pre_exec")

    __slots__ = ("args",) + \
        _spawn_kwarg_names

    _file_names = ("log", "process", "stdout")
    _files_dict = slot_dict_class(_file_names, prefix="")

    def _start(self):

        if self.fd_pipes is None:
            self.fd_pipes = {}
        fd_pipes = self.fd_pipes
        fd_pipes.setdefault(0, sys.stdin.fileno())
        fd_pipes.setdefault(1, sys.stdout.fileno())
        fd_pipes.setdefault(2, sys.stderr.fileno())

        # flush any pending output
        for fd in fd_pipes.itervalues():
            if fd == sys.stdout.fileno():
                sys.stdout.flush()
            if fd == sys.stderr.fileno():
                sys.stderr.flush()

        logfile = self.logfile
        self._files = self._files_dict()
        files = self._files

        master_fd, slave_fd = self._pipe(fd_pipes)
        fcntl.fcntl(master_fd, fcntl.F_SETFL,
            fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

        null_input = None
        fd_pipes_orig = fd_pipes.copy()
        if self.background:
            # TODO: Use job control functions like tcsetpgrp() to control
            # access to stdin. Until then, use /dev/null so that any
            # attempts to read from stdin will immediately return EOF
            # instead of blocking indefinitely.
            null_input = open('/dev/null', 'rb')
            fd_pipes[0] = null_input.fileno()
        else:
            fd_pipes[0] = fd_pipes_orig[0]

        files.process = os.fdopen(master_fd, 'rb')
        if logfile is not None:

            fd_pipes[1] = slave_fd
            fd_pipes[2] = slave_fd

            files.log = open(logfile, mode='ab')
            # NOTE(review): upstream uses octal 0660 here; 0o660 is the
            # same value in py2.6+ syntax.
            portage.util.apply_secpass_permissions(logfile,
                uid=portage.portage_uid, gid=portage.portage_gid,
                mode=0o660)

            if not self.background:
                files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')

            output_handler = self._output_handler

        else:

            # Create a dummy pipe so the scheduler can monitor
            # the process from inside a poll() loop.
            fd_pipes[self._dummy_pipe_fd] = slave_fd
            if self.background:
                fd_pipes[1] = slave_fd
                fd_pipes[2] = slave_fd
            output_handler = self._dummy_handler

        kwargs = {}
        for k in self._spawn_kwarg_names:
            v = getattr(self, k)
            if v is None:
                continue
            kwargs[k] = v

        kwargs["fd_pipes"] = fd_pipes
        kwargs["returnpid"] = True
        kwargs.pop("logfile", None)

        self._reg_id = self.scheduler.register(files.process.fileno(),
            self._registered_events, output_handler)
        self._registered = True

        retval = self._spawn(self.args, **kwargs)

        os.close(slave_fd)
        if null_input is not None:
            null_input.close()

        if isinstance(retval, int):
            # spawn failed
            self._unregister()
            self.returncode = retval
            self.wait()
            return

        self.pid = retval[0]
        portage.process.spawned_pids.remove(self.pid)

    def _pipe(self, fd_pipes):
        """
        @type fd_pipes: dict
        @param fd_pipes: pipes from which to copy terminal size if desired.
        """
        return os.pipe()

    def _spawn(self, args, **kwargs):
        return portage.process.spawn(args, **kwargs)

    def _output_handler(self, fd, event):

        if event & PollConstants.POLLIN:

            files = self._files
            buf = array.array('B')
            try:
                buf.fromfile(files.process, self._bufsize)
            except EOFError:
                pass

            if buf:
                if not self.background:
                    buf.tofile(files.stdout)
                    files.stdout.flush()
                buf.tofile(files.log)
                files.log.flush()
            else:
                # Empty read: EOF from the child.
                self._unregister()
                self.wait()

        self._unregister_if_appropriate(event)
        return self._registered

    def _dummy_handler(self, fd, event):
        """
        This method is mainly interested in detecting EOF, since
        the only purpose of the pipe is to allow the scheduler to
        monitor the process from inside a poll() loop.
        """

        if event & PollConstants.POLLIN:

            buf = array.array('B')
            try:
                buf.fromfile(self._files.process, self._bufsize)
            except EOFError:
                pass

            if buf:
                pass
            else:
                self._unregister()
                self.wait()

        self._unregister_if_appropriate(event)
        return self._registered
class MiscFunctionsProcess(SpawnProcess):
    """
    Spawns misc-functions.sh with an existing ebuild environment.
    """

    __slots__ = ("commands", "phase", "pkg", "settings")

    def _start(self):
        settings = self.settings
        settings.pop("EBUILD_PHASE", None)
        portage_bin_path = settings["PORTAGE_BIN_PATH"]
        misc_sh_binary = os.path.join(portage_bin_path,
            os.path.basename(portage.const.MISC_SH_BINARY))

        self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
        self.logfile = settings.get("PORTAGE_LOG_FILE")

        # Remove a stale exit-status file before spawning.
        portage._doebuild_exit_status_unlink(
            settings.get("EBUILD_EXIT_STATUS_FILE"))

        SpawnProcess._start(self)

    def _spawn(self, args, **kwargs):
        settings = self.settings
        debug = settings.get("PORTAGE_DEBUG") == "1"
        return portage.spawn(" ".join(args), settings,
            debug=debug, **kwargs)

    def _set_returncode(self, wait_retval):
        SpawnProcess._set_returncode(self, wait_retval)
        # Fold the exit-status file check into the returncode.
        self.returncode = portage._doebuild_exit_status_check_and_log(
            self.settings, self.phase, self.returncode)
class EbuildFetcher(SpawnProcess):
    """
    Spawns the `ebuild <file> fetch` (or fetchall) command for a single
    package.  In prefetch mode, no build directory is touched so that
    concurrent builds of the same cpv are not disturbed.
    """

    __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
        ("_build_dir",)

    def _start(self):

        root_config = self.pkg.root_config
        portdb = root_config.trees["porttree"].dbapi
        ebuild_path = portdb.findname(self.pkg.cpv)
        settings = self.config_pool.allocate()
        settings.setcpv(self.pkg)

        # In prefetch mode, logging goes to emerge-fetch.log and the builddir
        # should not be touched since otherwise it could interfere with
        # another instance of the same cpv concurrently being built for a
        # different $ROOT (currently, builds only cooperate with prefetchers
        # that are spawned for the same $ROOT).
        if not self.prefetch:
            self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
            self._build_dir.lock()
            self._build_dir.clean_log()
            portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
            if self.logfile is None:
                self.logfile = settings.get("PORTAGE_LOG_FILE")

        phase = "fetch"
        if self.fetchall:
            phase = "fetchall"

        # If any incremental variables have been overridden
        # via the environment, those values need to be passed
        # along here so that they are correctly considered by
        # the config instance in the subproccess.
        fetch_env = os.environ.copy()

        nocolor = settings.get("NOCOLOR")
        if nocolor is not None:
            fetch_env["NOCOLOR"] = nocolor

        fetch_env["PORTAGE_NICENESS"] = "0"
        if self.prefetch:
            fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"

        ebuild_binary = os.path.join(
            settings["PORTAGE_BIN_PATH"], "ebuild")

        fetch_args = [ebuild_binary, ebuild_path, phase]
        debug = settings.get("PORTAGE_DEBUG") == "1"
        if debug:
            fetch_args.append("--debug")

        self.args = fetch_args
        self.env = fetch_env
        SpawnProcess._start(self)

    def _pipe(self, fd_pipes):
        """When appropriate, use a pty so that fetcher progress bars,
        like wget has, will work properly."""
        if self.background or not sys.stdout.isatty():
            # When the output only goes to a log file,
            # there's no point in creating a pty.
            return os.pipe()
        stdout_pipe = fd_pipes.get(1)
        got_pty, master_fd, slave_fd = \
            portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
        return (master_fd, slave_fd)

    def _set_returncode(self, wait_retval):
        SpawnProcess._set_returncode(self, wait_retval)
        # Collect elog messages that might have been
        # created by the pkg_nofetch phase.
        if self._build_dir is not None:
            # Skip elog messages for prefetch, in order to avoid duplicates.
            if not self.prefetch and self.returncode != os.EX_OK:
                elog_out = None
                if self.logfile is not None:
                    # NOTE(review): upstream only opens the log for
                    # background fetches here — confirm the condition.
                    if self.background:
                        elog_out = open(self.logfile, 'a')
                msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
                if self.logfile is not None:
                    msg += ", Log file:"
                eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
                if self.logfile is not None:
                    eerror(" '%s'" % (self.logfile,),
                        phase="unpack", key=self.pkg.cpv, out=elog_out)
                if elog_out is not None:
                    elog_out.close()
            if not self.prefetch:
                portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
            features = self._build_dir.settings.features
            if self.returncode == os.EX_OK:
                self._build_dir.clean_log()
            self._build_dir.unlock()
            self.config_pool.deallocate(self._build_dir.settings)
            self._build_dir = None
# EbuildBuildDir: manages the lifetime and locking of a package's
# PORTAGE_BUILDDIR. lock() takes a lock on both the category directory
# (briefly, to create it safely) and the build directory itself; unlock()
# releases it and attempts to remove the empty category dir.
# NOTE(review): this excerpt has stripped indentation and missing interior
# lines (e.g. the lock()/unlock() def lines, try/except/finally scaffolding,
# and the AlreadyLocked body); verify structure against the full file.
2594 class EbuildBuildDir(SlotObject):
2596 __slots__ = ("dir_path", "pkg", "settings",
2597 "locked", "_catdir", "_lock_obj")
2599 def __init__(self, **kwargs):
2600 SlotObject.__init__(self, **kwargs)
2605 This raises an AlreadyLocked exception if lock() is called
2606 while a lock is already held. In order to avoid this, call
2607 unlock() or check whether the "locked" attribute is True
2608 or False before calling lock().
2610 if self._lock_obj is not None:
2611 raise self.AlreadyLocked((self._lock_obj,))
# When no dir_path was supplied, derive PORTAGE_BUILDDIR by running
# doebuild_environment() for the package's ebuild in "setup" mode.
2613 dir_path = self.dir_path
2614 if dir_path is None:
2615 root_config = self.pkg.root_config
2616 portdb = root_config.trees["porttree"].dbapi
2617 ebuild_path = portdb.findname(self.pkg.cpv)
2618 settings = self.settings
2619 settings.setcpv(self.pkg)
2620 debug = settings.get("PORTAGE_DEBUG") == "1"
2621 use_cache = 1 # always true
2622 portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2623 self.settings, debug, use_cache, portdb)
2624 dir_path = self.settings["PORTAGE_BUILDDIR"]
2626 catdir = os.path.dirname(dir_path)
2627 self._catdir = catdir
# Ensure the parent of the category dir exists with the portage group
# before locking/creating the category dir itself.
2629 portage.util.ensure_dirs(os.path.dirname(catdir),
2630 gid=portage.portage_gid,
2634 catdir_lock = portage.locks.lockdir(catdir)
2635 portage.util.ensure_dirs(catdir,
2636 gid=portage.portage_gid,
2638 self._lock_obj = portage.locks.lockdir(dir_path)
2640 self.locked = self._lock_obj is not None
2641 if catdir_lock is not None:
2642 portage.locks.unlockdir(catdir_lock)
2644 def clean_log(self):
2645 """Discard existing log."""
2646 settings = self.settings
2648 for x in ('.logid', 'temp/build.log'):
2650 os.unlink(os.path.join(settings["PORTAGE_BUILDDIR"], x))
# unlock (def line missing from excerpt): release the build dir lock and
# best-effort remove the now-possibly-empty category directory, tolerating
# ENOENT/ENOTEMPTY/EEXIST from concurrent use.
2655 if self._lock_obj is None:
2658 portage.locks.unlockdir(self._lock_obj)
2659 self._lock_obj = None
2662 catdir = self._catdir
2665 catdir_lock = portage.locks.lockdir(catdir)
2671 if e.errno not in (errno.ENOENT,
2672 errno.ENOTEMPTY, errno.EEXIST):
2675 portage.locks.unlockdir(catdir_lock)
# Raised by lock() when a lock is already held; see lock() docstring.
2677 class AlreadyLocked(portage.exception.PortageException):
# EbuildBuild: CompositeTask that drives a full from-source build of one
# package: optional prefetch wait -> fetch -> build dir lock -> EbuildExecuter
# (compile/install phases) -> optional binary package -> merge.
# NOTE(review): indentation is stripped and many interior lines are missing
# from this excerpt (def lines such as _start/execute, if/else scaffolding,
# returns); treat the flow annotations below as reconstruction hints only.
2680 class EbuildBuild(CompositeTask):
2682 __slots__ = ("args_set", "config_pool", "find_blockers",
2683 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2684 "prefetcher", "settings", "world_atom") + \
2685 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
# Start (def line missing): bind per-package config, resolve the ebuild
# path, then either wait on a live prefetcher or proceed directly.
2689 logger = self.logger
2692 settings = self.settings
2693 world_atom = self.world_atom
2694 root_config = pkg.root_config
2697 portdb = root_config.trees[tree].dbapi
2698 settings.setcpv(pkg)
2699 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2700 ebuild_path = portdb.findname(self.pkg.cpv)
2701 self._ebuild_path = ebuild_path
2703 prefetcher = self.prefetcher
2704 if prefetcher is None:
2706 elif not prefetcher.isAlive():
2708 elif prefetcher.poll() is None:
2710 waiting_msg = "Fetching files " + \
2711 "in the background. " + \
2712 "To view fetch progress, run `tail -f " + \
2713 "/var/log/emerge-fetch.log` in another " + \
2715 msg_prefix = colorize("GOOD", " * ")
2716 from textwrap import wrap
2717 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2718 for line in wrap(waiting_msg, 65))
2719 if not self.background:
2720 writemsg(waiting_msg, noiselevel=-1)
# Block on the running prefetcher; _prefetch_exit continues the pipeline.
2722 self._current_task = prefetcher
2723 prefetcher.addExitListener(self._prefetch_exit)
2726 self._prefetch_exit(prefetcher)
2728 def _prefetch_exit(self, prefetcher):
2732 settings = self.settings
# Pretend/fetchonly path: synchronous EbuildFetchonly, result recorded
# directly in self.returncode.
2735 fetcher = EbuildFetchonly(
2736 fetch_all=opts.fetch_all_uri,
2737 pkg=pkg, pretend=opts.pretend,
2739 retval = fetcher.execute()
2740 self.returncode = retval
# Normal path: asynchronous EbuildFetcher followed by _fetch_exit.
2744 fetcher = EbuildFetcher(config_pool=self.config_pool,
2745 fetchall=opts.fetch_all_uri,
2746 fetchonly=opts.fetchonly,
2747 background=self.background,
2748 pkg=pkg, scheduler=self.scheduler)
2750 self._start_task(fetcher, self._fetch_exit)
2752 def _fetch_exit(self, fetcher):
2756 fetch_failed = False
2758 fetch_failed = self._final_exit(fetcher) != os.EX_OK
2760 fetch_failed = self._default_exit(fetcher) != os.EX_OK
# On failure keep the fetch log around and expose it via PORTAGE_LOG_FILE;
# on success discard it.
2762 if fetch_failed and fetcher.logfile is not None and \
2763 os.path.exists(fetcher.logfile):
2764 self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2766 if not fetch_failed and fetcher.logfile is not None:
2767 # Fetch was successful, so remove the fetch log.
2769 os.unlink(fetcher.logfile)
2773 if fetch_failed or opts.fetchonly:
2777 logger = self.logger
2779 pkg_count = self.pkg_count
2780 scheduler = self.scheduler
2781 settings = self.settings
2782 features = settings.features
2783 ebuild_path = self._ebuild_path
2784 system_set = pkg.root_config.sets["system"]
# Acquire the build dir lock for the remainder of the build/merge.
2786 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2787 self._build_dir.lock()
2789 # Cleaning is triggered before the setup
2790 # phase, in portage.doebuild().
2791 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2792 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2793 short_msg = "emerge: (%s of %s) %s Clean" % \
2794 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2795 logger.log(msg, short_msg=short_msg)
2797 #buildsyspkg: Check if we need to _force_ binary package creation
2798 self._issyspkg = "buildsyspkg" in features and \
2799 system_set.findAtomForPackage(pkg) and \
2802 if opts.buildpkg or self._issyspkg:
2804 self._buildpkg = True
2806 msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2807 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2808 short_msg = "emerge: (%s of %s) %s Compile" % \
2809 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2810 logger.log(msg, short_msg=short_msg)
2813 msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2814 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2815 short_msg = "emerge: (%s of %s) %s Compile" % \
2816 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2817 logger.log(msg, short_msg=short_msg)
2819 build = EbuildExecuter(background=self.background, pkg=pkg,
2820 scheduler=scheduler, settings=settings)
2821 self._start_task(build, self._build_exit)
2823 def _unlock_builddir(self):
2824 portage.elog.elog_process(self.pkg.cpv, self.settings)
2825 self._build_dir.unlock()
2827 def _build_exit(self, build):
2828 if self._default_exit(build) != os.EX_OK:
2829 self._unlock_builddir()
2834 buildpkg = self._buildpkg
2837 self._final_exit(build)
2842 msg = ">>> This is a system package, " + \
2843 "let's pack a rescue tarball.\n"
2845 log_path = self.settings.get("PORTAGE_LOG_FILE")
2846 if log_path is not None:
2847 log_file = open(log_path, 'a')
2853 if not self.background:
2854 portage.writemsg_stdout(msg, noiselevel=-1)
2856 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2857 scheduler=self.scheduler, settings=self.settings)
2859 self._start_task(packager, self._buildpkg_exit)
2861 def _buildpkg_exit(self, packager):
2863 Released build dir lock when there is a failure or
2864 when in buildpkgonly mode. Otherwise, the lock will
2865 be released when merge() is called.
2868 if self._default_exit(packager) != os.EX_OK:
2869 self._unlock_builddir()
2873 if self.opts.buildpkgonly:
2874 # Need to call "clean" phase for buildpkgonly mode
2875 portage.elog.elog_process(self.pkg.cpv, self.settings)
2877 clean_phase = EbuildPhase(background=self.background,
2878 pkg=self.pkg, phase=phase,
2879 scheduler=self.scheduler, settings=self.settings,
2881 self._start_task(clean_phase, self._clean_exit)
2884 # Continue holding the builddir lock until
2885 # after the package has been installed.
2886 self._current_task = None
2887 self.returncode = packager.returncode
2890 def _clean_exit(self, clean_phase):
2891 if self._final_exit(clean_phase) != os.EX_OK or \
2892 self.opts.buildpkgonly:
2893 self._unlock_builddir()
# install (def line missing from excerpt): docstring fragment follows.
2898 Install the package and then clean up and release locks.
2899 Only call this after the build has completed successfully
2900 and neither fetchonly nor buildpkgonly mode are enabled.
2903 find_blockers = self.find_blockers
2904 ldpath_mtimes = self.ldpath_mtimes
2905 logger = self.logger
2907 pkg_count = self.pkg_count
2908 settings = self.settings
2909 world_atom = self.world_atom
2910 ebuild_path = self._ebuild_path
2913 merge = EbuildMerge(find_blockers=self.find_blockers,
2914 ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2915 pkg_count=pkg_count, pkg_path=ebuild_path,
2916 scheduler=self.scheduler,
2917 settings=settings, tree=tree, world_atom=world_atom)
2919 msg = " === (%s of %s) Merging (%s::%s)" % \
2920 (pkg_count.curval, pkg_count.maxval,
2921 pkg.cpv, ebuild_path)
2922 short_msg = "emerge: (%s of %s) %s Merge" % \
2923 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2924 logger.log(msg, short_msg=short_msg)
# The merge runs synchronously; the build dir is unlocked afterwards
# (presumably in a try/finally — the scaffolding lines are missing here).
2927 rval = merge.execute()
2929 self._unlock_builddir()
# EbuildExecuter: CompositeTask that runs the source-build phase chain for
# one package: clean -> setup -> unpack -> (prepare, configure, compile,
# test, install), with setup/unpack routed through the scheduler's
# dedicated slots and live-eclass unpacks serialized on $DISTDIR.
# NOTE(review): interior lines are missing from this excerpt (the
# _live_eclasses members, _start def line, cleanup flag assignment, etc.);
# verify against the full file.
2933 class EbuildExecuter(CompositeTask):
2935 __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2937 _phases = ("prepare", "configure", "compile", "test", "install")
2939 _live_eclasses = frozenset([
2949 self._tree = "porttree"
2952 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2953 scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2954 self._start_task(clean_phase, self._clean_phase_exit)
2956 def _clean_phase_exit(self, clean_phase):
2958 if self._default_exit(clean_phase) != os.EX_OK:
2963 scheduler = self.scheduler
2964 settings = self.settings
2967 # This initializes PORTAGE_LOG_FILE.
2968 portage.prepare_build_dirs(pkg.root, settings, cleanup)
# The setup phase goes through scheduler.scheduleSetup() rather than
# _start_task, so concurrent setups are limited by the scheduler.
2970 setup_phase = EbuildPhase(background=self.background,
2971 pkg=pkg, phase="setup", scheduler=scheduler,
2972 settings=settings, tree=self._tree)
2974 setup_phase.addExitListener(self._setup_exit)
2975 self._current_task = setup_phase
2976 self.scheduler.scheduleSetup(setup_phase)
2978 def _setup_exit(self, setup_phase):
2980 if self._default_exit(setup_phase) != os.EX_OK:
2984 unpack_phase = EbuildPhase(background=self.background,
2985 pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2986 settings=self.settings, tree=self._tree)
2988 if self._live_eclasses.intersection(self.pkg.inherited):
2989 # Serialize $DISTDIR access for live ebuilds since
2990 # otherwise they can interfere with each other.
2992 unpack_phase.addExitListener(self._unpack_exit)
2993 self._current_task = unpack_phase
2994 self.scheduler.scheduleUnpack(unpack_phase)
2997 self._start_task(unpack_phase, self._unpack_exit)
2999 def _unpack_exit(self, unpack_phase):
3001 if self._default_exit(unpack_phase) != os.EX_OK:
3005 ebuild_phases = TaskSequence(scheduler=self.scheduler)
3008 phases = self._phases
3009 eapi = pkg.metadata["EAPI"]
3010 if eapi in ("0", "1"):
3011 # skip src_prepare and src_configure
# Queue the remaining phases sequentially and finish with the default
# final-exit handler.
3014 for phase in phases:
3015 ebuild_phases.add(EbuildPhase(background=self.background,
3016 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3017 settings=self.settings, tree=self._tree))
3019 self._start_task(ebuild_phases, self._default_final_exit)
# EbuildMetadataPhase: SubProcess wrapper around the ebuild "depend" phase.
# Spawns doebuild(..., "depend", returnpid=True) with an extra pipe fd; the
# child writes one metadata value per line, which _output_handler collects
# and _set_returncode zips against portage.auxdbkeys.
# NOTE(review): interior lines are missing from this excerpt (the _start
# def line, early returns, EAPI deprecation branch, fd flush calls, etc.);
# verify against the full file.
3021 class EbuildMetadataPhase(SubProcess):
3024 Asynchronous interface for the ebuild "depend" phase which is
3025 used to extract metadata from the ebuild.
3028 __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3029 "ebuild_mtime", "metadata", "portdb", "repo_path", "settings") + \
3032 _file_names = ("ebuild",)
3033 _files_dict = slot_dict_class(_file_names, prefix="")
3037 settings = self.settings
3038 settings.setcpv(self.cpv)
3039 ebuild_path = self.ebuild_path
# Pre-parse EAPI from the file name (GLEP 55) or the ebuild header when
# the corresponding features are enabled, so unsupported EAPIs can be
# reported without spawning bash at all.
3042 if 'parse-eapi-glep-55' in settings.features:
3043 pf, eapi = portage._split_ebuild_name_glep55(
3044 os.path.basename(ebuild_path))
3045 if eapi is None and \
3046 'parse-eapi-ebuild-head' in settings.features:
3047 eapi = portage._parse_eapi_ebuild_head(codecs.open(ebuild_path,
3048 mode='r', encoding='utf_8', errors='replace'))
3050 if eapi is not None:
3051 if not portage.eapi_is_supported(eapi):
# Unsupported EAPI: report just the EAPI via the callback and finish
# successfully without running the depend phase.
3052 self.metadata_callback(self.cpv, self.ebuild_path,
3053 self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
3054 self.returncode = os.EX_OK
3058 settings.configdict['pkg']['EAPI'] = eapi
3060 debug = settings.get("PORTAGE_DEBUG") == "1"
3064 if self.fd_pipes is not None:
3065 fd_pipes = self.fd_pipes.copy()
3069 fd_pipes.setdefault(0, sys.stdin.fileno())
3070 fd_pipes.setdefault(1, sys.stdout.fileno())
3071 fd_pipes.setdefault(2, sys.stderr.fileno())
3073 # flush any pending output
3074 for fd in fd_pipes.itervalues():
3075 if fd == sys.stdout.fileno():
3077 if fd == sys.stderr.fileno():
3080 fd_pipes_orig = fd_pipes.copy()
3081 self._files = self._files_dict()
# Non-blocking read end so the scheduler's poll loop never stalls on it.
3084 master_fd, slave_fd = os.pipe()
3085 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3086 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3088 fd_pipes[self._metadata_fd] = slave_fd
3090 self._raw_metadata = []
3091 files.ebuild = os.fdopen(master_fd, 'r')
3092 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3093 self._registered_events, self._output_handler)
3094 self._registered = True
3096 retval = portage.doebuild(ebuild_path, "depend",
3097 settings["ROOT"], settings, debug,
3098 mydbapi=self.portdb, tree="porttree",
3099 fd_pipes=fd_pipes, returnpid=True)
3103 if isinstance(retval, int):
3104 # doebuild failed before spawning
3106 self.returncode = retval
# Take ownership of the spawned pid; remove it from portage's global
# reaper list since this object waits on it itself.
3110 self.pid = retval[0]
3111 portage.process.spawned_pids.remove(self.pid)
3113 def _output_handler(self, fd, event):
3115 if event & PollConstants.POLLIN:
3116 self._raw_metadata.append(self._files.ebuild.read())
3117 if not self._raw_metadata[-1]:
3121 self._unregister_if_appropriate(event)
3122 return self._registered
3124 def _set_returncode(self, wait_retval):
3125 SubProcess._set_returncode(self, wait_retval)
3126 if self.returncode == os.EX_OK:
3127 metadata_lines = "".join(self._raw_metadata).splitlines()
3128 if len(portage.auxdbkeys) != len(metadata_lines):
3129 # Don't trust bash's returncode if the
3130 # number of lines is incorrect.
3133 metadata = izip(portage.auxdbkeys, metadata_lines)
3134 self.metadata = self.metadata_callback(self.cpv,
3135 self.ebuild_path, self.repo_path, metadata,
# EbuildProcess: SpawnProcess subclass that runs a single ebuild phase via
# portage.doebuild(). Handles pty allocation, exit-status-file checking,
# test-fail-continue, and post-phase userpriv permission fixups.
# NOTE(review): interior lines are missing from this excerpt (the _start
# def line and the return of rval from _spawn); verify against the full
# file.
3138 class EbuildProcess(SpawnProcess):
3140 __slots__ = ("phase", "pkg", "settings", "tree")
3143 # Don't open the log file during the clean phase since the
3144 # open file can result in an nfs lock on $T/build.log which
3145 # prevents the clean phase from removing $T.
3146 if self.phase not in ("clean", "cleanrm"):
3147 self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3148 SpawnProcess._start(self)
3150 def _pipe(self, fd_pipes):
# Always use a pty (or pipe fallback) sized like the stdout pipe so
# build output renders correctly.
3151 stdout_pipe = fd_pipes.get(1)
3152 got_pty, master_fd, slave_fd = \
3153 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3154 return (master_fd, slave_fd)
3156 def _spawn(self, args, **kwargs):
# Delegate the actual phase execution to portage.doebuild; args from the
# SpawnProcess machinery are not used directly here.
3158 root_config = self.pkg.root_config
3160 mydbapi = root_config.trees[tree].dbapi
3161 settings = self.settings
3162 ebuild_path = settings["EBUILD"]
3163 debug = settings.get("PORTAGE_DEBUG") == "1"
3165 rval = portage.doebuild(ebuild_path, self.phase,
3166 root_config.root, settings, debug,
3167 mydbapi=mydbapi, tree=tree, **kwargs)
3171 def _set_returncode(self, wait_retval):
3172 SpawnProcess._set_returncode(self, wait_retval)
# For non-clean phases, cross-check the exit status file written by the
# ebuild environment; bash's exit code alone is not trusted.
3174 if self.phase not in ("clean", "cleanrm"):
3175 self.returncode = portage._doebuild_exit_status_check_and_log(
3176 self.settings, self.phase, self.returncode)
3178 if self.phase == "test" and self.returncode != os.EX_OK and \
3179 "test-fail-continue" in self.settings.features:
3180 self.returncode = os.EX_OK
3182 portage._post_phase_userpriv_perms(self.settings)
# EbuildPhase: CompositeTask that runs one EbuildProcess phase and then any
# registered post-phase misc commands (e.g. install-time QA checks and
# permission fixups) via MiscFunctionsProcess.
# NOTE(review): interior lines are missing from this excerpt (the _start
# def line, log_file initialization/close, waits); verify against the full
# file.
3184 class EbuildPhase(CompositeTask):
3186 __slots__ = ("background", "pkg", "phase",
3187 "scheduler", "settings", "tree")
3189 _post_phase_cmds = portage._post_phase_cmds
3193 ebuild_process = EbuildProcess(background=self.background,
3194 pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3195 settings=self.settings, tree=self.tree)
3197 self._start_task(ebuild_process, self._ebuild_exit)
3199 def _ebuild_exit(self, ebuild_process):
# After install, scan the build log for common QA problems before
# deciding the phase's fate.
3201 if self.phase == "install":
3203 log_path = self.settings.get("PORTAGE_LOG_FILE")
3205 if self.background and log_path is not None:
3206 log_file = open(log_path, 'a')
3209 portage._check_build_log(self.settings, out=out)
3211 if log_file is not None:
3214 if self._default_exit(ebuild_process) != os.EX_OK:
3218 settings = self.settings
3220 if self.phase == "install":
3221 portage._post_src_install_chost_fix(settings)
3222 portage._post_src_install_uid_fix(settings)
3224 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3225 if post_phase_cmds is not None:
3226 post_phase = MiscFunctionsProcess(background=self.background,
3227 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3228 scheduler=self.scheduler, settings=settings)
3229 self._start_task(post_phase, self._post_phase_exit)
# No post-phase commands: propagate the ebuild process's returncode.
3232 self.returncode = ebuild_process.returncode
3233 self._current_task = None
3236 def _post_phase_exit(self, post_phase):
3237 if self._final_exit(post_phase) != os.EX_OK:
3238 writemsg("!!! post %s failed; exiting.\n" % self.phase,
3240 self._current_task = None
# EbuildBinpkg: EbuildProcess specialization that runs the "package" phase
# to build a .tbz2 into a pid-suffixed temp file, then injects it into the
# bintree on success.
# NOTE(review): interior lines are missing from this excerpt (__init__/
# _start def lines, try/finally around _start); verify against the full
# file.
3244 class EbuildBinpkg(EbuildProcess):
3246 This assumes that src_install() has successfully completed.
3248 __slots__ = ("_binpkg_tmpfile",)
3251 self.phase = "package"
3252 self.tree = "porttree"
3254 root_config = pkg.root_config
3255 portdb = root_config.trees["porttree"].dbapi
3256 bintree = root_config.trees["bintree"]
3257 ebuild_path = portdb.findname(self.pkg.cpv)
3258 settings = self.settings
3259 debug = settings.get("PORTAGE_DEBUG") == "1"
# Make sure nothing else in the bintree collides with this cpv, then
# stage the package into a temp file unique to this pid.
3261 bintree.prevent_collision(pkg.cpv)
3262 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3263 pkg.cpv + ".tbz2." + str(os.getpid()))
3264 self._binpkg_tmpfile = binpkg_tmpfile
3265 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3266 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3269 EbuildProcess._start(self)
# The tmpfile setting is scoped to this phase only.
3271 settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3273 def _set_returncode(self, wait_retval):
3274 EbuildProcess._set_returncode(self, wait_retval)
3277 bintree = pkg.root_config.trees["bintree"]
3278 binpkg_tmpfile = self._binpkg_tmpfile
3279 if self.returncode == os.EX_OK:
3280 bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
# EbuildMerge: synchronous wrapper around portage.merge() for a freshly
# built image dir; on success, records the package via world_atom and logs
# completion.
# NOTE(review): interior lines are missing from this excerpt (execute def
# line, return statements, the pkg local in _log_success); verify against
# the full file.
3282 class EbuildMerge(SlotObject):
3284 __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3285 "pkg", "pkg_count", "pkg_path", "pretend",
3286 "scheduler", "settings", "tree", "world_atom")
3289 root_config = self.pkg.root_config
3290 settings = self.settings
3291 retval = portage.merge(settings["CATEGORY"],
3292 settings["PF"], settings["D"],
3293 os.path.join(settings["PORTAGE_BUILDDIR"],
3294 "build-info"), root_config.root, settings,
3295 myebuild=settings["EBUILD"],
3296 mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3297 vartree=root_config.trees["vartree"],
3298 prev_mtimes=self.ldpath_mtimes,
3299 scheduler=self.scheduler,
3300 blockers=self.find_blockers)
# Only update the world file / emerge log after a successful merge.
3302 if retval == os.EX_OK:
3303 self.world_atom(self.pkg)
3308 def _log_success(self):
3310 pkg_count = self.pkg_count
3311 pkg_path = self.pkg_path
3312 logger = self.logger
3313 if "noclean" not in self.settings.features:
3314 short_msg = "emerge: (%s of %s) %s Clean Post" % \
3315 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3316 logger.log((" === (%s of %s) " + \
3317 "Post-Build Cleaning (%s::%s)") % \
3318 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3319 short_msg=short_msg)
3320 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3321 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# PackageUninstall: AsynchronousTask that synchronously unmerges one
# installed package via unmerge(), translating UninstallFailure into a
# returncode; output is routed through _writemsg_level so background runs
# only log (and surface warnings+) instead of spamming the console.
# NOTE(review): interior lines are missing from this excerpt (the _start
# def line, try:, wait/return, log-writing tail of _writemsg_level);
# verify against the full file.
3323 class PackageUninstall(AsynchronousTask):
3325 __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3329 unmerge(self.pkg.root_config, self.opts, "unmerge",
3330 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3331 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3332 writemsg_level=self._writemsg_level)
3333 except UninstallFailure, e:
3334 self.returncode = e.status
3336 self.returncode = os.EX_OK
3339 def _writemsg_level(self, msg, level=0, noiselevel=0):
3341 log_path = self.settings.get("PORTAGE_LOG_FILE")
3342 background = self.background
# Without a log file, suppress sub-warning messages in background mode.
3344 if log_path is None:
3345 if not (background and level < logging.WARNING):
3346 portage.util.writemsg_level(msg,
3347 level=level, noiselevel=noiselevel)
3350 portage.util.writemsg_level(msg,
3351 level=level, noiselevel=noiselevel)
3353 f = open(log_path, 'a')
# Binpkg: CompositeTask that installs a binary package end-to-end:
# optional prefetch wait -> fetch -> digest verify -> clean -> extract
# metadata and image -> setup phase -> merge; build dir locking mirrors the
# source-build path.
# NOTE(review): indentation is stripped and many interior lines are missing
# from this excerpt (def lines such as _start/_unpack_metadata/_install_exit,
# returns, waits, file closes); treat annotations as reconstruction hints.
3359 class Binpkg(CompositeTask):
3361 __slots__ = ("find_blockers",
3362 "ldpath_mtimes", "logger", "opts",
3363 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3364 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3365 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3367 def _writemsg_level(self, msg, level=0, noiselevel=0):
3369 if not self.background:
3370 portage.util.writemsg_level(msg,
3371 level=level, noiselevel=noiselevel)
3373 log_path = self.settings.get("PORTAGE_LOG_FILE")
3374 if log_path is not None:
3375 f = open(log_path, 'a')
# Start (def line missing): compute the build dir layout and synthesize
# an EBUILD path under build-info for the binary package.
3384 settings = self.settings
3385 settings.setcpv(pkg)
3386 self._tree = "bintree"
3387 self._bintree = self.pkg.root_config.trees[self._tree]
3388 self._verify = not self.opts.pretend
3390 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3391 "portage", pkg.category, pkg.pf)
3392 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3393 pkg=pkg, settings=settings)
3394 self._image_dir = os.path.join(dir_path, "image")
3395 self._infloc = os.path.join(dir_path, "build-info")
3396 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3397 settings["EBUILD"] = self._ebuild_path
3398 debug = settings.get("PORTAGE_DEBUG") == "1"
3399 portage.doebuild_environment(self._ebuild_path, "setup",
3400 settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3401 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3403 # The prefetcher has already completed or it
3404 # could be running now. If it's running now,
3405 # wait for it to complete since it holds
3406 # a lock on the file being fetched. The
3407 # portage.locks functions are only designed
3408 # to work between separate processes. Since
3409 # the lock is held by the current process,
3410 # use the scheduler and fetcher methods to
3411 # synchronize with the fetcher.
3412 prefetcher = self.prefetcher
3413 if prefetcher is None:
3415 elif not prefetcher.isAlive():
3417 elif prefetcher.poll() is None:
3419 waiting_msg = ("Fetching '%s' " + \
3420 "in the background. " + \
3421 "To view fetch progress, run `tail -f " + \
3422 "/var/log/emerge-fetch.log` in another " + \
3423 "terminal.") % prefetcher.pkg_path
3424 msg_prefix = colorize("GOOD", " * ")
3425 from textwrap import wrap
3426 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3427 for line in wrap(waiting_msg, 65))
3428 if not self.background:
3429 writemsg(waiting_msg, noiselevel=-1)
3431 self._current_task = prefetcher
3432 prefetcher.addExitListener(self._prefetch_exit)
3435 self._prefetch_exit(prefetcher)
3437 def _prefetch_exit(self, prefetcher):
3440 pkg_count = self.pkg_count
# Lock the build dir (except in pretend/fetchonly modes) before any
# on-disk work.
3441 if not (self.opts.pretend or self.opts.fetchonly):
3442 self._build_dir.lock()
3443 # If necessary, discard old log so that we don't
3445 self._build_dir.clean_log()
3446 # Initialize PORTAGE_LOG_FILE.
3447 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3448 fetcher = BinpkgFetcher(background=self.background,
3449 logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3450 pretend=self.opts.pretend, scheduler=self.scheduler)
3451 pkg_path = fetcher.pkg_path
3452 self._pkg_path = pkg_path
# Only actually fetch when --getbinpkg is set and the package lives on
# the remote binhost.
3454 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3456 msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3457 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3458 short_msg = "emerge: (%s of %s) %s Fetch" % \
3459 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3460 self.logger.log(msg, short_msg=short_msg)
3461 self._start_task(fetcher, self._fetcher_exit)
3464 self._fetcher_exit(fetcher)
3466 def _fetcher_exit(self, fetcher):
3468 # The fetcher only has a returncode when
3469 # --getbinpkg is enabled.
3470 if fetcher.returncode is not None:
3471 self._fetched_pkg = True
3472 if self._default_exit(fetcher) != os.EX_OK:
3473 self._unlock_builddir()
3477 if self.opts.pretend:
3478 self._current_task = None
3479 self.returncode = os.EX_OK
# Digest-verify the local package file before using it.
3487 logfile = self.settings.get("PORTAGE_LOG_FILE")
3488 verifier = BinpkgVerifier(background=self.background,
3489 logfile=logfile, pkg=self.pkg)
3490 self._start_task(verifier, self._verifier_exit)
3493 self._verifier_exit(verifier)
3495 def _verifier_exit(self, verifier):
3496 if verifier is not None and \
3497 self._default_exit(verifier) != os.EX_OK:
3498 self._unlock_builddir()
3502 logger = self.logger
3504 pkg_count = self.pkg_count
3505 pkg_path = self._pkg_path
# A freshly fetched package is injected into the local bintree index.
3507 if self._fetched_pkg:
3508 self._bintree.inject(pkg.cpv, filename=pkg_path)
3510 if self.opts.fetchonly:
3511 self._current_task = None
3512 self.returncode = os.EX_OK
3516 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3517 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3518 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3519 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3520 logger.log(msg, short_msg=short_msg)
3523 settings = self.settings
3524 ebuild_phase = EbuildPhase(background=self.background,
3525 pkg=pkg, phase=phase, scheduler=self.scheduler,
3526 settings=settings, tree=self._tree)
3528 self._start_task(ebuild_phase, self._clean_exit)
3530 def _clean_exit(self, clean_phase):
3531 if self._default_exit(clean_phase) != os.EX_OK:
3532 self._unlock_builddir()
# Unpack metadata (def line missing): recreate the build dir skeleton,
# extract the xpak build-info, and backfill missing CATEGORY/PF entries.
3536 dir_path = self._build_dir.dir_path
3538 infloc = self._infloc
3540 pkg_path = self._pkg_path
3543 for mydir in (dir_path, self._image_dir, infloc):
3544 portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3545 gid=portage.data.portage_gid, mode=dir_mode)
3547 # This initializes PORTAGE_LOG_FILE.
3548 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3549 self._writemsg_level(">>> Extracting info\n")
3551 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3552 check_missing_metadata = ("CATEGORY", "PF")
3553 missing_metadata = set()
3554 for k in check_missing_metadata:
3555 v = pkg_xpak.getfile(k)
3557 missing_metadata.add(k)
3559 pkg_xpak.unpackinfo(infloc)
3560 for k in missing_metadata:
3568 f = open(os.path.join(infloc, k), 'wb')
3574 # Store the md5sum in the vdb.
3575 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3577 f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3581 # This gives bashrc users an opportunity to do various things
3582 # such as remove binary packages after they're installed.
3583 settings = self.settings
3584 settings.setcpv(self.pkg)
3585 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3586 settings.backup_changes("PORTAGE_BINPKG_FILE")
# The setup phase is routed through the scheduler's setup slot, like the
# source-build path.
3589 setup_phase = EbuildPhase(background=self.background,
3590 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3591 settings=settings, tree=self._tree)
3593 setup_phase.addExitListener(self._setup_exit)
3594 self._current_task = setup_phase
3595 self.scheduler.scheduleSetup(setup_phase)
3597 def _setup_exit(self, setup_phase):
3598 if self._default_exit(setup_phase) != os.EX_OK:
3599 self._unlock_builddir()
3603 extractor = BinpkgExtractorAsync(background=self.background,
3604 image_dir=self._image_dir,
3605 pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3606 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3607 self._start_task(extractor, self._extractor_exit)
3609 def _extractor_exit(self, extractor):
3610 if self._final_exit(extractor) != os.EX_OK:
3611 self._unlock_builddir()
3612 writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3616 def _unlock_builddir(self):
# No lock was taken in pretend/fetchonly modes, so nothing to release.
3617 if self.opts.pretend or self.opts.fetchonly:
3619 portage.elog.elog_process(self.pkg.cpv, self.settings)
3620 self._build_dir.unlock()
# Install (def line missing): synchronous EbuildMerge, with the build dir
# unlocked afterwards (scaffolding lines absent from this excerpt).
3624 # This gives bashrc users an opportunity to do various things
3625 # such as remove binary packages after they're installed.
3626 settings = self.settings
3627 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3628 settings.backup_changes("PORTAGE_BINPKG_FILE")
3630 merge = EbuildMerge(find_blockers=self.find_blockers,
3631 ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3632 pkg=self.pkg, pkg_count=self.pkg_count,
3633 pkg_path=self._pkg_path, scheduler=self.scheduler,
3634 settings=settings, tree=self._tree, world_atom=self.world_atom)
3637 retval = merge.execute()
3639 settings.pop("PORTAGE_BINPKG_FILE", None)
3640 self._unlock_builddir()
# BinpkgFetcher: SpawnProcess that downloads one .tbz2 from the binhost
# using the configured FETCHCOMMAND/RESUMECOMMAND, with optional file
# locking and post-fetch mtime synchronization against the remote index.
# NOTE(review): indentation is stripped and interior lines are missing from
# this excerpt (the _start def line, pretend return, try/except scaffolding,
# lock()/unlock() def lines); verify against the full file.
3643 class BinpkgFetcher(SpawnProcess):
3645 __slots__ = ("pkg", "pretend",
3646 "locked", "pkg_path", "_lock_obj")
3648 def __init__(self, **kwargs):
3649 SpawnProcess.__init__(self, **kwargs)
3651 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
# Start (def line missing): resolve the download URI, build the fetch
# command, and spawn it.
3659 pretend = self.pretend
3660 bintree = pkg.root_config.trees["bintree"]
3661 settings = bintree.settings
3662 use_locks = "distlocks" in settings.features
3663 pkg_path = self.pkg_path
3666 portage.util.ensure_dirs(os.path.dirname(pkg_path))
# Resume only when a partial file is already known-invalid in the
# bintree; otherwise start from scratch.
3669 exists = os.path.exists(pkg_path)
3670 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3671 if not (pretend or resume):
3672 # Remove existing file or broken symlink.
3678 # urljoin doesn't work correctly with
3679 # unrecognized protocols like sftp
3680 if bintree._remote_has_index:
3681 rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3683 rel_uri = pkg.cpv + ".tbz2"
3684 uri = bintree._remote_base_uri.rstrip("/") + \
3685 "/" + rel_uri.lstrip("/")
3687 uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3688 "/" + pkg.pf + ".tbz2"
# Pretend mode just prints the URI and succeeds.
3691 portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3692 self.returncode = os.EX_OK
# Pick the protocol-specific fetch command when one is configured,
# falling back to the generic FETCHCOMMAND/RESUMECOMMAND.
3696 protocol = urlparse.urlparse(uri)[0]
3697 fcmd_prefix = "FETCHCOMMAND"
3699 fcmd_prefix = "RESUMECOMMAND"
3700 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3702 fcmd = settings.get(fcmd_prefix)
3705 "DISTDIR" : os.path.dirname(pkg_path),
3707 "FILE" : os.path.basename(pkg_path)
3710 fetch_env = dict(settings.iteritems())
3711 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3712 for x in shlex.split(fcmd)]
3714 if self.fd_pipes is None:
3716 fd_pipes = self.fd_pipes
3718 # Redirect all output to stdout since some fetchers like
3719 # wget pollute stderr (if portage detects a problem then it
3720 # can send its own message to stderr).
3721 fd_pipes.setdefault(0, sys.stdin.fileno())
3722 fd_pipes.setdefault(1, sys.stdout.fileno())
3723 fd_pipes.setdefault(2, sys.stdout.fileno())
3725 self.args = fetch_args
3726 self.env = fetch_env
3727 SpawnProcess._start(self)
3729 def _set_returncode(self, wait_retval):
3730 SpawnProcess._set_returncode(self, wait_retval)
3731 if self.returncode == os.EX_OK:
3732 # If possible, update the mtime to match the remote package if
3733 # the fetcher didn't already do it automatically.
3734 bintree = self.pkg.root_config.trees["bintree"]
3735 if bintree._remote_has_index:
3736 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3737 if remote_mtime is not None:
3739 remote_mtime = long(remote_mtime)
3744 local_mtime = long(os.stat(self.pkg_path).st_mtime)
3748 if remote_mtime != local_mtime:
3750 os.utime(self.pkg_path,
3751 (remote_mtime, remote_mtime))
# lock (def line missing): take an exclusive lock on the package file.
3760 This raises an AlreadyLocked exception if lock() is called
3761 while a lock is already held. In order to avoid this, call
3762 unlock() or check whether the "locked" attribute is True
3763 or False before calling lock().
3765 if self._lock_obj is not None:
3766 raise self.AlreadyLocked((self._lock_obj,))
3768 self._lock_obj = portage.locks.lockfile(
3769 self.pkg_path, wantnewlockfile=1)
# Raised by lock() when a lock is already held; see lock() docstring.
3772 class AlreadyLocked(portage.exception.PortageException):
# unlock (def line missing): release the package file lock, if held.
3776 if self._lock_obj is None:
3778 portage.locks.unlockfile(self._lock_obj)
3779 self._lock_obj = None
3782 class BinpkgVerifier(AsynchronousTask):
3783 __slots__ = ("logfile", "pkg",)
3787 Note: Unlike a normal AsynchronousTask.start() method,
3788 this one does all work synchronously. The returncode
3789 attribute will be set before it returns.
3793 root_config = pkg.root_config
3794 bintree = root_config.trees["bintree"]
3796 stdout_orig = sys.stdout
3797 stderr_orig = sys.stderr
3799 if self.background and self.logfile is not None:
3800 log_file = open(self.logfile, 'a')
3802 if log_file is not None:
3803 sys.stdout = log_file
3804 sys.stderr = log_file
3806 bintree.digestCheck(pkg)
3807 except portage.exception.FileNotFound:
3808 writemsg("!!! Fetching Binary failed " + \
3809 "for '%s'\n" % pkg.cpv, noiselevel=-1)
3811 except portage.exception.DigestException, e:
3812 writemsg("\n!!! Digest verification failed:\n",
3814 writemsg("!!! %s\n" % e.value[0],
3816 writemsg("!!! Reason: %s\n" % e.value[1],
3818 writemsg("!!! Got: %s\n" % e.value[2],
3820 writemsg("!!! Expected: %s\n" % e.value[3],
3823 if rval != os.EX_OK:
3824 pkg_path = bintree.getname(pkg.cpv)
3825 head, tail = os.path.split(pkg_path)
3826 temp_filename = portage._checksum_failure_temp_file(head, tail)
3827 writemsg("File renamed to '%s'\n" % (temp_filename,),
3830 sys.stdout = stdout_orig
3831 sys.stderr = stderr_orig
3832 if log_file is not None:
3835 self.returncode = rval
3838 class BinpkgPrefetcher(CompositeTask):
3840 __slots__ = ("pkg",) + \
3841 ("pkg_path", "_bintree",)
3844 self._bintree = self.pkg.root_config.trees["bintree"]
3845 fetcher = BinpkgFetcher(background=self.background,
3846 logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3847 scheduler=self.scheduler)
3848 self.pkg_path = fetcher.pkg_path
3849 self._start_task(fetcher, self._fetcher_exit)
3851 def _fetcher_exit(self, fetcher):
3853 if self._default_exit(fetcher) != os.EX_OK:
3857 verifier = BinpkgVerifier(background=self.background,
3858 logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3859 self._start_task(verifier, self._verifier_exit)
3861 def _verifier_exit(self, verifier):
3862 if self._default_exit(verifier) != os.EX_OK:
3866 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3868 self._current_task = None
3869 self.returncode = os.EX_OK
3872 class BinpkgExtractorAsync(SpawnProcess):
3874 __slots__ = ("image_dir", "pkg", "pkg_path")
3876 _shell_binary = portage.const.BASH_BINARY
3879 self.args = [self._shell_binary, "-c",
3880 "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3881 (portage._shell_quote(self.pkg_path),
3882 portage._shell_quote(self.image_dir))]
3884 self.env = self.pkg.root_config.settings.environ()
3885 SpawnProcess._start(self)
3887 class MergeListItem(CompositeTask):
3890 TODO: For parallel scheduling, everything here needs asynchronous
3891 execution support (start, poll, and wait methods).
3894 __slots__ = ("args_set",
3895 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3896 "find_blockers", "logger", "mtimedb", "pkg",
3897 "pkg_count", "pkg_to_replace", "prefetcher",
3898 "settings", "statusMessage", "world_atom") + \
3904 build_opts = self.build_opts
3907 # uninstall, executed by self.merge()
3908 self.returncode = os.EX_OK
3912 args_set = self.args_set
3913 find_blockers = self.find_blockers
3914 logger = self.logger
3915 mtimedb = self.mtimedb
3916 pkg_count = self.pkg_count
3917 scheduler = self.scheduler
3918 settings = self.settings
3919 world_atom = self.world_atom
3920 ldpath_mtimes = mtimedb["ldpath"]
3922 action_desc = "Emerging"
3924 if pkg.type_name == "binary":
3925 action_desc += " binary"
3927 if build_opts.fetchonly:
3928 action_desc = "Fetching"
3930 msg = "%s (%s of %s) %s" % \
3932 colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3933 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3934 colorize("GOOD", pkg.cpv))
3936 portdb = pkg.root_config.trees["porttree"].dbapi
3937 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3938 if portdir_repo_name:
3939 pkg_repo_name = pkg.metadata.get("repository")
3940 if pkg_repo_name != portdir_repo_name:
3941 if not pkg_repo_name:
3942 pkg_repo_name = "unknown repo"
3943 msg += " from %s" % pkg_repo_name
3946 msg += " %s %s" % (preposition, pkg.root)
3948 if not build_opts.pretend:
3949 self.statusMessage(msg)
3950 logger.log(" >>> emerge (%s of %s) %s to %s" % \
3951 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3953 if pkg.type_name == "ebuild":
3955 build = EbuildBuild(args_set=args_set,
3956 background=self.background,
3957 config_pool=self.config_pool,
3958 find_blockers=find_blockers,
3959 ldpath_mtimes=ldpath_mtimes, logger=logger,
3960 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3961 prefetcher=self.prefetcher, scheduler=scheduler,
3962 settings=settings, world_atom=world_atom)
3964 self._install_task = build
3965 self._start_task(build, self._default_final_exit)
3968 elif pkg.type_name == "binary":
3970 binpkg = Binpkg(background=self.background,
3971 find_blockers=find_blockers,
3972 ldpath_mtimes=ldpath_mtimes, logger=logger,
3973 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3974 prefetcher=self.prefetcher, settings=settings,
3975 scheduler=scheduler, world_atom=world_atom)
3977 self._install_task = binpkg
3978 self._start_task(binpkg, self._default_final_exit)
3982 self._install_task.poll()
3983 return self.returncode
3986 self._install_task.wait()
3987 return self.returncode
3992 build_opts = self.build_opts
3993 find_blockers = self.find_blockers
3994 logger = self.logger
3995 mtimedb = self.mtimedb
3996 pkg_count = self.pkg_count
3997 prefetcher = self.prefetcher
3998 scheduler = self.scheduler
3999 settings = self.settings
4000 world_atom = self.world_atom
4001 ldpath_mtimes = mtimedb["ldpath"]
4004 if not (build_opts.buildpkgonly or \
4005 build_opts.fetchonly or build_opts.pretend):
4007 uninstall = PackageUninstall(background=self.background,
4008 ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
4009 pkg=pkg, scheduler=scheduler, settings=settings)
4012 retval = uninstall.wait()
4013 if retval != os.EX_OK:
4017 if build_opts.fetchonly or \
4018 build_opts.buildpkgonly:
4019 return self.returncode
4021 retval = self._install_task.install()
4024 class PackageMerge(AsynchronousTask):
4026 TODO: Implement asynchronous merge so that the scheduler can
4027 run while a merge is executing.
4030 __slots__ = ("merge",)
4034 pkg = self.merge.pkg
4035 pkg_count = self.merge.pkg_count
4038 action_desc = "Uninstalling"
4039 preposition = "from"
4042 action_desc = "Installing"
4044 counter_str = "(%s of %s) " % \
4045 (colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
4046 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)))
4051 colorize("GOOD", pkg.cpv))
4054 msg += " %s %s" % (preposition, pkg.root)
4056 if not self.merge.build_opts.fetchonly and \
4057 not self.merge.build_opts.pretend and \
4058 not self.merge.build_opts.buildpkgonly:
4059 self.merge.statusMessage(msg)
4061 self.returncode = self.merge.merge()
4064 class DependencyArg(object):
4065 def __init__(self, arg=None, root_config=None):
4067 self.root_config = root_config
4070 return str(self.arg)
4072 class AtomArg(DependencyArg):
4073 def __init__(self, atom=None, **kwargs):
4074 DependencyArg.__init__(self, **kwargs)
4076 if not isinstance(self.atom, portage.dep.Atom):
4077 self.atom = portage.dep.Atom(self.atom)
4078 self.set = (self.atom, )
class PackageArg(DependencyArg):
	"""Dependency argument that wraps one specific Package instance.

	The atom is an exact-version ("=") match against the package's cpv,
	and the argument's set consists of just that single atom.
	"""
	def __init__(self, package=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		self.package = package
		exact_atom = portage.dep.Atom("=" + package.cpv)
		self.atom = exact_atom
		self.set = (exact_atom,)
4087 class SetArg(DependencyArg):
4088 def __init__(self, set=None, **kwargs):
4089 DependencyArg.__init__(self, **kwargs)
4091 self.name = self.arg[len(SETPREFIX):]
4093 class Dependency(SlotObject):
4094 __slots__ = ("atom", "blocker", "depth",
4095 "parent", "onlydeps", "priority", "root")
4096 def __init__(self, **kwargs):
4097 SlotObject.__init__(self, **kwargs)
4098 if self.priority is None:
4099 self.priority = DepPriority()
4100 if self.depth is None:
4103 class BlockerCache(portage.cache.mappings.MutableMapping):
4104 """This caches blockers of installed packages so that dep_check does not
4105 have to be done for every single installed package on every invocation of
4106 emerge. The cache is invalidated whenever it is detected that something
4107 has changed that might alter the results of dep_check() calls:
4108 1) the set of installed packages (including COUNTER) has changed
4109 2) the old-style virtuals have changed
4112 # Number of uncached packages to trigger cache update, since
4113 # it's wasteful to update it for every vdb change.
4114 _cache_threshold = 5
4116 class BlockerData(object):
4118 __slots__ = ("__weakref__", "atoms", "counter")
4120 def __init__(self, counter, atoms):
4121 self.counter = counter
4124 def __init__(self, myroot, vardb):
4126 self._virtuals = vardb.settings.getvirtuals()
4127 self._cache_filename = os.path.join(myroot,
4128 portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4129 self._cache_version = "1"
4130 self._cache_data = None
4131 self._modified = set()
4136 f = open(self._cache_filename, mode='rb')
4137 mypickle = pickle.Unpickler(f)
4139 mypickle.find_global = None
4140 except AttributeError:
4141 # TODO: If py3k, override Unpickler.find_class().
4143 self._cache_data = mypickle.load()
4146 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4147 if isinstance(e, pickle.UnpicklingError):
4148 writemsg("!!! Error loading '%s': %s\n" % \
4149 (self._cache_filename, str(e)), noiselevel=-1)
4152 cache_valid = self._cache_data and \
4153 isinstance(self._cache_data, dict) and \
4154 self._cache_data.get("version") == self._cache_version and \
4155 isinstance(self._cache_data.get("blockers"), dict)
4157 # Validate all the atoms and counters so that
4158 # corruption is detected as soon as possible.
4159 invalid_items = set()
4160 for k, v in self._cache_data["blockers"].iteritems():
4161 if not isinstance(k, basestring):
4162 invalid_items.add(k)
4165 if portage.catpkgsplit(k) is None:
4166 invalid_items.add(k)
4168 except portage.exception.InvalidData:
4169 invalid_items.add(k)
4171 if not isinstance(v, tuple) or \
4173 invalid_items.add(k)
4176 if not isinstance(counter, (int, long)):
4177 invalid_items.add(k)
4179 if not isinstance(atoms, (list, tuple)):
4180 invalid_items.add(k)
4182 invalid_atom = False
4184 if not isinstance(atom, basestring):
4187 if atom[:1] != "!" or \
4188 not portage.isvalidatom(
4189 atom, allow_blockers=True):
4193 invalid_items.add(k)
4196 for k in invalid_items:
4197 del self._cache_data["blockers"][k]
4198 if not self._cache_data["blockers"]:
4202 self._cache_data = {"version":self._cache_version}
4203 self._cache_data["blockers"] = {}
4204 self._cache_data["virtuals"] = self._virtuals
4205 self._modified.clear()
4208 """If the current user has permission and the internal blocker cache
4209 been updated, save it to disk and mark it unmodified. This is called
4210 by emerge after it has processed blockers for all installed packages.
4211 Currently, the cache is only written if the user has superuser
4212 privileges (since that's required to obtain a lock), but all users
4213 have read access and benefit from faster blocker lookups (as long as
4214 the entire cache is still valid). The cache is stored as a pickled
4215 dict object with the following format:
4219 "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4220 "virtuals" : vardb.settings.getvirtuals()
4223 if len(self._modified) >= self._cache_threshold and \
4226 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4227 pickle.dump(self._cache_data, f, protocol=2)
4229 portage.util.apply_secpass_permissions(
4230 self._cache_filename, gid=portage.portage_gid, mode=0644)
4231 except (IOError, OSError), e:
4233 self._modified.clear()
4235 def __setitem__(self, cpv, blocker_data):
4237 Update the cache and mark it as modified for a future call to
4240 @param cpv: Package for which to cache blockers.
4242 @param blocker_data: An object with counter and atoms attributes.
4243 @type blocker_data: BlockerData
4245 self._cache_data["blockers"][cpv] = \
4246 (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4247 self._modified.add(cpv)
4250 if self._cache_data is None:
4251 # triggered by python-trace
4253 return iter(self._cache_data["blockers"])
4255 def __delitem__(self, cpv):
4256 del self._cache_data["blockers"][cpv]
4258 def __getitem__(self, cpv):
4261 @returns: An object with counter and atoms attributes.
4263 return self.BlockerData(*self._cache_data["blockers"][cpv])
4265 class BlockerDB(object):
4267 def __init__(self, root_config):
4268 self._root_config = root_config
4269 self._vartree = root_config.trees["vartree"]
4270 self._portdb = root_config.trees["porttree"].dbapi
4272 self._dep_check_trees = None
4273 self._fake_vartree = None
4275 def _get_fake_vartree(self, acquire_lock=0):
4276 fake_vartree = self._fake_vartree
4277 if fake_vartree is None:
4278 fake_vartree = FakeVartree(self._root_config,
4279 acquire_lock=acquire_lock)
4280 self._fake_vartree = fake_vartree
4281 self._dep_check_trees = { self._vartree.root : {
4282 "porttree" : fake_vartree,
4283 "vartree" : fake_vartree,
4286 fake_vartree.sync(acquire_lock=acquire_lock)
4289 def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4290 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4291 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4292 settings = self._vartree.settings
4293 stale_cache = set(blocker_cache)
4294 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4295 dep_check_trees = self._dep_check_trees
4296 vardb = fake_vartree.dbapi
4297 installed_pkgs = list(vardb)
4299 for inst_pkg in installed_pkgs:
4300 stale_cache.discard(inst_pkg.cpv)
4301 cached_blockers = blocker_cache.get(inst_pkg.cpv)
4302 if cached_blockers is not None and \
4303 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4304 cached_blockers = None
4305 if cached_blockers is not None:
4306 blocker_atoms = cached_blockers.atoms
4308 # Use aux_get() to trigger FakeVartree global
4309 # updates on *DEPEND when appropriate.
4310 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4312 portage.dep._dep_check_strict = False
4313 success, atoms = portage.dep_check(depstr,
4314 vardb, settings, myuse=inst_pkg.use.enabled,
4315 trees=dep_check_trees, myroot=inst_pkg.root)
4317 portage.dep._dep_check_strict = True
4319 pkg_location = os.path.join(inst_pkg.root,
4320 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4321 portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4322 (pkg_location, atoms), noiselevel=-1)
4325 blocker_atoms = [atom for atom in atoms \
4326 if atom.startswith("!")]
4327 blocker_atoms.sort()
4328 counter = long(inst_pkg.metadata["COUNTER"])
4329 blocker_cache[inst_pkg.cpv] = \
4330 blocker_cache.BlockerData(counter, blocker_atoms)
4331 for cpv in stale_cache:
4332 del blocker_cache[cpv]
4333 blocker_cache.flush()
4335 blocker_parents = digraph()
4337 for pkg in installed_pkgs:
4338 for blocker_atom in blocker_cache[pkg.cpv].atoms:
4339 blocker_atom = blocker_atom.lstrip("!")
4340 blocker_atoms.append(blocker_atom)
4341 blocker_parents.add(blocker_atom, pkg)
4343 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4344 blocking_pkgs = set()
4345 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4346 blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4348 # Check for blockers in the other direction.
4349 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4351 portage.dep._dep_check_strict = False
4352 success, atoms = portage.dep_check(depstr,
4353 vardb, settings, myuse=new_pkg.use.enabled,
4354 trees=dep_check_trees, myroot=new_pkg.root)
4356 portage.dep._dep_check_strict = True
4358 # We should never get this far with invalid deps.
4359 show_invalid_depstring_notice(new_pkg, depstr, atoms)
4362 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4365 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4366 for inst_pkg in installed_pkgs:
4368 blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4369 except (portage.exception.InvalidDependString, StopIteration):
4371 blocking_pkgs.add(inst_pkg)
4373 return blocking_pkgs
4375 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4377 msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4378 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4379 p_type, p_root, p_key, p_status = parent_node
4381 if p_status == "nomerge":
4382 category, pf = portage.catsplit(p_key)
4383 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4384 msg.append("Portage is unable to process the dependencies of the ")
4385 msg.append("'%s' package. " % p_key)
4386 msg.append("In order to correct this problem, the package ")
4387 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4388 msg.append("As a temporary workaround, the --nodeps option can ")
4389 msg.append("be used to ignore all dependencies. For reference, ")
4390 msg.append("the problematic dependencies can be found in the ")
4391 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4393 msg.append("This package can not be installed. ")
4394 msg.append("Please notify the '%s' package maintainer " % p_key)
4395 msg.append("about this problem.")
4397 msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4398 writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4400 class PackageVirtualDbapi(portage.dbapi):
4402 A dbapi-like interface class that represents the state of the installed
4403 package database as new packages are installed, replacing any packages
4404 that previously existed in the same slot. The main difference between
4405 this class and fakedbapi is that this one uses Package instances
4406 internally (passed in via cpv_inject() and cpv_remove() calls).
4408 def __init__(self, settings):
4409 portage.dbapi.__init__(self)
4410 self.settings = settings
4411 self._match_cache = {}
4417 Remove all packages.
4421 self._cp_map.clear()
4422 self._cpv_map.clear()
4425 obj = PackageVirtualDbapi(self.settings)
4426 obj._match_cache = self._match_cache.copy()
4427 obj._cp_map = self._cp_map.copy()
4428 for k, v in obj._cp_map.iteritems():
4429 obj._cp_map[k] = v[:]
4430 obj._cpv_map = self._cpv_map.copy()
4434 return self._cpv_map.itervalues()
4436 def __contains__(self, item):
4437 existing = self._cpv_map.get(item.cpv)
4438 if existing is not None and \
4443 def get(self, item, default=None):
4444 cpv = getattr(item, "cpv", None)
4448 type_name, root, cpv, operation = item
4450 existing = self._cpv_map.get(cpv)
4451 if existing is not None and \
4456 def match_pkgs(self, atom):
4457 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4459 def _clear_cache(self):
4460 if self._categories is not None:
4461 self._categories = None
4462 if self._match_cache:
4463 self._match_cache = {}
4465 def match(self, origdep, use_cache=1):
4466 result = self._match_cache.get(origdep)
4467 if result is not None:
4469 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4470 self._match_cache[origdep] = result
4473 def cpv_exists(self, cpv):
4474 return cpv in self._cpv_map
4476 def cp_list(self, mycp, use_cache=1):
4477 cachelist = self._match_cache.get(mycp)
4478 # cp_list() doesn't expand old-style virtuals
4479 if cachelist and cachelist[0].startswith(mycp):
4481 cpv_list = self._cp_map.get(mycp)
4482 if cpv_list is None:
4485 cpv_list = [pkg.cpv for pkg in cpv_list]
4486 self._cpv_sort_ascending(cpv_list)
4487 if not (not cpv_list and mycp.startswith("virtual/")):
4488 self._match_cache[mycp] = cpv_list
4492 return list(self._cp_map)
4495 return list(self._cpv_map)
4497 def cpv_inject(self, pkg):
4498 cp_list = self._cp_map.get(pkg.cp)
4501 self._cp_map[pkg.cp] = cp_list
4502 e_pkg = self._cpv_map.get(pkg.cpv)
4503 if e_pkg is not None:
4506 self.cpv_remove(e_pkg)
4507 for e_pkg in cp_list:
4508 if e_pkg.slot_atom == pkg.slot_atom:
4511 self.cpv_remove(e_pkg)
4514 self._cpv_map[pkg.cpv] = pkg
4517 def cpv_remove(self, pkg):
4518 old_pkg = self._cpv_map.get(pkg.cpv)
4521 self._cp_map[pkg.cp].remove(pkg)
4522 del self._cpv_map[pkg.cpv]
4525 def aux_get(self, cpv, wants):
4526 metadata = self._cpv_map[cpv].metadata
4527 return [metadata.get(x, "") for x in wants]
4529 def aux_update(self, cpv, values):
4530 self._cpv_map[cpv].metadata.update(values)
4533 class depgraph(object):
4535 pkg_tree_map = RootConfig.pkg_tree_map
4537 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4539 def __init__(self, settings, trees, myopts, myparams, spinner):
4540 self.settings = settings
4541 self.target_root = settings["ROOT"]
4542 self.myopts = myopts
4543 self.myparams = myparams
4545 if settings.get("PORTAGE_DEBUG", "") == "1":
4547 self.spinner = spinner
4548 self._running_root = trees["/"]["root_config"]
4549 self._opts_no_restart = Scheduler._opts_no_restart
4550 self.pkgsettings = {}
4551 # Maps slot atom to package for each Package added to the graph.
4552 self._slot_pkg_map = {}
4553 # Maps nodes to the reasons they were selected for reinstallation.
4554 self._reinstall_nodes = {}
4557 self._trees_orig = trees
4559 # Contains a filtered view of preferred packages that are selected
4560 # from available repositories.
4561 self._filtered_trees = {}
4562 # Contains installed packages and new packages that have been added
4564 self._graph_trees = {}
4565 # All Package instances
4566 self._pkg_cache = {}
4567 for myroot in trees:
4568 self.trees[myroot] = {}
4569 # Create a RootConfig instance that references
4570 # the FakeVartree instead of the real one.
4571 self.roots[myroot] = RootConfig(
4572 trees[myroot]["vartree"].settings,
4574 trees[myroot]["root_config"].setconfig)
4575 for tree in ("porttree", "bintree"):
4576 self.trees[myroot][tree] = trees[myroot][tree]
4577 self.trees[myroot]["vartree"] = \
4578 FakeVartree(trees[myroot]["root_config"],
4579 pkg_cache=self._pkg_cache)
4580 self.pkgsettings[myroot] = portage.config(
4581 clone=self.trees[myroot]["vartree"].settings)
4582 self._slot_pkg_map[myroot] = {}
4583 vardb = self.trees[myroot]["vartree"].dbapi
4584 preload_installed_pkgs = "--nodeps" not in self.myopts and \
4585 "--buildpkgonly" not in self.myopts
4586 # This fakedbapi instance will model the state that the vdb will
4587 # have after new packages have been installed.
4588 fakedb = PackageVirtualDbapi(vardb.settings)
4589 if preload_installed_pkgs:
4591 self.spinner.update()
4592 # This triggers metadata updates via FakeVartree.
4593 vardb.aux_get(pkg.cpv, [])
4594 fakedb.cpv_inject(pkg)
4596 # Now that the vardb state is cached in our FakeVartree,
4597 # we won't be needing the real vartree cache for awhile.
4598 # To make some room on the heap, clear the vardbapi
4600 trees[myroot]["vartree"].dbapi._clear_cache()
4603 self.mydbapi[myroot] = fakedb
4606 graph_tree.dbapi = fakedb
4607 self._graph_trees[myroot] = {}
4608 self._filtered_trees[myroot] = {}
4609 # Substitute the graph tree for the vartree in dep_check() since we
4610 # want atom selections to be consistent with package selections
4611 # that have already been made.
4612 self._graph_trees[myroot]["porttree"] = graph_tree
4613 self._graph_trees[myroot]["vartree"] = graph_tree
4614 def filtered_tree():
4616 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4617 self._filtered_trees[myroot]["porttree"] = filtered_tree
4619 # Passing in graph_tree as the vartree here could lead to better
4620 # atom selections in some cases by causing atoms for packages that
4621 # have been added to the graph to be preferred over other choices.
4622 # However, it can trigger atom selections that result in
4623 # unresolvable direct circular dependencies. For example, this
4624 # happens with gwydion-dylan which depends on either itself or
4625 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4626 # gwydion-dylan-bin needs to be selected in order to avoid
4627 # an unresolvable direct circular dependency.
4629 # To solve the problem described above, pass in "graph_db" so that
4630 # packages that have been added to the graph are distinguishable
4631 # from other available packages and installed packages. Also, pass
4632 # the parent package into self._select_atoms() calls so that
4633 # unresolvable direct circular dependencies can be detected and
4634 # avoided when possible.
4635 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4636 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4639 portdb = self.trees[myroot]["porttree"].dbapi
4640 bindb = self.trees[myroot]["bintree"].dbapi
4641 vardb = self.trees[myroot]["vartree"].dbapi
4642 # (db, pkg_type, built, installed, db_keys)
4643 if "--usepkgonly" not in self.myopts:
4644 db_keys = list(portdb._aux_cache_keys)
4645 dbs.append((portdb, "ebuild", False, False, db_keys))
4646 if "--usepkg" in self.myopts:
4647 db_keys = list(bindb._aux_cache_keys)
4648 dbs.append((bindb, "binary", True, False, db_keys))
4649 db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4650 dbs.append((vardb, "installed", True, True, db_keys))
4651 self._filtered_trees[myroot]["dbs"] = dbs
4652 if "--usepkg" in self.myopts:
4653 self.trees[myroot]["bintree"].populate(
4654 "--getbinpkg" in self.myopts,
4655 "--getbinpkgonly" in self.myopts)
4658 self.digraph=portage.digraph()
4659 # contains all sets added to the graph
4661 # contains atoms given as arguments
4662 self._sets["args"] = InternalPackageSet()
4663 # contains all atoms from all sets added to the graph, including
4664 # atoms given as arguments
4665 self._set_atoms = InternalPackageSet()
4666 self._atom_arg_map = {}
4667 # contains all nodes pulled in by self._set_atoms
4668 self._set_nodes = set()
4669 # Contains only Blocker -> Uninstall edges
4670 self._blocker_uninstalls = digraph()
4671 # Contains only Package -> Blocker edges
4672 self._blocker_parents = digraph()
4673 # Contains only irrelevant Package -> Blocker edges
4674 self._irrelevant_blockers = digraph()
4675 # Contains only unsolvable Package -> Blocker edges
4676 self._unsolvable_blockers = digraph()
4677 # Contains all Blocker -> Blocked Package edges
4678 self._blocked_pkgs = digraph()
4679 # Contains world packages that have been protected from
4680 # uninstallation but may not have been added to the graph
4681 # if the graph is not complete yet.
4682 self._blocked_world_pkgs = {}
4683 self._slot_collision_info = {}
4684 # Slot collision nodes are not allowed to block other packages since
4685 # blocker validation is only able to account for one package per slot.
4686 self._slot_collision_nodes = set()
4687 self._parent_atoms = {}
4688 self._slot_conflict_parent_atoms = set()
4689 self._serialized_tasks_cache = None
4690 self._scheduler_graph = None
4691 self._displayed_list = None
4692 self._pprovided_args = []
4693 self._missing_args = []
4694 self._masked_installed = set()
4695 self._unsatisfied_deps_for_display = []
4696 self._unsatisfied_blockers_for_display = None
4697 self._circular_deps_for_display = None
4698 self._dep_stack = []
4699 self._unsatisfied_deps = []
4700 self._initially_unsatisfied_deps = []
4701 self._ignored_deps = []
4702 self._required_set_names = set(["system", "world"])
4703 self._select_atoms = self._select_atoms_highest_available
4704 self._select_package = self._select_pkg_highest_available
4705 self._highest_pkg_cache = {}
4707 def _show_slot_collision_notice(self):
4708 """Show an informational message advising the user to mask one of the
4709 packages. In some cases it may be possible to resolve this
4710 automatically, but support for backtracking (removal nodes that have
4711 already been selected) will be required in order to handle all possible
4715 if not self._slot_collision_info:
4718 self._show_merge_list()
4721 msg.append("\n!!! Multiple package instances within a single " + \
4722 "package slot have been pulled\n")
4723 msg.append("!!! into the dependency graph, resulting" + \
4724 " in a slot conflict:\n\n")
4726 # Max number of parents shown, to avoid flooding the display.
4728 explanation_columns = 70
4730 for (slot_atom, root), slot_nodes \
4731 in self._slot_collision_info.iteritems():
4732 msg.append(str(slot_atom))
4735 for node in slot_nodes:
4737 msg.append(str(node))
4738 parent_atoms = self._parent_atoms.get(node)
4741 # Prefer conflict atoms over others.
4742 for parent_atom in parent_atoms:
4743 if len(pruned_list) >= max_parents:
4745 if parent_atom in self._slot_conflict_parent_atoms:
4746 pruned_list.add(parent_atom)
4748 # If this package was pulled in by conflict atoms then
4749 # show those alone since those are the most interesting.
4751 # When generating the pruned list, prefer instances
4752 # of DependencyArg over instances of Package.
4753 for parent_atom in parent_atoms:
4754 if len(pruned_list) >= max_parents:
4756 parent, atom = parent_atom
4757 if isinstance(parent, DependencyArg):
4758 pruned_list.add(parent_atom)
4759 # Prefer Packages instances that themselves have been
4760 # pulled into collision slots.
4761 for parent_atom in parent_atoms:
4762 if len(pruned_list) >= max_parents:
4764 parent, atom = parent_atom
4765 if isinstance(parent, Package) and \
4766 (parent.slot_atom, parent.root) \
4767 in self._slot_collision_info:
4768 pruned_list.add(parent_atom)
4769 for parent_atom in parent_atoms:
4770 if len(pruned_list) >= max_parents:
4772 pruned_list.add(parent_atom)
4773 omitted_parents = len(parent_atoms) - len(pruned_list)
4774 parent_atoms = pruned_list
4775 msg.append(" pulled in by\n")
4776 for parent_atom in parent_atoms:
4777 parent, atom = parent_atom
4778 msg.append(2*indent)
4779 if isinstance(parent,
4780 (PackageArg, AtomArg)):
4781 # For PackageArg and AtomArg types, it's
4782 # redundant to display the atom attribute.
4783 msg.append(str(parent))
4785 # Display the specific atom from SetArg or
4787 msg.append("%s required by %s" % (atom, parent))
4790 msg.append(2*indent)
4791 msg.append("(and %d more)\n" % omitted_parents)
4793 msg.append(" (no parents)\n")
4795 explanation = self._slot_conflict_explanation(slot_nodes)
4798 msg.append(indent + "Explanation:\n\n")
4799 for line in textwrap.wrap(explanation, explanation_columns):
4800 msg.append(2*indent + line + "\n")
4803 sys.stderr.write("".join(msg))
4806 explanations_for_all = explanations == len(self._slot_collision_info)
4808 if explanations_for_all or "--quiet" in self.myopts:
4812 msg.append("It may be possible to solve this problem ")
4813 msg.append("by using package.mask to prevent one of ")
4814 msg.append("those packages from being selected. ")
4815 msg.append("However, it is also possible that conflicting ")
4816 msg.append("dependencies exist such that they are impossible to ")
4817 msg.append("satisfy simultaneously. If such a conflict exists in ")
4818 msg.append("the dependencies of two different packages, then those ")
4819 msg.append("packages can not be installed simultaneously.")
4821 from formatter import AbstractFormatter, DumbWriter
4822 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4824 f.add_flowing_data(x)
4828 msg.append("For more information, see MASKED PACKAGES ")
4829 msg.append("section in the emerge man page or refer ")
4830 msg.append("to the Gentoo Handbook.")
4832 f.add_flowing_data(x)
# Explain a two-package slot conflict caused by USE deps and suggest a fix.
# Returns an English explanation string for cases 1 and 2 below; the elided
# branches presumably return None when no suggestion applies — confirm
# against upstream.
# NOTE(review): this listing has original line numbers prepended and some
# source lines elided (gaps in the numbering, e.g. returns/continues and the
# matched_node initialization); code is kept byte-identical.
4836 def _slot_conflict_explanation(self, slot_nodes):
4838 When a slot conflict occurs due to USE deps, there are a few
4839 different cases to consider:
4841 1) New USE are correctly set but --newuse wasn't requested so an
4842 installed package with incorrect USE happened to get pulled
4843 into graph before the new one.
4845 2) New USE are incorrectly set but an installed package has correct
4846 USE so it got pulled into the graph, and a new instance also got
4847 pulled in due to --newuse or an upgrade.
4849 3) Multiple USE deps exist that can't be satisfied simultaneously,
4850 and multiple package instances got pulled into the same slot to
4851 satisfy the conflicting deps.
4853 Currently, explanations and suggested courses of action are generated
4854 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
# Guard: suggestions only exist for exactly two conflicting packages.
4857 if len(slot_nodes) != 2:
4858 # Suggestions are only implemented for
4859 # conflicts between two packages.
# Partition the two nodes into the one matched by all conflict atoms
# (matched_node/matched_atoms) and the one matched by none (unmatched_node).
4862 all_conflict_atoms = self._slot_conflict_parent_atoms
4864 matched_atoms = None
4865 unmatched_node = None
4866 for node in slot_nodes:
4867 parent_atoms = self._parent_atoms.get(node)
4868 if not parent_atoms:
4869 # Normally, there are always parent atoms. If there are
4870 # none then something unexpected is happening and there's
4871 # currently no suggestion for this case.
4873 conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4874 for parent_atom in conflict_atoms:
4875 parent, atom = parent_atom
4877 # Suggestions are currently only implemented for cases
4878 # in which all conflict atoms have USE deps.
4881 if matched_node is not None:
4882 # If conflict atoms match multiple nodes
4883 # then there's no suggestion.
4886 matched_atoms = conflict_atoms
4888 if unmatched_node is not None:
4889 # Neither node is matched by conflict atoms, and
4890 # there is no suggestion for this case.
4892 unmatched_node = node
4894 if matched_node is None or unmatched_node is None:
4895 # This shouldn't happen.
# Case 1: the installed copy is the same version with stale USE —
# recommend --newuse or an explicit reinstall.
4898 if unmatched_node.installed and not matched_node.installed and \
4899 unmatched_node.cpv == matched_node.cpv:
4900 # If the conflicting packages are the same version then
4901 # --newuse should be all that's needed. If they are different
4902 # versions then there's some other problem.
4903 return "New USE are correctly set, but --newuse wasn't" + \
4904 " requested, so an installed package with incorrect USE " + \
4905 "happened to get pulled into the dependency graph. " + \
4906 "In order to solve " + \
4907 "this, either specify the --newuse option or explicitly " + \
4908 " reinstall '%s'." % matched_node.slot_atom
# Case 2: the new instance has the wrong USE — enumerate the atoms
# whose USE deps must be satisfied (joined as "'a', 'b' and 'c'").
4910 if matched_node.installed and not unmatched_node.installed:
4911 atoms = sorted(set(atom for parent, atom in matched_atoms))
4912 explanation = ("New USE for '%s' are incorrectly set. " + \
4913 "In order to solve this, adjust USE to satisfy '%s'") % \
4914 (matched_node.slot_atom, atoms[0])
4916 for atom in atoms[1:-1]:
4917 explanation += ", '%s'" % (atom,)
4920 explanation += " and '%s'" % (atoms[-1],)
# Post-process recorded slot collisions: for every package involved in a
# collision, find the parent atoms that actually match it and record each
# such (parent, atom) pair in self._slot_conflict_parent_atoms.
# NOTE(review): listing has prepended line numbers and elided lines
# (e.g. the continue statements after 4938/4948); code kept byte-identical.
4926 def _process_slot_conflicts(self):
4928 Process slot conflict data to identify specific atoms which
4929 lead to conflict. These atoms only match a subset of the
4930 packages that have been pulled into a given slot.
4932 for (slot_atom, root), slot_nodes \
4933 in self._slot_collision_info.iteritems():
# First pass: union of all parent atoms over every package in the slot.
4935 all_parent_atoms = set()
4936 for pkg in slot_nodes:
4937 parent_atoms = self._parent_atoms.get(pkg)
4938 if not parent_atoms:
4940 all_parent_atoms.update(parent_atoms)
# Second pass: attribute each parent atom to every package it matches,
# creating a (possibly empty) parent-atom set for the package on demand.
4942 for pkg in slot_nodes:
4943 parent_atoms = self._parent_atoms.get(pkg)
4944 if parent_atoms is None:
4945 parent_atoms = set()
4946 self._parent_atoms[pkg] = parent_atoms
4947 for parent_atom in all_parent_atoms:
4948 if parent_atom in parent_atoms:
4950 # Use package set for matching since it will match via
4951 # PROVIDE when necessary, while match_from_list does not.
4952 parent, atom = parent_atom
4953 atom_set = InternalPackageSet(
4954 initial_atoms=(atom,))
4955 if atom_set.findAtomForPackage(pkg):
4956 parent_atoms.add(parent_atom)
# Atoms reaching here (elided else-branch context) are recorded as the
# specific atoms responsible for the conflict.
4958 self._slot_conflict_parent_atoms.add(parent_atom)
# NOTE(review): listing has prepended line numbers; the lines that return
# the computed flag set (4969-4970, 4974-4977) are elided here.
4960 def _reinstall_for_flags(self, forced_flags,
4961 orig_use, orig_iuse, cur_use, cur_iuse):
4962 """Return a set of flags that trigger reinstallation, or None if there
4963 are no such flags."""
# --newuse: any IUSE membership change (ignoring forced flags) or any
# change in the set of enabled IUSE flags triggers reinstall.
4964 if "--newuse" in self.myopts:
4965 flags = set(orig_iuse.symmetric_difference(
4966 cur_iuse).difference(forced_flags))
4967 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4968 cur_iuse.intersection(cur_use)))
# --reinstall=changed-use: only changes in enabled IUSE flags count;
# mere IUSE additions/removals do not.
4971 elif "changed-use" == self.myopts.get("--reinstall"):
4972 flags = orig_iuse.intersection(orig_use).symmetric_difference(
4973 cur_iuse.intersection(cur_use))
# Drain the dependency stack, dispatching Package entries to
# _add_pkg_deps and Dependency entries to _add_dep; a False result from
# either presumably aborts graph creation (return lines elided).
# NOTE(review): the loop header (line 4980, presumably
# "while dep_stack:") and the return statements are elided from this
# listing; code kept byte-identical.
4978 def _create_graph(self, allow_unsatisfied=False):
4979 dep_stack = self._dep_stack
4981 self.spinner.update()
4982 dep = dep_stack.pop()
4983 if isinstance(dep, Package):
4984 if not self._add_pkg_deps(dep,
4985 allow_unsatisfied=allow_unsatisfied):
4988 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
# Resolve a single Dependency: create a Blocker for blocker atoms,
# otherwise select a matching package and hand it to _add_pkg.  Returns
# truthy on success (exact return lines elided in this listing).
# NOTE(review): prepended line numbers; several lines elided (e.g. the
# dep.blocker condition near 5001, returns, and try/else scaffolding).
4992 def _add_dep(self, dep, allow_unsatisfied=False):
4993 debug = "--debug" in self.myopts
4994 buildpkgonly = "--buildpkgonly" in self.myopts
4995 nodeps = "--nodeps" in self.myopts
4996 empty = "empty" in self.myparams
4997 deep = "deep" in self.myparams
# --update only affects direct deps (depth <= 1).
4998 update = "--update" in self.myopts and dep.depth <= 1
# Blocker handling (condition line partially elided): record the blocker
# against its parent unless the parent is an --onlydeps node.
5000 if not buildpkgonly and \
5002 dep.parent not in self._slot_collision_nodes:
5003 if dep.parent.onlydeps:
5004 # It's safe to ignore blockers if the
5005 # parent is an --onlydeps node.
5007 # The blocker applies to the root where
5008 # the parent is or will be installed.
5009 blocker = Blocker(atom=dep.atom,
5010 eapi=dep.parent.metadata["EAPI"],
5011 root=dep.parent.root)
5012 self._blocker_parents.add(blocker, dep.parent)
# Normal dependency: pick the best matching package for the atom.
5014 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
5015 onlydeps=dep.onlydeps)
# No match found (guard line elided): either defer it for later
# (allow_unsatisfied) or queue it for an "unsatisfied dep" display.
5017 if dep.priority.optional:
5018 # This could be an unecessary build-time dep
5019 # pulled in by --with-bdeps=y.
5021 if allow_unsatisfied:
5022 self._unsatisfied_deps.append(dep)
5024 self._unsatisfied_deps_for_display.append(
5025 ((dep.root, dep.atom), {"myparent":dep.parent}))
5027 # In some cases, dep_check will return deps that shouldn't
5028 # be proccessed any further, so they are identified and
5029 # discarded here. Try to discard as few as possible since
5030 # discarded dependencies reduce the amount of information
5031 # available for optimization of merge order.
5032 if dep.priority.satisfied and \
5033 not dep_pkg.installed and \
5034 not (existing_node or empty or deep or update):
5036 if dep.root == self.target_root:
5038 myarg = self._iter_atoms_for_pkg(dep_pkg).next()
5039 except StopIteration:
5041 except portage.exception.InvalidDependString:
5042 if not dep_pkg.installed:
5043 # This shouldn't happen since the package
5044 # should have been masked.
# Already-satisfied dep that isn't needed for the current operation:
# remember it in case it must be revisited later.
5047 self._ignored_deps.append(dep)
5050 if not self._add_pkg(dep_pkg, dep):
# Add a selected package (and the edge from its parent) to the depgraph,
# detecting slot collisions, reusing existing nodes for the same slot,
# and finally queueing the package so its own deps get processed.
# NOTE(review): prepended line numbers; many lines elided (returns,
# try/except scaffolding, else branches); code kept byte-identical.
5054 def _add_pkg(self, pkg, dep):
5061 myparent = dep.parent
5062 priority = dep.priority
5064 if priority is None:
5065 priority = DepPriority()
5067 Fills the digraph with nodes comprised of packages to merge.
5068 mybigkey is the package spec of the package to merge.
5069 myparent is the package depending on mybigkey ( or None )
5070 addme = Should we add this package to the digraph or are we just looking at it's deps?
5071 Think --onlydeps, we need to ignore packages in that case.
5074 #IUSE-aware emerge -> USE DEP aware depgraph
5075 #"no downgrade" emerge
5077 # Ensure that the dependencies of the same package
5078 # are never processed more than once.
5079 previously_added = pkg in self.digraph
5081 # select the correct /var database that we'll be checking against
5082 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5083 pkgsettings = self.pkgsettings[pkg.root]
# Collect command-line arguments (atoms/sets) that pulled this pkg in.
5088 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5089 except portage.exception.InvalidDependString, e:
5090 if not pkg.installed:
5091 show_invalid_depstring_notice(
5092 pkg, pkg.metadata["PROVIDE"], str(e))
5096 if not pkg.onlydeps:
5097 if not pkg.installed and \
5098 "empty" not in self.myparams and \
5099 vardbapi.match(pkg.slot_atom):
5100 # Increase the priority of dependencies on packages that
5101 # are being rebuilt. This optimizes merge order so that
5102 # dependencies are rebuilt/updated as soon as possible,
5103 # which is needed especially when emerge is called by
5104 # revdep-rebuild since dependencies may be affected by ABI
5105 # breakage that has rendered them useless. Don't adjust
5106 # priority here when in "empty" mode since all packages
5107 # are being merged in that case.
5108 priority.rebuild = True
# Is some other package instance already occupying this slot?
5110 existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5111 slot_collision = False
5113 existing_node_matches = pkg.cpv == existing_node.cpv
5114 if existing_node_matches and \
5115 pkg != existing_node and \
5116 dep.atom is not None:
5117 # Use package set for matching since it will match via
5118 # PROVIDE when necessary, while match_from_list does not.
5119 atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5120 if not atom_set.findAtomForPackage(existing_node):
5121 existing_node_matches = False
5122 if existing_node_matches:
5123 # The existing node can be reused.
5125 for parent_atom in arg_atoms:
5126 parent, atom = parent_atom
5127 self.digraph.add(existing_node, parent,
5129 self._add_parent_atom(existing_node, parent_atom)
5130 # If a direct circular dependency is not an unsatisfied
5131 # buildtime dependency then drop it here since otherwise
5132 # it can skew the merge order calculation in an unwanted
5134 if existing_node != myparent or \
5135 (priority.buildtime and not priority.satisfied):
5136 self.digraph.addnode(existing_node, myparent,
5138 if dep.atom is not None and dep.parent is not None:
5139 self._add_parent_atom(existing_node,
5140 (dep.parent, dep.atom))
5144 # A slot collision has occurred. Sometimes this coincides
5145 # with unresolvable blockers, so the slot collision will be
5146 # shown later if there are no unresolvable blockers.
5147 self._add_slot_conflict(pkg)
5148 slot_collision = True
5151 # Now add this node to the graph so that self.display()
5152 # can show use flags and --tree portage.output. This node is
5153 # only being partially added to the graph. It must not be
5154 # allowed to interfere with the other nodes that have been
5155 # added. Do not overwrite data for existing nodes in
5156 # self.mydbapi since that data will be used for blocker
5158 # Even though the graph is now invalid, continue to process
5159 # dependencies so that things like --fetchonly can still
5160 # function despite collisions.
# First time we see this package: claim the slot and inject it into the
# fake installed-package db used for later matching.
5162 elif not previously_added:
5163 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5164 self.mydbapi[pkg.root].cpv_inject(pkg)
5165 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5167 if not pkg.installed:
5168 # Allow this package to satisfy old-style virtuals in case it
5169 # doesn't already. Any pre-existing providers will be preferred
5172 pkgsettings.setinst(pkg.cpv, pkg.metadata)
5173 # For consistency, also update the global virtuals.
5174 settings = self.roots[pkg.root].settings
5176 settings.setinst(pkg.cpv, pkg.metadata)
5178 except portage.exception.InvalidDependString, e:
5179 show_invalid_depstring_notice(
5180 pkg, pkg.metadata["PROVIDE"], str(e))
# Packages pulled in by command-line args belong to the "set" nodes.
5185 self._set_nodes.add(pkg)
5187 # Do this even when addme is False (--onlydeps) so that the
5188 # parent/child relationship is always known in case
5189 # self._show_slot_collision_notice() needs to be called later.
5190 self.digraph.add(pkg, myparent, priority=priority)
5191 if dep.atom is not None and dep.parent is not None:
5192 self._add_parent_atom(pkg, (dep.parent, dep.atom))
5195 for parent_atom in arg_atoms:
5196 parent, atom = parent_atom
5197 self.digraph.add(pkg, parent, priority=priority)
5198 self._add_parent_atom(pkg, parent_atom)
5200 """ This section determines whether we go deeper into dependencies or not.
5201 We want to go deeper on a few occasions:
5202 Installing package A, we need to make sure package A's deps are met.
5203 emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
5204 If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
# Installed packages without "deep" get their deps deferred to the
# ignored-deps list instead of the active stack.
5206 dep_stack = self._dep_stack
5207 if "recurse" not in self.myparams:
5209 elif pkg.installed and \
5210 "deep" not in self.myparams:
5211 dep_stack = self._ignored_deps
5213 self.spinner.update()
# Only queue the package's own deps once.
5218 if not previously_added:
5219 dep_stack.append(pkg)
5222 def _add_parent_atom(self, pkg, parent_atom):
5223 parent_atoms = self._parent_atoms.get(pkg)
5224 if parent_atoms is None:
5225 parent_atoms = set()
5226 self._parent_atoms[pkg] = parent_atoms
5227 parent_atoms.add(parent_atom)
# Record a slot collision for pkg: mark the node, and store both the
# colliding package and the current slot occupant under the
# (slot_atom, root) key in self._slot_collision_info.
# NOTE(review): prepended line numbers; line 5234 (presumably the
# "slot_nodes = set()" initialization, plus adding pkg itself) is elided.
5229 def _add_slot_conflict(self, pkg):
5230 self._slot_collision_nodes.add(pkg)
5231 slot_key = (pkg.slot_atom, pkg.root)
5232 slot_nodes = self._slot_collision_info.get(slot_key)
5233 if slot_nodes is None:
# The existing occupant of the slot is always part of the conflict set.
5235 slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5236 self._slot_collision_info[slot_key] = slot_nodes
# Expand a package's DEPEND/RDEPEND/PDEPEND strings into Dependency
# objects and feed each one to _add_dep.  Handles --with-bdeps,
# --buildpkgonly, --root-deps, and built-package special cases.
# NOTE(review): prepended line numbers; elided lines include the deps
# list header near 5288, try/except scaffolding, returns, and the debug
# guard lines; code kept byte-identical.
5239 def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5241 mytype = pkg.type_name
5244 metadata = pkg.metadata
5245 myuse = pkg.use.enabled
5247 depth = pkg.depth + 1
5248 removal_action = "remove" in self.myparams
# Copy the three dep strings out of metadata.
5251 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5253 edepend[k] = metadata[k]
# --buildpkgonly without deep/empty: runtime deps are irrelevant.
5255 if not pkg.built and \
5256 "--buildpkgonly" in self.myopts and \
5257 "deep" not in self.myparams and \
5258 "empty" not in self.myparams:
5259 edepend["RDEPEND"] = ""
5260 edepend["PDEPEND"] = ""
5261 bdeps_optional = False
5263 if pkg.built and not removal_action:
5264 if self.myopts.get("--with-bdeps", "n") == "y":
5265 # Pull in build time deps as requested, but marked them as
5266 # "optional" since they are not strictly required. This allows
5267 # more freedom in the merge order calculation for solving
5268 # circular dependencies. Don't convert to PDEPEND since that
5269 # could make --with-bdeps=y less effective if it is used to
5270 # adjust merge order to prevent built_with_use() calls from
5272 bdeps_optional = True
5274 # built packages do not have build time dependencies.
5275 edepend["DEPEND"] = ""
5277 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5278 edepend["DEPEND"] = ""
# --root-deps controls which root build-time deps are resolved against.
5281 root_deps = self.myopts.get("--root-deps")
5282 if root_deps is not None:
5283 if root_deps is True:
5285 elif root_deps == "rdeps":
5286 edepend["DEPEND"] = ""
# (root, dep string, priority) triples; build deps get buildtime or
# optional priority, runtime deps get runtime/runtime_post.
5289 (bdeps_root, edepend["DEPEND"],
5290 self._priority(buildtime=(not bdeps_optional),
5291 optional=bdeps_optional)),
5292 (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5293 (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5296 debug = "--debug" in self.myopts
# Installed packages get lenient dep-string parsing.
5297 strict = mytype != "installed"
5299 for dep_root, dep_string, dep_priority in deps:
5304 print "Parent: ", jbigkey
5305 print "Depstring:", dep_string
5306 print "Priority:", dep_priority
5307 vardb = self.roots[dep_root].trees["vartree"].dbapi
# Let dep_check choose which atoms of any ||-groups to use.
5309 selected_atoms = self._select_atoms(dep_root,
5310 dep_string, myuse=myuse, parent=pkg, strict=strict,
5311 priority=dep_priority)
5312 except portage.exception.InvalidDependString, e:
5313 show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5316 print "Candidates:", selected_atoms
5318 for atom in selected_atoms:
5321 atom = portage.dep.Atom(atom)
5323 mypriority = dep_priority.copy()
# Deps already satisfied by an installed package are marked so.
5324 if not atom.blocker and vardb.match(atom):
5325 mypriority.satisfied = True
5327 if not self._add_dep(Dependency(atom=atom,
5328 blocker=atom.blocker, depth=depth, parent=pkg,
5329 priority=mypriority, root=dep_root),
5330 allow_unsatisfied=allow_unsatisfied):
5333 except portage.exception.InvalidAtom, e:
5334 show_invalid_depstring_notice(
5335 pkg, dep_string, str(e))
5337 if not pkg.installed:
5341 print "Exiting...", jbigkey
# Ambiguous (not fully-qualified) atoms are fatal: report all matches
# and where the bad dep string came from.
5342 except portage.exception.AmbiguousPackageName, e:
5344 portage.writemsg("\n\n!!! An atom in the dependencies " + \
5345 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5347 portage.writemsg(" %s\n" % cpv, noiselevel=-1)
5348 portage.writemsg("\n", noiselevel=-1)
5349 if mytype == "binary":
5351 "!!! This binary package cannot be installed: '%s'\n" % \
5352 mykey, noiselevel=-1)
5353 elif mytype == "ebuild":
5354 portdb = self.roots[myroot].trees["porttree"].dbapi
5355 myebuild, mylocation = portdb.findname2(mykey)
5356 portage.writemsg("!!! This ebuild cannot be installed: " + \
5357 "'%s'\n" % myebuild, noiselevel=-1)
5358 portage.writemsg("!!! Please notify the package maintainer " + \
5359 "that atoms must be fully-qualified.\n", noiselevel=-1)
def _priority(self, **kwargs):
	"""Construct a dependency priority object for the current operation.

	Uses UnmergeDepPriority when a removal operation is in progress
	("remove" in self.myparams) and DepPriority otherwise; all keyword
	arguments are forwarded to the chosen constructor.

	NOTE(review): reconstructed from a mangled listing in which the
	original's line 5366 (presumably the bare "else:") was elided and
	line numbers were prepended; the visible logic is preserved exactly.
	"""
	if "remove" in self.myparams:
		priority_constructor = UnmergeDepPriority
	else:
		priority_constructor = DepPriority
	return priority_constructor(**kwargs)
# Expand a category-less atom into candidate atoms, one per category in
# which a matching package name exists in any of the filtered dbs.
# NOTE(review): prepended line numbers; elided lines include the
# docstring delimiters, the categories set initialization, and the
# return statement; code kept byte-identical.
5370 def _dep_expand(self, root_config, atom_without_category):
5372 @param root_config: a root config instance
5373 @type root_config: RootConfig
5374 @param atom_without_category: an atom without a category component
5375 @type atom_without_category: String
5377 @returns: a list of atoms containing categories (possibly empty)
# Insert the dummy "null" category just to extract the package name.
5379 null_cp = portage.dep_getkey(insert_category_into_atom(
5380 atom_without_category, "null"))
5381 cat, atom_pn = portage.catsplit(null_cp)
5383 dbs = self._filtered_trees[root_config.root]["dbs"]
# Probe every db/category combination for the bare package name.
5385 for db, pkg_type, built, installed, db_keys in dbs:
5386 for cat in db.categories:
5387 if db.cp_list("%s/%s" % (cat, atom_pn)):
# Build one fully-qualified atom per matching category.
5391 for cat in categories:
5392 deps.append(insert_category_into_atom(
5393 atom_without_category, cat))
# Check whether atom_cp is provided as a new-style virtual (i.e. a real
# package with that cp exists in any filtered db for the given root).
# NOTE(review): prepended line numbers; the return statements
# (lines 5401-5404, presumably returning a boolean) are elided.
5396 def _have_new_virt(self, root, atom_cp):
5398 for db, pkg_type, built, installed, db_keys in \
5399 self._filtered_trees[root]["dbs"]:
5400 if db.cp_list(atom_cp):
# Yield the command-line argument objects whose atoms match pkg,
# filtering out old-style virtual matches superseded by new-style
# virtuals and atoms better satisfied by a higher slot.
# NOTE(review): prepended line numbers; elided lines include returns,
# continues, the higher_slot initialization, and the yield statements;
# code kept byte-identical.
5405 def _iter_atoms_for_pkg(self, pkg):
5406 # TODO: add multiple $ROOT support
5407 if pkg.root != self.target_root:
5409 atom_arg_map = self._atom_arg_map
5410 root_config = self.roots[pkg.root]
5411 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5412 atom_cp = portage.dep_getkey(atom)
# Old-style virtual match shadowed by a new-style virtual: skip it.
5413 if atom_cp != pkg.cp and \
5414 self._have_new_virt(pkg.root, atom_cp):
# Look for a visible package in a *different* slot that is newer than
# pkg; if one exists, this atom should not claim pkg.
5416 visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5417 visible_pkgs.reverse() # descending order
5419 for visible_pkg in visible_pkgs:
5420 if visible_pkg.cp != atom_cp:
5422 if pkg >= visible_pkg:
5423 # This is descending order, and we're not
5424 # interested in any versions <= pkg given.
5426 if pkg.slot_atom != visible_pkg.slot_atom:
5427 higher_slot = visible_pkg
5429 if higher_slot is not None:
# Yield each argument associated with this (atom, root) key; the
# PackageArg identity check's body is elided here.
5431 for arg in atom_arg_map[(atom, pkg.root)]:
5432 if isinstance(arg, PackageArg) and \
# Entry point for turning emerge's command-line arguments (.tbz2 files,
# .ebuild files, absolute paths, sets, and atoms) into depgraph roots.
# Returns a (success, favorites) pair.
# NOTE(review): prepended line numbers; many lines elided (loop headers,
# returns, try/except scaffolding); code kept byte-identical throughout.
5437 def select_files(self, myfiles):
5438 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
5439 appropriate depgraph and return a favorite list."""
5440 debug = "--debug" in self.myopts
5441 root_config = self.roots[self.target_root]
5442 sets = root_config.sets
5443 getSetAtoms = root_config.setconfig.getSetAtoms
5445 myroot = self.target_root
5446 dbs = self._filtered_trees[myroot]["dbs"]
5447 vardb = self.trees[myroot]["vartree"].dbapi
5448 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5449 portdb = self.trees[myroot]["porttree"].dbapi
5450 bindb = self.trees[myroot]["bintree"].dbapi
5451 pkgsettings = self.pkgsettings[myroot]
5453 onlydeps = "--onlydeps" in self.myopts
# --- Argument classification loop (header elided): dispatch on file
# --- extension / path shape / set prefix / plain atom.
5456 ext = os.path.splitext(x)[1]
# Case: binary package (.tbz2).  Locate it under PKGDIR if needed.
5458 if not os.path.exists(x):
5460 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5461 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5462 elif os.path.exists(
5463 os.path.join(pkgsettings["PKGDIR"], x)):
5464 x = os.path.join(pkgsettings["PKGDIR"], x)
5466 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5467 print "!!! Please ensure the tbz2 exists as specified.\n"
5468 return 0, myfavorites
# Derive the cpv key from the package's embedded CATEGORY.
5469 mytbz2=portage.xpak.tbz2(x)
5470 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5471 if os.path.realpath(x) != \
5472 os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5473 print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5474 return 0, myfavorites
5475 db_keys = list(bindb._aux_cache_keys)
5476 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5477 pkg = Package(type_name="binary", root_config=root_config,
5478 cpv=mykey, built=True, metadata=metadata,
5480 self._pkg_cache[pkg] = pkg
5481 args.append(PackageArg(arg=x, package=pkg,
5482 root_config=root_config))
# Case: a raw .ebuild path inside a portage tree.
5483 elif ext==".ebuild":
5484 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5485 pkgdir = os.path.dirname(ebuild_path)
5486 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5487 cp = pkgdir[len(tree_root)+1:]
5488 e = portage.exception.PackageNotFound(
5489 ("%s is not in a valid portage tree " + \
5490 "hierarchy or does not exist") % x)
5491 if not portage.isvalidatom(cp):
5493 cat = portage.catsplit(cp)[0]
5494 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5495 if not portage.isvalidatom("="+mykey):
5497 ebuild_path = portdb.findname(mykey)
# The resolved ebuild must be the one the user pointed at, otherwise
# PORTDIR/PORTDIR_OVERLAY shadows it.
5499 if ebuild_path != os.path.join(os.path.realpath(tree_root),
5500 cp, os.path.basename(ebuild_path)):
5501 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5502 return 0, myfavorites
5503 if mykey not in portdb.xmatch(
5504 "match-visible", portage.dep_getkey(mykey)):
5505 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5506 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5507 print colorize("BAD", "*** page for details.")
5508 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5511 raise portage.exception.PackageNotFound(
5512 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5513 db_keys = list(portdb._aux_cache_keys)
5514 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5515 pkg = Package(type_name="ebuild", root_config=root_config,
5516 cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5517 pkgsettings.setcpv(pkg)
5518 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5519 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
5520 self._pkg_cache[pkg] = pkg
5521 args.append(PackageArg(arg=x, package=pkg,
5522 root_config=root_config))
# Case: an absolute filesystem path — resolve its owning package later.
5523 elif x.startswith(os.path.sep):
5524 if not x.startswith(myroot):
5525 portage.writemsg(("\n\n!!! '%s' does not start with" + \
5526 " $ROOT.\n") % x, noiselevel=-1)
5528 # Queue these up since it's most efficient to handle
5529 # multiple files in a single iter_owners() call.
5530 lookup_owners.append(x)
# Case: a named package set ("system"/"world" or @set syntax).
5532 if x in ("system", "world"):
5534 if x.startswith(SETPREFIX):
5535 s = x[len(SETPREFIX):]
5537 raise portage.exception.PackageSetNotFound(s)
5540 # Recursively expand sets so that containment tests in
5541 # self._get_parent_sets() properly match atoms in nested
5542 # sets (like if world contains system).
5543 expanded_set = InternalPackageSet(
5544 initial_atoms=getSetAtoms(s))
5545 self._sets[s] = expanded_set
5546 args.append(SetArg(arg=x, set=expanded_set,
5547 root_config=root_config))
# Case: a plain atom argument.
5549 if not is_valid_package_atom(x):
5550 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5552 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5553 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5555 # Don't expand categories or old-style virtuals here unless
5556 # necessary. Expansion of old-style virtuals here causes at
5557 # least the following problems:
5558 # 1) It's more difficult to determine which set(s) an atom
5559 # came from, if any.
5560 # 2) It takes away freedom from the resolver to choose other
5561 # possible expansions when necessary.
5563 args.append(AtomArg(arg=x, atom=x,
5564 root_config=root_config))
# Category-less atom: expand to candidate categories, preferring the
# single category with an installed instance when unambiguous.
5566 expanded_atoms = self._dep_expand(root_config, x)
5567 installed_cp_set = set()
5568 for atom in expanded_atoms:
5569 atom_cp = portage.dep_getkey(atom)
5570 if vardb.cp_list(atom_cp):
5571 installed_cp_set.add(atom_cp)
5572 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5573 installed_cp = iter(installed_cp_set).next()
5574 expanded_atoms = [atom for atom in expanded_atoms \
5575 if portage.dep_getkey(atom) == installed_cp]
5577 if len(expanded_atoms) > 1:
5580 ambiguous_package_name(x, expanded_atoms, root_config,
5581 self.spinner, self.myopts)
5582 return False, myfavorites
5584 atom = expanded_atoms[0]
# No expansion matched: fall back to virtual/ or null/ category.
5586 null_atom = insert_category_into_atom(x, "null")
5587 null_cp = portage.dep_getkey(null_atom)
5588 cat, atom_pn = portage.catsplit(null_cp)
5589 virts_p = root_config.settings.get_virts_p().get(atom_pn)
5591 # Allow the depgraph to choose which virtual.
5592 atom = insert_category_into_atom(x, "virtual")
5594 atom = insert_category_into_atom(x, "null")
5596 args.append(AtomArg(arg=x, atom=atom,
5597 root_config=root_config))
# --- Resolve queued absolute paths to their owning packages.
5601 search_for_multiple = False
5602 if len(lookup_owners) > 1:
5603 search_for_multiple = True
5605 for x in lookup_owners:
5606 if not search_for_multiple and os.path.isdir(x):
5607 search_for_multiple = True
5608 relative_paths.append(x[len(myroot):])
5611 for pkg, relative_path in \
5612 real_vardb._owners.iter_owners(relative_paths):
5613 owners.add(pkg.mycpv)
5614 if not search_for_multiple:
5618 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5619 "by any package.\n") % lookup_owners[0], noiselevel=-1)
# Turn each owner cpv into a slot-qualified atom when SLOT is known.
5623 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5625 # portage now masks packages with missing slot, but it's
5626 # possible that one was installed by an older version
5627 atom = portage.cpv_getkey(cpv)
5629 atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5630 args.append(AtomArg(arg=atom, atom=atom,
5631 root_config=root_config))
5633 if "--update" in self.myopts:
5634 # In some cases, the greedy slots behavior can pull in a slot that
5635 # the user would want to uninstall due to it being blocked by a
5636 # newer version in a different slot. Therefore, it's necessary to
5637 # detect and discard any that should be uninstalled. Each time
5638 # that arguments are updated, package selections are repeated in
5639 # order to ensure consistency with the current arguments:
5641 # 1) Initialize args
5642 # 2) Select packages and generate initial greedy atoms
5643 # 3) Update args with greedy atoms
5644 # 4) Select packages and generate greedy atoms again, while
5645 # accounting for any blockers between selected packages
5646 # 5) Update args with revised greedy atoms
5648 self._set_args(args)
# First greedy pass (loop header elided): add slot atoms for installed
# slots that differ from the highest visible match.
5651 greedy_args.append(arg)
5652 if not isinstance(arg, AtomArg):
5654 for atom in self._greedy_slots(arg.root_config, arg.atom):
5656 AtomArg(arg=arg.arg, atom=atom,
5657 root_config=arg.root_config))
5659 self._set_args(greedy_args)
5662 # Revise greedy atoms, accounting for any blockers
5663 # between selected packages.
5664 revised_greedy_args = []
5666 revised_greedy_args.append(arg)
5667 if not isinstance(arg, AtomArg):
5669 for atom in self._greedy_slots(arg.root_config, arg.atom,
5670 blocker_lookahead=True):
5671 revised_greedy_args.append(
5672 AtomArg(arg=arg.arg, atom=atom,
5673 root_config=arg.root_config))
5674 args = revised_greedy_args
5675 del revised_greedy_args
5677 self._set_args(args)
# Deduplicate favorites while recording atoms and set names.
5679 myfavorites = set(myfavorites)
5681 if isinstance(arg, (AtomArg, PackageArg)):
5682 myfavorites.add(arg.atom)
5683 elif isinstance(arg, SetArg):
5684 myfavorites.add(arg.arg)
5685 myfavorites = list(myfavorites)
5687 pprovideddict = pkgsettings.pprovideddict
5689 portage.writemsg("\n", noiselevel=-1)
5690 # Order needs to be preserved since a feature of --nodeps
5691 # is to allow the user to force a specific merge order.
# --- Main resolution loop (headers elided): add each argument atom's
# --- selected package to the graph as a root node.
5695 for atom in arg.set:
5696 self.spinner.update()
5697 dep = Dependency(atom=atom, onlydeps=onlydeps,
5698 root=myroot, parent=arg)
5699 atom_cp = portage.dep_getkey(atom)
# package.provided entries short-circuit resolution.
5701 pprovided = pprovideddict.get(portage.dep_getkey(atom))
5702 if pprovided and portage.match_from_list(atom, pprovided):
5703 # A provided package has been specified on the command line.
5704 self._pprovided_args.append((arg, atom))
5706 if isinstance(arg, PackageArg):
5707 if not self._add_pkg(arg.package, dep) or \
5708 not self._create_graph():
5709 sys.stderr.write(("\n\n!!! Problem resolving " + \
5710 "dependencies for %s\n") % arg.arg)
5711 return 0, myfavorites
5714 portage.writemsg(" Arg: %s\n Atom: %s\n" % \
5715 (arg, atom), noiselevel=-1)
5716 pkg, existing_node = self._select_package(
5717 myroot, atom, onlydeps=onlydeps)
# No package found: fatal unless the atom came from system/world,
# in which case it is merely recorded as missing.
5719 if not (isinstance(arg, SetArg) and \
5720 arg.name in ("system", "world")):
5721 self._unsatisfied_deps_for_display.append(
5722 ((myroot, atom), {}))
5723 return 0, myfavorites
5724 self._missing_args.append((arg, atom))
5726 if atom_cp != pkg.cp:
5727 # For old-style virtuals, we need to repeat the
5728 # package.provided check against the selected package.
5729 expanded_atom = atom.replace(atom_cp, pkg.cp)
5730 pprovided = pprovideddict.get(pkg.cp)
5732 portage.match_from_list(expanded_atom, pprovided):
5733 # A provided package has been
5734 # specified on the command line.
5735 self._pprovided_args.append((arg, atom))
5737 if pkg.installed and "selective" not in self.myparams:
5738 self._unsatisfied_deps_for_display.append(
5739 ((myroot, atom), {}))
5740 # Previous behavior was to bail out in this case, but
5741 # since the dep is satisfied by the installed package,
5742 # it's more friendly to continue building the graph
5743 # and just show a warning message. Therefore, only bail
5744 # out here if the atom is not from either the system or
5746 if not (isinstance(arg, SetArg) and \
5747 arg.name in ("system", "world")):
5748 return 0, myfavorites
5750 # Add the selected package to the graph as soon as possible
5751 # so that later dep_check() calls can use it as feedback
5752 # for making more consistent atom selections.
5753 if not self._add_pkg(pkg, dep):
5754 if isinstance(arg, SetArg):
5755 sys.stderr.write(("\n\n!!! Problem resolving " + \
5756 "dependencies for %s from %s\n") % \
5759 sys.stderr.write(("\n\n!!! Problem resolving " + \
5760 "dependencies for %s\n") % atom)
5761 return 0, myfavorites
# Signature failures are treated as possible tampering and abort.
5763 except portage.exception.MissingSignature, e:
5764 portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5765 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5766 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5767 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5768 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5769 return 0, myfavorites
5770 except portage.exception.InvalidSignature, e:
5771 portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5772 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5773 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5774 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5775 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5776 return 0, myfavorites
5777 except SystemExit, e:
5778 raise # Needed else can't exit
5779 except Exception, e:
5780 print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5781 print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5784 # Now that the root packages have been added to the graph,
5785 # process the dependencies.
5786 if not self._create_graph():
5787 return 0, myfavorites
# --usepkgonly sanity check: every "merge" node must be a binary pkg.
5790 if "--usepkgonly" in self.myopts:
5791 for xs in self.digraph.all_nodes():
5792 if not isinstance(xs, Package):
5794 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5798 print "Missing binary for:",xs[2]
5802 except self._unknown_internal_error:
5803 return False, myfavorites
5805 # We're true here unless we are missing binaries.
5806 return (not missing,myfavorites)
# Rebuild the "args" package set and the atom->argument map from the
# current argument list, then invalidate package-selection caches.
# NOTE(review): prepended line numbers; elided lines include the outer
# loops over args (5816-5817, 5829-ish), the args_set.add call, and the
# refs list initialization; code kept byte-identical.
5808 def _set_args(self, args):
5810 Create the "args" package set from atoms and packages given as
5811 arguments. This method can be called multiple times if necessary.
5812 The package selection cache is automatically invalidated, since
5813 arguments influence package selections.
5815 args_set = self._sets["args"]
# Only atom-bearing arguments contribute to the "args" set.
5818 if not isinstance(arg, (AtomArg, PackageArg)):
5821 if atom in args_set:
# Rebuild the union of all set atoms from every configured set.
5825 self._set_atoms.clear()
5826 self._set_atoms.update(chain(*self._sets.itervalues()))
5827 atom_arg_map = self._atom_arg_map
5828 atom_arg_map.clear()
# Map each (atom, root) key to the list of argument objects that
# reference it.
5830 for atom in arg.set:
5831 atom_key = (atom, arg.root_config.root)
5832 refs = atom_arg_map.get(atom_key)
5835 atom_arg_map[atom_key] = refs
5839 # Invalidate the package selection cache, since
5840 # arguments influence package selections.
5841 self._highest_pkg_cache.clear()
5842 for trees in self._filtered_trees.itervalues():
5843 trees["porttree"].dbapi._clear_cache()
# _greedy_slots: for a "greedy" slot upgrade, find installed slots of the
# same package that differ from the slot of the highest visible match,
# optionally discarding slots that would trigger blocker conflicts.
# (Elided excerpt: embedded line numbers and gaps preserved; code unchanged.)
5845 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5847 Return a list of slot atoms corresponding to installed slots that
5848 differ from the slot of the highest visible match. When
5849 blocker_lookahead is True, slot atoms that would trigger a blocker
5850 conflict are automatically discarded, potentially allowing automatic
5851 uninstallation of older slots when appropriate.
5853 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5854 if highest_pkg is None:
# Collect SLOT values of installed instances sharing highest_pkg's cp.
5856 vardb = root_config.trees["vartree"].dbapi
5858 for cpv in vardb.match(atom):
5859 # don't mix new virtuals with old virtuals
5860 if portage.cpv_getkey(cpv) == highest_pkg.cp:
5861 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5863 slots.add(highest_pkg.metadata["SLOT"])
5867 slots.remove(highest_pkg.metadata["SLOT"])
# For each remaining slot, keep only lower-versioned matches of the same cp.
5870 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5871 pkg, in_graph = self._select_package(root_config.root, slot_atom)
5872 if pkg is not None and \
5873 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5874 greedy_pkgs.append(pkg)
5877 if not blocker_lookahead:
5878 return [pkg.slot_atom for pkg in greedy_pkgs]
# Build per-package blocker sets from the *DEPEND strings.
5881 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5882 for pkg in greedy_pkgs + [highest_pkg]:
5883 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5885 atoms = self._select_atoms(
5886 pkg.root, dep_str, pkg.use.enabled,
5887 parent=pkg, strict=True)
5888 except portage.exception.InvalidDependString:
5890 blocker_atoms = (x for x in atoms if x.blocker)
5891 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5893 if highest_pkg not in blockers:
5896 # filter packages with invalid deps
5897 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5899 # filter packages that conflict with highest_pkg
5900 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5901 (blockers[highest_pkg].findAtomForPackage(pkg) or \
5902 blockers[pkg].findAtomForPackage(highest_pkg))]
5907 # If two packages conflict, discard the lower version.
5908 discard_pkgs = set()
5909 greedy_pkgs.sort(reverse=True)
5910 for i in xrange(len(greedy_pkgs) - 1):
5911 pkg1 = greedy_pkgs[i]
5912 if pkg1 in discard_pkgs:
5914 for j in xrange(i + 1, len(greedy_pkgs)):
5915 pkg2 = greedy_pkgs[j]
5916 if pkg2 in discard_pkgs:
5918 if blockers[pkg1].findAtomForPackage(pkg2) or \
5919 blockers[pkg2].findAtomForPackage(pkg1):
5921 discard_pkgs.add(pkg2)
5923 return [pkg.slot_atom for pkg in greedy_pkgs \
5924 if pkg not in discard_pkgs]
# _select_atoms_from_graph: thin wrapper that forces atom selection to use
# the graph-backed trees so already-graphed/installed packages are preferred.
# (Elided excerpt: embedded line numbers preserved; code unchanged.)
5926 def _select_atoms_from_graph(self, *pargs, **kwargs):
5928 Prefer atoms matching packages that have already been
5929 added to the graph or those that are installed and have
5930 not been scheduled for replacement.
5932 kwargs["trees"] = self._graph_trees
5933 return self._select_atoms_highest_available(*pargs, **kwargs)
# _select_atoms_highest_available: run portage.dep_check() on a dependency
# string and return the selected atoms; raises InvalidDependString on failure.
# (Elided excerpt: embedded line numbers and gaps preserved; code unchanged.
# The try/finally lines around dep_check appear to be among the elided lines.)
5935 def _select_atoms_highest_available(self, root, depstring,
5936 myuse=None, parent=None, strict=True, trees=None, priority=None):
5937 """This will raise InvalidDependString if necessary. If trees is
5938 None then self._filtered_trees is used."""
5939 pkgsettings = self.pkgsettings[root]
5941 trees = self._filtered_trees
5942 if not getattr(priority, "buildtime", False):
5943 # The parent should only be passed to dep_check() for buildtime
5944 # dependencies since that's the only case when it's appropriate
5945 # to trigger the circular dependency avoidance code which uses it.
5946 # It's important not to trigger the same circular dependency
5947 # avoidance code for runtime dependencies since it's not needed
5948 # and it can promote an incorrect package choice.
# Temporarily stash the parent in the trees dict for dep_check's use.
5952 if parent is not None:
5953 trees[root]["parent"] = parent
# Non-strict mode while dep_check runs; restored below.
5955 portage.dep._dep_check_strict = False
5956 mycheck = portage.dep_check(depstring, None,
5957 pkgsettings, myuse=myuse,
5958 myroot=root, trees=trees)
5960 if parent is not None:
5961 trees[root].pop("parent")
5962 portage.dep._dep_check_strict = True
5964 raise portage.exception.InvalidDependString(mycheck[1])
5965 selected_atoms = mycheck[1]
5966 return selected_atoms
# _show_unsatisfied_dep: explain to the user why an atom could not be
# satisfied — missing USE flags, missing IUSE, masked packages, or simply
# no ebuilds — then walk the digraph parents to show what required it.
# (Elided excerpt: embedded line numbers and gaps preserved; code unchanged.)
5968 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5969 atom = portage.dep.Atom(atom)
5970 atom_set = InternalPackageSet(initial_atoms=(atom,))
5971 atom_without_use = atom
# Strip USE deps (keep slot) so masked/mismatched candidates still match.
5973 atom_without_use = portage.dep.remove_slot(atom)
5975 atom_without_use += ":" + atom.slot
5976 atom_without_use = portage.dep.Atom(atom_without_use)
5977 xinfo = '"%s"' % atom
5980 # Discard null/ from failed cpv_expand category expansion.
5981 xinfo = xinfo.replace("null/", "")
5982 masked_packages = []
5984 masked_pkg_instances = set()
5985 missing_licenses = []
5986 have_eapi_mask = False
5987 pkgsettings = self.pkgsettings[root]
5988 implicit_iuse = pkgsettings._get_implicit_iuse()
5989 root_config = self.roots[root]
5990 portdb = self.roots[root].trees["porttree"].dbapi
5991 dbs = self._filtered_trees[root]["dbs"]
# Gather mask reasons for every candidate across all configured dbs.
5992 for db, pkg_type, built, installed, db_keys in dbs:
5996 if hasattr(db, "xmatch"):
5997 cpv_list = db.xmatch("match-all", atom_without_use)
5999 cpv_list = db.match(atom_without_use)
6002 for cpv in cpv_list:
6003 metadata, mreasons = get_mask_info(root_config, cpv,
6004 pkgsettings, db, pkg_type, built, installed, db_keys)
6005 if metadata is not None:
6006 pkg = Package(built=built, cpv=cpv,
6007 installed=installed, metadata=metadata,
6008 root_config=root_config)
6009 if pkg.cp != atom.cp:
6010 # A cpv can be returned from dbapi.match() as an
6011 # old-style virtual match even in cases when the
6012 # package does not actually PROVIDE the virtual.
6013 # Filter out any such false matches here.
6014 if not atom_set.findAtomForPackage(pkg):
6017 masked_pkg_instances.add(pkg)
6019 missing_use.append(pkg)
6022 masked_packages.append(
6023 (root_config, pkgsettings, cpv, metadata, mreasons))
# Classify USE-related failures: flags absent from IUSE vs. flags that
# merely need to be toggled.
6025 missing_use_reasons = []
6026 missing_iuse_reasons = []
6027 for pkg in missing_use:
6028 use = pkg.use.enabled
6029 iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
6030 iuse_re = re.compile("^(%s)$" % "|".join(iuse))
6032 for x in atom.use.required:
6033 if iuse_re.match(x) is None:
6034 missing_iuse.append(x)
6037 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
6038 missing_iuse_reasons.append((pkg, mreasons))
6040 need_enable = sorted(atom.use.enabled.difference(use))
6041 need_disable = sorted(atom.use.disabled.intersection(use))
6042 if need_enable or need_disable:
6044 changes.extend(colorize("red", "+" + x) \
6045 for x in need_enable)
6046 changes.extend(colorize("blue", "-" + x) \
6047 for x in need_disable)
6048 mreasons.append("Change USE: %s" % " ".join(changes))
6049 missing_use_reasons.append((pkg, mreasons))
# Only advertise USE changes for packages that are not otherwise masked.
6051 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6052 in missing_use_reasons if pkg not in masked_pkg_instances]
6054 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6055 in missing_iuse_reasons if pkg not in masked_pkg_instances]
6057 show_missing_use = False
6058 if unmasked_use_reasons:
6059 # Only show the latest version.
6060 show_missing_use = unmasked_use_reasons[:1]
6061 elif unmasked_iuse_reasons:
6062 if missing_use_reasons:
6063 # All packages with required IUSE are masked,
6064 # so display a normal masking message.
6067 show_missing_use = unmasked_iuse_reasons
6069 if show_missing_use:
6070 print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6071 print "!!! One of the following packages is required to complete your request:"
6072 for pkg, mreasons in show_missing_use:
6073 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6075 elif masked_packages:
6077 colorize("BAD", "All ebuilds that could satisfy ") + \
6078 colorize("INFORM", xinfo) + \
6079 colorize("BAD", " have been masked.")
6080 print "!!! One of the following masked packages is required to complete your request:"
6081 have_eapi_mask = show_masked_packages(masked_packages)
6084 msg = ("The current version of portage supports " + \
6085 "EAPI '%s'. You must upgrade to a newer version" + \
6086 " of portage before EAPI masked packages can" + \
6087 " be installed.") % portage.const.EAPI
6088 from textwrap import wrap
6089 for line in wrap(msg, 75):
6094 print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6096 # Show parent nodes and the argument that pulled them in.
6097 traversed_nodes = set()
6100 while node is not None:
6101 traversed_nodes.add(node)
6102 msg.append('(dependency required by "%s" [%s])' % \
6103 (colorize('INFORM', str(node.cpv)), node.type_name))
6104 # When traversing to parents, prefer arguments over packages
6105 # since arguments are root nodes. Never traverse the same
6106 # package twice, in order to prevent an infinite loop.
6107 selected_parent = None
6108 for parent in self.digraph.parent_nodes(node):
6109 if isinstance(parent, DependencyArg):
6110 msg.append('(dependency required by "%s" [argument])' % \
6111 (colorize('INFORM', str(parent))))
6112 selected_parent = None
6114 if parent not in traversed_nodes:
6115 selected_parent = parent
6116 node = selected_parent
# _select_pkg_highest_available: memoizing front-end for package selection.
# Returns a cached (pkg, existing_node) result when available, refreshing
# the cached entry once the package has actually been added to the graph;
# otherwise delegates to _select_pkg_highest_available_imp and caches.
# (Elided excerpt: embedded line numbers and gaps preserved; code unchanged.)
6122 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6123 cache_key = (root, atom, onlydeps)
6124 ret = self._highest_pkg_cache.get(cache_key)
6127 if pkg and not existing:
6128 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6129 if existing and existing == pkg:
6130 # Update the cache to reflect that the
6131 # package has been added to the graph.
6133 self._highest_pkg_cache[cache_key] = ret
# Cache miss: compute and memoize.
6135 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6136 self._highest_pkg_cache[cache_key] = ret
6139 settings = pkg.root_config.settings
6140 if visible(settings, pkg) and not (pkg.installed and \
6141 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6142 pkg.root_config.visible_pkgs.cpv_inject(pkg)
# _select_pkg_highest_available_imp: the core package chooser. Scans every
# configured db (binary/ebuild/installed) in two passes — first looking for
# an existing graph node, then for new candidates — applying visibility,
# USE, keyword, --noreplace/--newuse/--reinstall and virtual filtering, and
# finally returns (best_match, existing_node).
# (Elided excerpt: embedded line numbers and gaps preserved; code unchanged.)
6145 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6146 root_config = self.roots[root]
6147 pkgsettings = self.pkgsettings[root]
6148 dbs = self._filtered_trees[root]["dbs"]
6149 vardb = self.roots[root].trees["vartree"].dbapi
6150 portdb = self.roots[root].trees["porttree"].dbapi
6151 # List of acceptable packages, ordered by type preference.
6152 matched_packages = []
6153 highest_version = None
6154 if not isinstance(atom, portage.dep.Atom):
6155 atom = portage.dep.Atom(atom)
6157 atom_set = InternalPackageSet(initial_atoms=(atom,))
6158 existing_node = None
6160 usepkgonly = "--usepkgonly" in self.myopts
6161 empty = "empty" in self.myparams
6162 selective = "selective" in self.myparams
6164 noreplace = "--noreplace" in self.myopts
6165 # Behavior of the "selective" parameter depends on
6166 # whether or not a package matches an argument atom.
6167 # If an installed package provides an old-style
6168 # virtual that is no longer provided by an available
6169 # package, the installed package may match an argument
6170 # atom even though none of the available packages do.
6171 # Therefore, "selective" logic does not consider
6172 # whether or not an installed package matches an
6173 # argument atom. It only considers whether or not
6174 # available packages match argument atoms, which is
6175 # represented by the found_available_arg flag.
6176 found_available_arg = False
# Pass 1 looks for a node already in the graph; pass 2 for new candidates.
6177 for find_existing_node in True, False:
6180 for db, pkg_type, built, installed, db_keys in dbs:
6183 if installed and not find_existing_node:
6184 want_reinstall = reinstall or empty or \
6185 (found_available_arg and not selective)
6186 if want_reinstall and matched_packages:
6188 if hasattr(db, "xmatch"):
6189 cpv_list = db.xmatch("match-all", atom)
6191 cpv_list = db.match(atom)
6193 # USE=multislot can make an installed package appear as if
6194 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6195 # won't do any good as long as USE=multislot is enabled since
6196 # the newly built package still won't have the expected slot.
6197 # Therefore, assume that such SLOT dependencies are already
6198 # satisfied rather than forcing a rebuild.
6199 if installed and not cpv_list and atom.slot:
6200 for cpv in db.match(atom.cp):
6201 slot_available = False
6202 for other_db, other_type, other_built, \
6203 other_installed, other_keys in dbs:
6206 other_db.aux_get(cpv, ["SLOT"])[0]:
6207 slot_available = True
6211 if not slot_available:
6213 inst_pkg = self._pkg(cpv, "installed",
6214 root_config, installed=installed)
6215 # Remove the slot from the atom and verify that
6216 # the package matches the resulting atom.
6217 atom_without_slot = portage.dep.remove_slot(atom)
6219 atom_without_slot += str(atom.use)
6220 atom_without_slot = portage.dep.Atom(atom_without_slot)
6221 if portage.match_from_list(
6222 atom_without_slot, [inst_pkg]):
6223 cpv_list = [inst_pkg.cpv]
6228 pkg_status = "merge"
6229 if installed or onlydeps:
6230 pkg_status = "nomerge"
# Evaluate each candidate cpv from this db.
6233 for cpv in cpv_list:
6234 # Make --noreplace take precedence over --newuse.
6235 if not installed and noreplace and \
6236 cpv in vardb.match(atom):
6237 # If the installed version is masked, it may
6238 # be necessary to look at lower versions,
6239 # in case there is a visible downgrade.
6241 reinstall_for_flags = None
6242 cache_key = (pkg_type, root, cpv, pkg_status)
6243 calculated_use = True
6244 pkg = self._pkg_cache.get(cache_key)
6246 calculated_use = False
6248 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6251 pkg = Package(built=built, cpv=cpv,
6252 installed=installed, metadata=metadata,
6253 onlydeps=onlydeps, root_config=root_config,
6255 metadata = pkg.metadata
6257 metadata['CHOST'] = pkgsettings.get('CHOST', '')
6258 if not built and ("?" in metadata["LICENSE"] or \
6259 "?" in metadata["PROVIDE"]):
6260 # This is avoided whenever possible because
6261 # it's expensive. It only needs to be done here
6262 # if it has an effect on visibility.
6263 pkgsettings.setcpv(pkg)
6264 metadata["USE"] = pkgsettings["PORTAGE_USE"]
6265 calculated_use = True
6266 self._pkg_cache[pkg] = pkg
6268 if not installed or (built and matched_packages):
6269 # Only enforce visibility on installed packages
6270 # if there is at least one other visible package
6271 # available. By filtering installed masked packages
6272 # here, packages that have been masked since they
6273 # were installed can be automatically downgraded
6274 # to an unmasked version.
6276 if not visible(pkgsettings, pkg):
6278 except portage.exception.InvalidDependString:
6282 # Enable upgrade or downgrade to a version
6283 # with visible KEYWORDS when the installed
6284 # version is masked by KEYWORDS, but never
6285 # reinstall the same exact version only due
6286 # to a KEYWORDS mask.
6287 if built and matched_packages:
6289 different_version = None
6290 for avail_pkg in matched_packages:
6291 if not portage.dep.cpvequal(
6292 pkg.cpv, avail_pkg.cpv):
6293 different_version = avail_pkg
6295 if different_version is not None:
6298 pkgsettings._getMissingKeywords(
6299 pkg.cpv, pkg.metadata):
6302 # If the ebuild no longer exists or it's
6303 # keywords have been dropped, reject built
6304 # instances (installed or binary).
6305 # If --usepkgonly is enabled, assume that
6306 # the ebuild status should be ignored.
6310 pkg.cpv, "ebuild", root_config)
6311 except portage.exception.PackageNotFound:
6314 if not visible(pkgsettings, pkg_eb):
6317 if not pkg.built and not calculated_use:
6318 # This is avoided whenever possible because
6320 pkgsettings.setcpv(pkg)
6321 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6323 if pkg.cp != atom.cp:
6324 # A cpv can be returned from dbapi.match() as an
6325 # old-style virtual match even in cases when the
6326 # package does not actually PROVIDE the virtual.
6327 # Filter out any such false matches here.
6328 if not atom_set.findAtomForPackage(pkg):
# Track whether any *available* (non-installed) package matches an
# argument atom; feeds the "selective" logic above.
6332 if root == self.target_root:
6334 # Ebuild USE must have been calculated prior
6335 # to this point, in case atoms have USE deps.
6336 myarg = self._iter_atoms_for_pkg(pkg).next()
6337 except StopIteration:
6339 except portage.exception.InvalidDependString:
6341 # masked by corruption
6343 if not installed and myarg:
6344 found_available_arg = True
# Reject candidates whose USE flags cannot satisfy the atom's USE deps.
6346 if atom.use and not pkg.built:
6347 use = pkg.use.enabled
6348 if atom.use.enabled.difference(use):
6350 if atom.use.disabled.intersection(use):
6352 if pkg.cp == atom_cp:
6353 if highest_version is None:
6354 highest_version = pkg
6355 elif pkg > highest_version:
6356 highest_version = pkg
6357 # At this point, we've found the highest visible
6358 # match from the current repo. Any lower versions
6359 # from this repo are ignored, so this so the loop
6360 # will always end with a break statement below
6362 if find_existing_node:
6363 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6366 if portage.dep.match_from_list(atom, [e_pkg]):
6367 if highest_version and \
6368 e_pkg.cp == atom_cp and \
6369 e_pkg < highest_version and \
6370 e_pkg.slot_atom != highest_version.slot_atom:
6371 # There is a higher version available in a
6372 # different slot, so this existing node is
6376 matched_packages.append(e_pkg)
6377 existing_node = e_pkg
6379 # Compare built package to current config and
6380 # reject the built package if necessary.
6381 if built and not installed and \
6382 ("--newuse" in self.myopts or \
6383 "--reinstall" in self.myopts):
6384 iuses = pkg.iuse.all
6385 old_use = pkg.use.enabled
6387 pkgsettings.setcpv(myeb)
6389 pkgsettings.setcpv(pkg)
6390 now_use = pkgsettings["PORTAGE_USE"].split()
6391 forced_flags = set()
6392 forced_flags.update(pkgsettings.useforce)
6393 forced_flags.update(pkgsettings.usemask)
6395 if myeb and not usepkgonly:
6396 cur_iuse = myeb.iuse.all
6397 if self._reinstall_for_flags(forced_flags,
6401 # Compare current config to installed package
6402 # and do not reinstall if possible.
6403 if not installed and \
6404 ("--newuse" in self.myopts or \
6405 "--reinstall" in self.myopts) and \
6406 cpv in vardb.match(atom):
6407 pkgsettings.setcpv(pkg)
6408 forced_flags = set()
6409 forced_flags.update(pkgsettings.useforce)
6410 forced_flags.update(pkgsettings.usemask)
6411 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6412 old_iuse = set(filter_iuse_defaults(
6413 vardb.aux_get(cpv, ["IUSE"])[0].split()))
6414 cur_use = pkg.use.enabled
6415 cur_iuse = pkg.iuse.all
6416 reinstall_for_flags = \
6417 self._reinstall_for_flags(
6418 forced_flags, old_use, old_iuse,
6420 if reinstall_for_flags:
6424 matched_packages.append(pkg)
6425 if reinstall_for_flags:
6426 self._reinstall_nodes[pkg] = \
6430 if not matched_packages:
6433 if "--debug" in self.myopts:
6434 for pkg in matched_packages:
6435 portage.writemsg("%s %s\n" % \
6436 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6438 # Filter out any old-style virtual matches if they are
6439 # mixed with new-style virtual matches.
6440 cp = portage.dep_getkey(atom)
6441 if len(matched_packages) > 1 and \
6442 "virtual" == portage.catsplit(cp)[0]:
6443 for pkg in matched_packages:
6446 # Got a new-style virtual, so filter
6447 # out any old-style virtuals.
6448 matched_packages = [pkg for pkg in matched_packages \
6452 if len(matched_packages) > 1:
6453 bestmatch = portage.best(
6454 [pkg.cpv for pkg in matched_packages])
6455 matched_packages = [pkg for pkg in matched_packages \
6456 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6458 # ordered by type preference ("ebuild" type is the last resort)
6459 return matched_packages[-1], existing_node
# _select_pkg_from_graph: graph-restricted package selection — matches only
# against the graph-backed db and returns the highest match plus any node
# already occupying that slot.
# (Elided excerpt: embedded line numbers and gaps preserved; code unchanged.)
6461 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6463 Select packages that have already been added to the graph or
6464 those that are installed and have not been scheduled for
6467 graph_db = self._graph_trees[root]["porttree"].dbapi
6468 matches = graph_db.match_pkgs(atom)
6471 pkg = matches[-1] # highest match
6472 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6473 return pkg, in_graph
# _complete_graph: pull deep dependencies of the required sets (args,
# system, world) into the graph so that initially-satisfied deps are not
# broken by the planned merges. Temporarily switches selection to
# graph-only mode and forces the "deep" parameter.
# (Elided excerpt: embedded line numbers and gaps preserved; code unchanged.)
6475 def _complete_graph(self):
6477 Add any deep dependencies of required sets (args, system, world) that
6478 have not been pulled into the graph yet. This ensures that the graph
6479 is consistent such that initially satisfied deep dependencies are not
6480 broken in the new graph. Initially unsatisfied dependencies are
6481 irrelevant since we only want to avoid breaking dependencies that are
6484 Since this method can consume enough time to disturb users, it is
6485 currently only enabled by the --complete-graph option.
6487 if "--buildpkgonly" in self.myopts or \
6488 "recurse" not in self.myparams:
6491 if "complete" not in self.myparams:
6492 # Skip this to avoid consuming enough time to disturb users.
6495 # Put the depgraph into a mode that causes it to only
6496 # select packages that have already been added to the
6497 # graph or those that are installed and have not been
6498 # scheduled for replacement. Also, toggle the "deep"
6499 # parameter so that all dependencies are traversed and
6501 self._select_atoms = self._select_atoms_from_graph
6502 self._select_package = self._select_pkg_from_graph
6503 already_deep = "deep" in self.myparams
6504 if not already_deep:
6505 self.myparams.add("deep")
6507 for root in self.roots:
6508 required_set_names = self._required_set_names.copy()
6509 if root == self.target_root and \
6510 (already_deep or "empty" in self.myparams):
6511 required_set_names.difference_update(self._sets)
6512 if not required_set_names and not self._ignored_deps:
6514 root_config = self.roots[root]
6515 setconfig = root_config.setconfig
6517 # Reuse existing SetArg instances when available.
6518 for arg in self.digraph.root_nodes():
6519 if not isinstance(arg, SetArg):
6521 if arg.root_config != root_config:
6523 if arg.name in required_set_names:
6525 required_set_names.remove(arg.name)
6526 # Create new SetArg instances only when necessary.
6527 for s in required_set_names:
6528 expanded_set = InternalPackageSet(
6529 initial_atoms=setconfig.getSetAtoms(s))
6530 atom = SETPREFIX + s
6531 args.append(SetArg(arg=atom, set=expanded_set,
6532 root_config=root_config))
6533 vardb = root_config.trees["vartree"].dbapi
# Queue every set atom as a dependency to be (re)resolved.
6535 for atom in arg.set:
6536 self._dep_stack.append(
6537 Dependency(atom=atom, root=root, parent=arg))
6538 if self._ignored_deps:
6539 self._dep_stack.extend(self._ignored_deps)
6540 self._ignored_deps = []
6541 if not self._create_graph(allow_unsatisfied=True):
6543 # Check the unsatisfied deps to see if any initially satisfied deps
6544 # will become unsatisfied due to an upgrade. Initially unsatisfied
6545 # deps are irrelevant since we only want to avoid breaking deps
6546 # that are initially satisfied.
6547 while self._unsatisfied_deps:
6548 dep = self._unsatisfied_deps.pop()
6549 matches = vardb.match_pkgs(dep.atom)
6551 self._initially_unsatisfied_deps.append(dep)
6553 # An scheduled installation broke a deep dependency.
6554 # Add the installed package to the graph so that it
6555 # will be appropriately reported as a slot collision
6556 # (possibly solvable via backtracking).
6557 pkg = matches[-1] # highest match
6558 if not self._add_pkg(pkg, dep):
6560 if not self._create_graph(allow_unsatisfied=True):
# _pkg: fetch a Package instance from the cache or construct one from the
# appropriate tree's dbapi metadata, computing USE/CHOST for ebuilds.
# Missing packages surface as portage.exception.PackageNotFound.
# (Elided excerpt: embedded line numbers and gaps preserved; code unchanged.)
6564 def _pkg(self, cpv, type_name, root_config, installed=False):
6566 Get a package instance from the cache, or create a new
6567 one if necessary. Raises KeyError from aux_get if it
6568 failures for some reason (package does not exist or is
6573 operation = "nomerge"
6574 pkg = self._pkg_cache.get(
6575 (type_name, root_config.root, cpv, operation))
6577 tree_type = self.pkg_tree_map[type_name]
6578 db = root_config.trees[tree_type].dbapi
# Use the original (unfiltered) trees' aux-cache keys for metadata.
6579 db_keys = list(self._trees_orig[root_config.root][
6580 tree_type].dbapi._aux_cache_keys)
6582 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6584 raise portage.exception.PackageNotFound(cpv)
6585 pkg = Package(cpv=cpv, metadata=metadata,
6586 root_config=root_config, installed=installed)
6587 if type_name == "ebuild":
6588 settings = self.pkgsettings[root_config.root]
6589 settings.setcpv(pkg)
6590 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6591 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6592 self._pkg_cache[pkg] = pkg
# validate_blockers: compute the blocker atoms of every installed package
# (using a counter-validated cache), match them against the initial and
# final package databases, discard irrelevant blockers, schedule
# uninstalls with hard ordering deps where possible, and record any
# unresolved blocks.
# (Elided excerpt: embedded line numbers and gaps preserved; code unchanged.)
6595 def validate_blockers(self):
6596 """Remove any blockers from the digraph that do not match any of the
6597 packages within the graph. If necessary, create hard deps to ensure
6598 correct merge order such that mutually blocking packages are never
6599 installed simultaneously."""
6601 if "--buildpkgonly" in self.myopts or \
6602 "--nodeps" in self.myopts:
6605 #if "deep" in self.myparams:
6607 # Pull in blockers from all installed packages that haven't already
6608 # been pulled into the depgraph. This is not enabled by default
6609 # due to the performance penalty that is incurred by all the
6610 # additional dep_check calls that are required.
6612 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6613 for myroot in self.trees:
6614 vardb = self.trees[myroot]["vartree"].dbapi
6615 portdb = self.trees[myroot]["porttree"].dbapi
6616 pkgsettings = self.pkgsettings[myroot]
6617 final_db = self.mydbapi[myroot]
# Persistent blocker cache keyed by cpv, validated against COUNTER below.
6619 blocker_cache = BlockerCache(myroot, vardb)
6620 stale_cache = set(blocker_cache)
6623 stale_cache.discard(cpv)
6624 pkg_in_graph = self.digraph.contains(pkg)
6626 # Check for masked installed packages. Only warn about
6627 # packages that are in the graph in order to avoid warning
6628 # about those that will be automatically uninstalled during
6629 # the merge process or by --depclean.
6631 if pkg_in_graph and not visible(pkgsettings, pkg):
6632 self._masked_installed.add(pkg)
6634 blocker_atoms = None
6640 self._blocker_parents.child_nodes(pkg))
6645 self._irrelevant_blockers.child_nodes(pkg))
6648 if blockers is not None:
6649 blockers = set(str(blocker.atom) \
6650 for blocker in blockers)
6652 # If this node has any blockers, create a "nomerge"
6653 # node for it so that they can be enforced.
6654 self.spinner.update()
# A stale COUNTER means the cached blocker data predates a reinstall.
6655 blocker_data = blocker_cache.get(cpv)
6656 if blocker_data is not None and \
6657 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6660 # If blocker data from the graph is available, use
6661 # it to validate the cache and update the cache if
6663 if blocker_data is not None and \
6664 blockers is not None:
6665 if not blockers.symmetric_difference(
6666 blocker_data.atoms):
6670 if blocker_data is None and \
6671 blockers is not None:
6672 # Re-use the blockers from the graph.
6673 blocker_atoms = sorted(blockers)
6674 counter = long(pkg.metadata["COUNTER"])
6676 blocker_cache.BlockerData(counter, blocker_atoms)
6677 blocker_cache[pkg.cpv] = blocker_data
6681 blocker_atoms = blocker_data.atoms
6683 # Use aux_get() to trigger FakeVartree global
6684 # updates on *DEPEND when appropriate.
6685 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6686 # It is crucial to pass in final_db here in order to
6687 # optimize dep_check calls by eliminating atoms via
6688 # dep_wordreduce and dep_eval calls.
6690 portage.dep._dep_check_strict = False
6692 success, atoms = portage.dep_check(depstr,
6693 final_db, pkgsettings, myuse=pkg.use.enabled,
6694 trees=self._graph_trees, myroot=myroot)
6695 except Exception, e:
6696 if isinstance(e, SystemExit):
6698 # This is helpful, for example, if a ValueError
6699 # is thrown from cpv_expand due to multiple
6700 # matches (this can happen if an atom lacks a
6702 show_invalid_depstring_notice(
6703 pkg, depstr, str(e))
6707 portage.dep._dep_check_strict = True
6709 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6710 if replacement_pkg and \
6711 replacement_pkg[0].operation == "merge":
6712 # This package is being replaced anyway, so
6713 # ignore invalid dependencies so as not to
6714 # annoy the user too much (otherwise they'd be
6715 # forced to manually unmerge it first).
6717 show_invalid_depstring_notice(pkg, depstr, atoms)
6719 blocker_atoms = [myatom for myatom in atoms \
6720 if myatom.startswith("!")]
6721 blocker_atoms.sort()
6722 counter = long(pkg.metadata["COUNTER"])
6723 blocker_cache[cpv] = \
6724 blocker_cache.BlockerData(counter, blocker_atoms)
6727 for atom in blocker_atoms:
6728 blocker = Blocker(atom=portage.dep.Atom(atom),
6729 eapi=pkg.metadata["EAPI"], root=myroot)
6730 self._blocker_parents.add(blocker, pkg)
6731 except portage.exception.InvalidAtom, e:
6732 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6733 show_invalid_depstring_notice(
6734 pkg, depstr, "Invalid Atom: %s" % (e,))
# Drop cache entries for packages no longer installed, then persist.
6736 for cpv in stale_cache:
6737 del blocker_cache[cpv]
6738 blocker_cache.flush()
6741 # Discard any "uninstall" tasks scheduled by previous calls
6742 # to this method, since those tasks may not make sense given
6743 # the current graph state.
6744 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6745 if previous_uninstall_tasks:
6746 self._blocker_uninstalls = digraph()
6747 self.digraph.difference_update(previous_uninstall_tasks)
6749 for blocker in self._blocker_parents.leaf_nodes():
6750 self.spinner.update()
6751 root_config = self.roots[blocker.root]
6752 virtuals = root_config.settings.getvirtuals()
6753 myroot = blocker.root
6754 initial_db = self.trees[myroot]["vartree"].dbapi
6755 final_db = self.mydbapi[myroot]
6757 provider_virtual = False
6758 if blocker.cp in virtuals and \
6759 not self._have_new_virt(blocker.root, blocker.cp):
6760 provider_virtual = True
6762 # Use this to check PROVIDE for each matched package
6764 atom_set = InternalPackageSet(
6765 initial_atoms=[blocker.atom])
# Old-style virtual blockers expand to one atom per provider.
6767 if provider_virtual:
6769 for provider_entry in virtuals[blocker.cp]:
6771 portage.dep_getkey(provider_entry)
6772 atoms.append(blocker.atom.replace(
6773 blocker.cp, provider_cp))
6775 atoms = [blocker.atom]
6777 blocked_initial = set()
6779 for pkg in initial_db.match_pkgs(atom):
6780 if atom_set.findAtomForPackage(pkg):
6781 blocked_initial.add(pkg)
6783 blocked_final = set()
6785 for pkg in final_db.match_pkgs(atom):
6786 if atom_set.findAtomForPackage(pkg):
6787 blocked_final.add(pkg)
6789 if not blocked_initial and not blocked_final:
6790 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6791 self._blocker_parents.remove(blocker)
6792 # Discard any parents that don't have any more blockers.
6793 for pkg in parent_pkgs:
6794 self._irrelevant_blockers.add(blocker, pkg)
6795 if not self._blocker_parents.child_nodes(pkg):
6796 self._blocker_parents.remove(pkg)
6798 for parent in self._blocker_parents.parent_nodes(blocker):
6799 unresolved_blocks = False
6800 depends_on_order = set()
6801 for pkg in blocked_initial:
6802 if pkg.slot_atom == parent.slot_atom:
6803 # TODO: Support blocks within slots in cases where it
6804 # might make sense. For example, a new version might
6805 # require that the old version be uninstalled at build
6808 if parent.installed:
6809 # Two currently installed packages conflict with
6810 # eachother. Ignore this case since the damage
6811 # is already done and this would be likely to
6812 # confuse users if displayed like a normal blocker.
6815 self._blocked_pkgs.add(pkg, blocker)
6817 if parent.operation == "merge":
6818 # Maybe the blocked package can be replaced or simply
6819 # unmerged to resolve this block.
6820 depends_on_order.add((pkg, parent))
6822 # None of the above blocker resolutions techniques apply,
6823 # so apparently this one is unresolvable.
6824 unresolved_blocks = True
6825 for pkg in blocked_final:
6826 if pkg.slot_atom == parent.slot_atom:
6827 # TODO: Support blocks within slots.
6829 if parent.operation == "nomerge" and \
6830 pkg.operation == "nomerge":
6831 # This blocker will be handled the next time that a
6832 # merge of either package is triggered.
6835 self._blocked_pkgs.add(pkg, blocker)
6837 # Maybe the blocking package can be
6838 # unmerged to resolve this block.
6839 if parent.operation == "merge" and pkg.installed:
6840 depends_on_order.add((pkg, parent))
6842 elif parent.operation == "nomerge":
6843 depends_on_order.add((parent, pkg))
6845 # None of the above blocker resolutions techniques apply,
6846 # so apparently this one is unresolvable.
6847 unresolved_blocks = True
6849 # Make sure we don't unmerge any package that have been pulled
6851 if not unresolved_blocks and depends_on_order:
6852 for inst_pkg, inst_task in depends_on_order:
6853 if self.digraph.contains(inst_pkg) and \
6854 self.digraph.parent_nodes(inst_pkg):
6855 unresolved_blocks = True
6858 if not unresolved_blocks and depends_on_order:
6859 for inst_pkg, inst_task in depends_on_order:
6860 uninst_task = Package(built=inst_pkg.built,
6861 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6862 metadata=inst_pkg.metadata,
6863 operation="uninstall",
6864 root_config=inst_pkg.root_config,
6865 type_name=inst_pkg.type_name)
6866 self._pkg_cache[uninst_task] = uninst_task
6867 # Enforce correct merge order with a hard dep.
6868 self.digraph.addnode(uninst_task, inst_task,
6869 priority=BlockerDepPriority.instance)
6870 # Count references to this blocker so that it can be
6871 # invalidated after nodes referencing it have been
6873 self._blocker_uninstalls.addnode(uninst_task, blocker)
6874 if not unresolved_blocks and not depends_on_order:
6875 self._irrelevant_blockers.add(blocker, parent)
6876 self._blocker_parents.remove_edge(blocker, parent)
6877 if not self._blocker_parents.parent_nodes(blocker):
6878 self._blocker_parents.remove(blocker)
6879 if not self._blocker_parents.child_nodes(parent):
6880 self._blocker_parents.remove(parent)
6881 if unresolved_blocks:
6882 self._unsolvable_blockers.add(blocker, parent)
# Return whether blocker conflicts may be tolerated instead of aborting:
# true when any of the listed "don't actually merge" style options is in
# effect, since no real package collision can happen then.
# NOTE(review): this extract is truncated -- the loop body and the return
# statement(s) (original lines 6887, 6891+) are missing from this view.
6886 def _accept_blocker_conflicts(self):
6888 for x in ("--buildpkgonly", "--fetchonly",
6889 "--fetch-all-uri", "--nodeps"):
6890 if x in self.myopts:
# Sort mygraph.order in place so that leaf-node selection favors the
# "most depended upon" nodes first. Uses a classic cmp-style comparator
# adapted via cmp_sort_key (imported from portage.util in HEAD).
# NOTE(review): several comparator lines (original 6905, 6910-6917,
# 6921-6924) are missing from this extract; the visible code shows the
# comparison criteria but not the values returned for each branch.
6895 def _merge_order_bias(self, mygraph):
6897 For optimal leaf node selection, promote deep system runtime deps and
6898 order nodes from highest to lowest overall reference count.
# node_info maps each node to its parent (reverse-dependency) count.
6902 for node in mygraph.order:
6903 node_info[node] = len(mygraph.parent_nodes(node))
6904 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
6906 def cmp_merge_preference(node1, node2):
# Uninstall operations are compared against each other first.
6908 if node1.operation == 'uninstall':
6909 if node2.operation == 'uninstall':
6913 if node2.operation == 'uninstall':
6914 if node1.operation == 'uninstall':
# Deep system runtime deps outrank ordinary nodes.
6918 node1_sys = node1 in deep_system_deps
6919 node2_sys = node2 in deep_system_deps
6920 if node1_sys != node2_sys:
# Fall back to reference count, descending (more parents first).
6925 return node_info[node2] - node_info[node1]
6927 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
# Return a copy of the serialized merge list, computing and caching it on
# first use. The 'reversed' parameter shadows the builtin of the same
# name -- a pre-existing wart that cannot be renamed here without
# breaking keyword callers.
# NOTE(review): the try/else scaffolding and the final return (original
# lines 6930, 6933, 6937-6943) are missing from this extract; only the
# retry loop around _serialize_tasks() is visible.
6929 def altlist(self, reversed=False):
6931 while self._serialized_tasks_cache is None:
6932 self._resolve_conflicts()
6934 self._serialized_tasks_cache, self._scheduler_graph = \
6935 self._serialize_tasks()
# _serialize_tasks_retry signals that the graph was completed and
# serialization must be attempted again.
6936 except self._serialize_tasks_retry:
6939 retlist = self._serialized_tasks_cache[:]
# Lazily build and return the scheduler graph; break_refs() is applied to
# its order so the returned Package instances no longer pin this depgraph
# (and its FakeVartree) on the heap.
# NOTE(review): original line 6959, which populates _scheduler_graph
# (presumably via self.altlist()), is missing from this extract -- TODO
# confirm against the full source.
6944 def schedulerGraph(self):
6946 The scheduler graph is identical to the normal one except that
6947 uninstall edges are reversed in specific cases that require
6948 conflicting packages to be temporarily installed simultaneously.
6949 This is intended for use by the Scheduler in it's parallelization
6950 logic. It ensures that temporary simultaneous installation of
6951 conflicting packages is avoided when appropriate (especially for
6952 !!atom blockers), but allowed in specific cases that require it.
6954 Note that this method calls break_refs() which alters the state of
6955 internal Package instances such that this depgraph instance should
6956 not be used to perform any more calculations.
6958 if self._scheduler_graph is None:
6960 self.break_refs(self._scheduler_graph.order)
6961 return self._scheduler_graph
# Replace each node's FakeVartree-backed RootConfig with the original
# RootConfig from self._trees_orig, severing reference cycles back into
# this depgraph.
# NOTE(review): the loop header iterating over 'nodes' (around original
# line 6970) is missing from this extract; the visible body clearly
# operates on each 'node' in turn.
6963 def break_refs(self, nodes):
6965 Take a mergelist like that returned from self.altlist() and
6966 break any references that lead back to the depgraph. This is
6967 useful if you want to hold references to packages without
6968 also holding the depgraph on the heap.
# Only Package-like nodes carry a root_config; Blocker nodes and
# other entries are left untouched by this check.
6971 if hasattr(node, "root_config"):
6972 # The FakeVartree references the _package_cache which
6973 # references the depgraph. So that Package instances don't
6974 # hold the depgraph and FakeVartree on the heap, replace
6975 # the RootConfig that references the FakeVartree with the
6976 # original RootConfig instance which references the actual
6978 node.root_config = \
6979 self._trees_orig[node.root_config.root]["root_config"]
def _resolve_conflicts(self):
	"""Prepare the graph for serialization.

	Completes the dependency graph, validates blockers, and then lets
	any recorded slot conflicts be processed.  Each preparatory step
	must succeed before the next is attempted; a failure raises
	self._unknown_internal_error().
	"""
	graph_complete = self._complete_graph()
	if not graph_complete:
		raise self._unknown_internal_error()

	blockers_valid = self.validate_blockers()
	if not blockers_valid:
		raise self._unknown_internal_error()

	# Slot conflicts are only processed when some were recorded.
	if self._slot_collision_info:
		self._process_slot_conflicts()
# Serialize the dependency digraph into an ordered merge list.
# Returns (retlist, scheduler_graph).  Raises _serialize_tasks_retry when
# the graph must be recomputed with "complete" added to self.myparams,
# and _unknown_internal_error for unresolvable blocker or slot-collision
# conflicts, or circular dependencies.
# NOTE(review): this extract is missing many interior lines (the embedded
# original line numbers are not contiguous), so the comments below only
# describe what the visible code demonstrates.
6991 def _serialize_tasks(self):
6993 if "--debug" in self.myopts:
6994 writemsg("\ndigraph:\n\n", noiselevel=-1)
6995 self.digraph.debug_print()
6996 writemsg("\n", noiselevel=-1)
# Work on copies: mygraph is progressively consumed by leaf-node
# selection, while scheduler_graph survives as part of the result.
6998 scheduler_graph = self.digraph.copy()
6999 mygraph=self.digraph.copy()
7000 # Prune "nomerge" root nodes if nothing depends on them, since
7001 # otherwise they slow down merge order calculation. Don't remove
7002 # non-root nodes since they help optimize merge order in some cases
7003 # such as revdep-rebuild.
7004 removed_nodes = set()
7006 for node in mygraph.root_nodes():
7007 if not isinstance(node, Package) or \
7008 node.installed or node.onlydeps:
7009 removed_nodes.add(node)
7011 self.spinner.update()
7012 mygraph.difference_update(removed_nodes)
7013 if not removed_nodes:
7015 removed_nodes.clear()
7016 self._merge_order_bias(mygraph)
7017 def cmp_circular_bias(n1, n2):
7019 RDEPEND is stronger than PDEPEND and this function
7020 measures such a strength bias within a circular
7021 dependency relationship.
7023 n1_n2_medium = n2 in mygraph.child_nodes(n1,
7024 ignore_priority=priority_range.ignore_medium_soft)
7025 n2_n1_medium = n1 in mygraph.child_nodes(n2,
7026 ignore_priority=priority_range.ignore_medium_soft)
7027 if n1_n2_medium == n2_n1_medium:
7032 myblocker_uninstalls = self._blocker_uninstalls.copy()
7034 # Contains uninstall tasks that have been scheduled to
7035 # occur after overlapping blockers have been installed.
7036 scheduled_uninstalls = set()
7037 # Contains any Uninstall tasks that have been ignored
7038 # in order to avoid the circular deps code path. These
7039 # correspond to blocker conflicts that could not be
7041 ignored_uninstall_tasks = set()
7042 have_uninstall_task = False
7043 complete = "complete" in self.myparams
7046 def get_nodes(**kwargs):
7048 Returns leaf nodes excluding Uninstall instances
7049 since those should be executed as late as possible.
7051 return [node for node in mygraph.leaf_nodes(**kwargs) \
7052 if isinstance(node, Package) and \
7053 (node.operation != "uninstall" or \
7054 node in scheduled_uninstalls)]
7056 # sys-apps/portage needs special treatment if ROOT="/"
7057 running_root = self._running_root.root
7058 from portage.const import PORTAGE_PACKAGE_ATOM
7059 runtime_deps = InternalPackageSet(
7060 initial_atoms=[PORTAGE_PACKAGE_ATOM])
7061 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
7062 PORTAGE_PACKAGE_ATOM)
7063 replacement_portage = self.mydbapi[running_root].match_pkgs(
7064 PORTAGE_PACKAGE_ATOM)
# Reduce the match lists to single packages (or None when absent /
# unchanged), so the identity comparisons below are meaningful.
7067 running_portage = running_portage[0]
7069 running_portage = None
7071 if replacement_portage:
7072 replacement_portage = replacement_portage[0]
7074 replacement_portage = None
7076 if replacement_portage == running_portage:
7077 replacement_portage = None
7079 if replacement_portage is not None:
7080 # update from running_portage to replacement_portage asap
7081 asap_nodes.append(replacement_portage)
7083 if running_portage is not None:
7085 portage_rdepend = self._select_atoms_highest_available(
7086 running_root, running_portage.metadata["RDEPEND"],
7087 myuse=running_portage.use.enabled,
7088 parent=running_portage, strict=False)
7089 except portage.exception.InvalidDependString, e:
7090 portage.writemsg("!!! Invalid RDEPEND in " + \
7091 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7092 (running_root, running_portage.cpv, e), noiselevel=-1)
7094 portage_rdepend = []
# Portage's own non-blocker RDEPEND atoms are treated as runtime
# deps that must not be uninstalled out from under it.
7095 runtime_deps.update(atom for atom in portage_rdepend \
7096 if not atom.startswith("!"))
7098 def gather_deps(ignore_priority, mergeable_nodes,
7099 selected_nodes, node):
7101 Recursively gather a group of nodes that RDEPEND on
7102 eachother. This ensures that they are merged as a group
7103 and get their RDEPENDs satisfied as soon as possible.
7105 if node in selected_nodes:
7107 if node not in mergeable_nodes:
7109 if node == replacement_portage and \
7110 mygraph.child_nodes(node,
7111 ignore_priority=priority_range.ignore_medium_soft):
7112 # Make sure that portage always has all of it's
7113 # RDEPENDs installed first.
7115 selected_nodes.add(node)
7116 for child in mygraph.child_nodes(node,
7117 ignore_priority=ignore_priority):
7118 if not gather_deps(ignore_priority,
7119 mergeable_nodes, selected_nodes, child):
# Priority filters that additionally treat hard blocker-dep edges
# as never ignorable.
7123 def ignore_uninst_or_med(priority):
7124 if priority is BlockerDepPriority.instance:
7126 return priority_range.ignore_medium(priority)
7128 def ignore_uninst_or_med_soft(priority):
7129 if priority is BlockerDepPriority.instance:
7131 return priority_range.ignore_medium_soft(priority)
7133 tree_mode = "--tree" in self.myopts
7134 # Tracks whether or not the current iteration should prefer asap_nodes
7135 # if available. This is set to False when the previous iteration
7136 # failed to select any nodes. It is reset whenever nodes are
7137 # successfully selected.
7140 # Controls whether or not the current iteration should drop edges that
7141 # are "satisfied" by installed packages, in order to solve circular
7142 # dependencies. The deep runtime dependencies of installed packages are
7143 # not checked in this case (bug #199856), so it must be avoided
7144 # whenever possible.
7145 drop_satisfied = False
7147 # State of variables for successive iterations that loosen the
7148 # criteria for node selection.
7150 # iteration prefer_asap drop_satisfied
7155 # If no nodes are selected on the last iteration, it is due to
7156 # unresolved blockers or circular dependencies.
# Main selection loop: each pass picks one or more leaf nodes,
# appends them to the merge list and removes them from mygraph,
# progressively relaxing the selection criteria when stuck.
7158 while not mygraph.empty():
7159 self.spinner.update()
7160 selected_nodes = None
7161 ignore_priority = None
7162 if drop_satisfied or (prefer_asap and asap_nodes):
7163 priority_range = DepPrioritySatisfiedRange
7165 priority_range = DepPriorityNormalRange
7166 if prefer_asap and asap_nodes:
7167 # ASAP nodes are merged before their soft deps. Go ahead and
7168 # select root nodes here if necessary, since it's typical for
7169 # the parent to have been removed from the graph already.
7170 asap_nodes = [node for node in asap_nodes \
7171 if mygraph.contains(node)]
7172 for node in asap_nodes:
7173 if not mygraph.child_nodes(node,
7174 ignore_priority=priority_range.ignore_soft):
7175 selected_nodes = [node]
7176 asap_nodes.remove(node)
7178 if not selected_nodes and \
7179 not (prefer_asap and asap_nodes):
7180 for i in xrange(priority_range.NONE,
7181 priority_range.MEDIUM_SOFT + 1):
7182 ignore_priority = priority_range.ignore_priority[i]
7183 nodes = get_nodes(ignore_priority=ignore_priority)
7185 # If there is a mix of uninstall nodes with other
7186 # types, save the uninstall nodes for later since
7187 # sometimes a merge node will render an uninstall
7188 # node unnecessary (due to occupying the same slot),
7189 # and we want to avoid executing a separate uninstall
7190 # task in that case.
7192 good_uninstalls = []
7193 with_some_uninstalls_excluded = []
7195 if node.operation == "uninstall":
7196 slot_node = self.mydbapi[node.root
7197 ].match_pkgs(node.slot_atom)
7199 slot_node[0].operation == "merge":
7201 good_uninstalls.append(node)
7202 with_some_uninstalls_excluded.append(node)
7204 nodes = good_uninstalls
7205 elif with_some_uninstalls_excluded:
7206 nodes = with_some_uninstalls_excluded
7210 if ignore_priority is None and not tree_mode:
7211 # Greedily pop all of these nodes since no
7212 # relationship has been ignored. This optimization
7213 # destroys --tree output, so it's disabled in tree
7215 selected_nodes = nodes
7217 # For optimal merge order:
7218 # * Only pop one node.
7219 # * Removing a root node (node without a parent)
7220 # will not produce a leaf node, so avoid it.
7221 # * It's normal for a selected uninstall to be a
7222 # root node, so don't check them for parents.
7224 if node.operation == "uninstall" or \
7225 mygraph.parent_nodes(node):
7226 selected_nodes = [node]
# Second attempt: gather mutually-RDEPENDing groups of nodes so
# a whole cycle can be merged together.
7232 if not selected_nodes:
7233 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7235 mergeable_nodes = set(nodes)
7236 if prefer_asap and asap_nodes:
7238 for i in xrange(priority_range.SOFT,
7239 priority_range.MEDIUM_SOFT + 1):
7240 ignore_priority = priority_range.ignore_priority[i]
7242 if not mygraph.parent_nodes(node):
7244 selected_nodes = set()
7245 if gather_deps(ignore_priority,
7246 mergeable_nodes, selected_nodes, node):
7249 selected_nodes = None
7253 if prefer_asap and asap_nodes and not selected_nodes:
7254 # We failed to find any asap nodes to merge, so ignore
7255 # them for the next iteration.
7259 if selected_nodes and ignore_priority is not None:
7260 # Try to merge ignored medium_soft deps as soon as possible
7261 # if they're not satisfied by installed packages.
7262 for node in selected_nodes:
7263 children = set(mygraph.child_nodes(node))
7264 soft = children.difference(
7265 mygraph.child_nodes(node,
7266 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7267 medium_soft = children.difference(
7268 mygraph.child_nodes(node,
7270 DepPrioritySatisfiedRange.ignore_medium_soft))
7271 medium_soft.difference_update(soft)
7272 for child in medium_soft:
7273 if child in selected_nodes:
7275 if child in asap_nodes:
7277 asap_nodes.append(child)
7279 if selected_nodes and len(selected_nodes) > 1:
7280 if not isinstance(selected_nodes, list):
7281 selected_nodes = list(selected_nodes)
7282 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
# No ordinary node could be selected: try to schedule an
# Uninstall task that resolves a blocker conflict.
7284 if not selected_nodes and not myblocker_uninstalls.is_empty():
7285 # An Uninstall task needs to be executed in order to
7286 # avoid conflict if possible.
7289 priority_range = DepPrioritySatisfiedRange
7291 priority_range = DepPriorityNormalRange
7293 mergeable_nodes = get_nodes(
7294 ignore_priority=ignore_uninst_or_med)
7296 min_parent_deps = None
7298 for task in myblocker_uninstalls.leaf_nodes():
7299 # Do some sanity checks so that system or world packages
7300 # don't get uninstalled inappropriately here (only really
7301 # necessary when --complete-graph has not been enabled).
7303 if task in ignored_uninstall_tasks:
7306 if task in scheduled_uninstalls:
7307 # It's been scheduled but it hasn't
7308 # been executed yet due to dependence
7309 # on installation of blocking packages.
7312 root_config = self.roots[task.root]
7313 inst_pkg = self._pkg_cache[
7314 ("installed", task.root, task.cpv, "nomerge")]
7316 if self.digraph.contains(inst_pkg):
# EAPI 0/1 blockers have no overlap semantics, so overlap
# with the running root is treated heuristically; EAPI 2+
# "!!" blockers forbid overlap explicitly.
7319 forbid_overlap = False
7320 heuristic_overlap = False
7321 for blocker in myblocker_uninstalls.parent_nodes(task):
7322 if blocker.eapi in ("0", "1"):
7323 heuristic_overlap = True
7324 elif blocker.atom.blocker.overlap.forbid:
7325 forbid_overlap = True
7327 if forbid_overlap and running_root == task.root:
7330 if heuristic_overlap and running_root == task.root:
7331 # Never uninstall sys-apps/portage or it's essential
7332 # dependencies, except through replacement.
7334 runtime_dep_atoms = \
7335 list(runtime_deps.iterAtomsForPackage(task))
7336 except portage.exception.InvalidDependString, e:
7337 portage.writemsg("!!! Invalid PROVIDE in " + \
7338 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7339 (task.root, task.cpv, e), noiselevel=-1)
7343 # Don't uninstall a runtime dep if it appears
7344 # to be the only suitable one installed.
7346 vardb = root_config.trees["vartree"].dbapi
7347 for atom in runtime_dep_atoms:
7348 other_version = None
7349 for pkg in vardb.match_pkgs(atom):
7350 if pkg.cpv == task.cpv and \
7351 pkg.metadata["COUNTER"] == \
7352 task.metadata["COUNTER"]:
7356 if other_version is None:
7362 # For packages in the system set, don't take
7363 # any chances. If the conflict can't be resolved
7364 # by a normal replacement operation then abort.
7367 for atom in root_config.sets[
7368 "system"].iterAtomsForPackage(task):
7371 except portage.exception.InvalidDependString, e:
7372 portage.writemsg("!!! Invalid PROVIDE in " + \
7373 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7374 (task.root, task.cpv, e), noiselevel=-1)
7380 # Note that the world check isn't always
7381 # necessary since self._complete_graph() will
7382 # add all packages from the system and world sets to the
7383 # graph. This just allows unresolved conflicts to be
7384 # detected as early as possible, which makes it possible
7385 # to avoid calling self._complete_graph() when it is
7386 # unnecessary due to blockers triggering an abortion.
7388 # For packages in the world set, go ahead an uninstall
7389 # when necessary, as long as the atom will be satisfied
7390 # in the final state.
7391 graph_db = self.mydbapi[task.root]
7394 for atom in root_config.sets[
7395 "world"].iterAtomsForPackage(task):
7397 for pkg in graph_db.match_pkgs(atom):
7404 self._blocked_world_pkgs[inst_pkg] = atom
7406 except portage.exception.InvalidDependString, e:
7407 portage.writemsg("!!! Invalid PROVIDE in " + \
7408 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7409 (task.root, task.cpv, e), noiselevel=-1)
7415 # Check the deps of parent nodes to ensure that
7416 # the chosen task produces a leaf node. Maybe
7417 # this can be optimized some more to make the
7418 # best possible choice, but the current algorithm
7419 # is simple and should be near optimal for most
7421 mergeable_parent = False
7423 for parent in mygraph.parent_nodes(task):
7424 parent_deps.update(mygraph.child_nodes(parent,
7425 ignore_priority=priority_range.ignore_medium_soft))
7426 if parent in mergeable_nodes and \
7427 gather_deps(ignore_uninst_or_med_soft,
7428 mergeable_nodes, set(), parent):
7429 mergeable_parent = True
7431 if not mergeable_parent:
7434 parent_deps.remove(task)
7435 if min_parent_deps is None or \
7436 len(parent_deps) < min_parent_deps:
7437 min_parent_deps = len(parent_deps)
7440 if uninst_task is not None:
7441 # The uninstall is performed only after blocking
7442 # packages have been merged on top of it. File
7443 # collisions between blocking packages are detected
7444 # and removed from the list of files to be uninstalled.
7445 scheduled_uninstalls.add(uninst_task)
7446 parent_nodes = mygraph.parent_nodes(uninst_task)
7448 # Reverse the parent -> uninstall edges since we want
7449 # to do the uninstall after blocking packages have
7450 # been merged on top of it.
7451 mygraph.remove(uninst_task)
7452 for blocked_pkg in parent_nodes:
7453 mygraph.add(blocked_pkg, uninst_task,
7454 priority=BlockerDepPriority.instance)
7455 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7456 scheduler_graph.add(blocked_pkg, uninst_task,
7457 priority=BlockerDepPriority.instance)
7459 # Reset the state variables for leaf node selection and
7460 # continue trying to select leaf nodes.
7462 drop_satisfied = False
7465 if not selected_nodes:
7466 # Only select root nodes as a last resort. This case should
7467 # only trigger when the graph is nearly empty and the only
7468 # remaining nodes are isolated (no parents or children). Since
7469 # the nodes must be isolated, ignore_priority is not needed.
7470 selected_nodes = get_nodes()
7472 if not selected_nodes and not drop_satisfied:
7473 drop_satisfied = True
7476 if not selected_nodes and not myblocker_uninstalls.is_empty():
7477 # If possible, drop an uninstall task here in order to avoid
7478 # the circular deps code path. The corresponding blocker will
7479 # still be counted as an unresolved conflict.
7481 for node in myblocker_uninstalls.leaf_nodes():
7483 mygraph.remove(node)
7488 ignored_uninstall_tasks.add(node)
7491 if uninst_task is not None:
7492 # Reset the state variables for leaf node selection and
7493 # continue trying to select leaf nodes.
7495 drop_satisfied = False
# Even the loosest criteria selected nothing: genuine circular
# dependency -- preserve the remaining graph for display.
7498 if not selected_nodes:
7499 self._circular_deps_for_display = mygraph
7500 raise self._unknown_internal_error()
7502 # At this point, we've succeeded in selecting one or more nodes, so
7503 # reset state variables for leaf node selection.
7505 drop_satisfied = False
7507 mygraph.difference_update(selected_nodes)
# Append the selected nodes to the merge list, handling the
# interactions between uninstall tasks and the blockers they solve.
7509 for node in selected_nodes:
7510 if isinstance(node, Package) and \
7511 node.operation == "nomerge":
7514 # Handle interactions between blockers
7515 # and uninstallation tasks.
7516 solved_blockers = set()
7518 if isinstance(node, Package) and \
7519 "uninstall" == node.operation:
7520 have_uninstall_task = True
7523 vardb = self.trees[node.root]["vartree"].dbapi
7524 previous_cpv = vardb.match(node.slot_atom)
7526 # The package will be replaced by this one, so remove
7527 # the corresponding Uninstall task if necessary.
7528 previous_cpv = previous_cpv[0]
7530 ("installed", node.root, previous_cpv, "uninstall")
7532 mygraph.remove(uninst_task)
7536 if uninst_task is not None and \
7537 uninst_task not in ignored_uninstall_tasks and \
7538 myblocker_uninstalls.contains(uninst_task):
7539 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7540 myblocker_uninstalls.remove(uninst_task)
7541 # Discard any blockers that this Uninstall solves.
7542 for blocker in blocker_nodes:
7543 if not myblocker_uninstalls.child_nodes(blocker):
7544 myblocker_uninstalls.remove(blocker)
7545 solved_blockers.add(blocker)
7547 retlist.append(node)
7549 if (isinstance(node, Package) and \
7550 "uninstall" == node.operation) or \
7551 (uninst_task is not None and \
7552 uninst_task in scheduled_uninstalls):
7553 # Include satisfied blockers in the merge list
7554 # since the user might be interested and also
7555 # it serves as an indicator that blocking packages
7556 # will be temporarily installed simultaneously.
7557 for blocker in solved_blockers:
7558 retlist.append(Blocker(atom=blocker.atom,
7559 root=blocker.root, eapi=blocker.eapi,
# Any blockers still unsolved at this point are reported as
# unresolvable conflicts.
7562 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7563 for node in myblocker_uninstalls.root_nodes():
7564 unsolvable_blockers.add(node)
7566 for blocker in unsolvable_blockers:
7567 retlist.append(blocker)
7569 # If any Uninstall tasks need to be executed in order
7570 # to avoid a conflict, complete the graph with any
7571 # dependencies that may have been initially
7572 # neglected (to ensure that unsafe Uninstall tasks
7573 # are properly identified and blocked from execution).
7574 if have_uninstall_task and \
7576 not unsolvable_blockers:
7577 self.myparams.add("complete")
7578 raise self._serialize_tasks_retry("")
7580 if unsolvable_blockers and \
7581 not self._accept_blocker_conflicts():
7582 self._unsatisfied_blockers_for_display = unsolvable_blockers
7583 self._serialized_tasks_cache = retlist[:]
7584 self._scheduler_graph = scheduler_graph
7585 raise self._unknown_internal_error()
7587 if self._slot_collision_info and \
7588 not self._accept_blocker_conflicts():
7589 self._serialized_tasks_cache = retlist[:]
7590 self._scheduler_graph = scheduler_graph
7591 raise self._unknown_internal_error()
7593 return retlist, scheduler_graph
# Report a circular-dependency panic: shrink mygraph down to the actual
# cycle(s) by repeatedly stripping root nodes, display the involved
# packages in forced --tree mode (so USE flags are visible), and print
# guidance about disabling USE flags that trigger optional deps.
# NOTE(review): several lines are missing from this extract (e.g. the
# loop around root-node elimination near 7603-7604 and the branch at
# 7613-7616), so the visible control flow is incomplete.
7595 def _show_circular_deps(self, mygraph):
7596 # No leaf nodes are available, so we have a circular
7597 # dependency panic situation. Reduce the noise level to a
7598 # minimum via repeated elimination of root nodes since they
7599 # have no parents and thus can not be part of a cycle.
7601 root_nodes = mygraph.root_nodes(
7602 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7605 mygraph.difference_update(root_nodes)
7606 # Display the USE flags that are enabled on nodes that are part
7607 # of dependency cycles in case that helps the user decide to
7608 # disable some of them.
7610 tempgraph = mygraph.copy()
7611 while not tempgraph.empty():
7612 nodes = tempgraph.leaf_nodes()
7614 node = tempgraph.order[0]
7617 display_order.append(node)
7618 tempgraph.remove(node)
7619 display_order.reverse()
# Force verbose tree output for the error display regardless of the
# user's options.
7620 self.myopts.pop("--quiet", None)
7621 self.myopts.pop("--verbose", None)
7622 self.myopts["--tree"] = True
7623 portage.writemsg("\n\n", noiselevel=-1)
7624 self.display(display_order)
7625 prefix = colorize("BAD", " * ")
7626 portage.writemsg("\n", noiselevel=-1)
7627 portage.writemsg(prefix + "Error: circular dependencies:\n",
7629 portage.writemsg("\n", noiselevel=-1)
7630 mygraph.debug_print()
7631 portage.writemsg("\n", noiselevel=-1)
7632 portage.writemsg(prefix + "Note that circular dependencies " + \
7633 "can often be avoided by temporarily\n", noiselevel=-1)
7634 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7635 "optional dependencies.\n", noiselevel=-1)
def _show_merge_list(self):
	"""Display the cached merge list via self.display(), unless that
	exact list (or its reverse) is the one most recently displayed.
	Used so error reporting can show context without repeating output.
	"""
	tasks = self._serialized_tasks_cache
	if tasks is None:
		# Nothing has been serialized yet, so there is nothing to show.
		return

	shown = self._displayed_list
	already_shown = bool(shown) and \
		(shown == tasks or shown == list(reversed(tasks)))
	if already_shown:
		return

	display_list = tasks[:]
	# --tree output is rendered in reverse (dependencies last).
	if "--tree" in self.myopts:
		display_list.reverse()
	self.display(display_list)
# Explain unsatisfied blockers to the user: show the merge list, then for
# each conflicting package list (a bounded number of) the parents/atoms
# that pulled it in, pruning packages that are only pulled in by other
# conflict packages to reduce noise.
# NOTE(review): many interior lines are missing from this extract
# (e.g. initialization of conflict_pkgs/pruned_pkgs/msg and several
# loop-control lines), so the visible control flow is incomplete.
7648 def _show_unsatisfied_blockers(self, blockers):
7649 self._show_merge_list()
7650 msg = "Error: The above package list contains " + \
7651 "packages which cannot be installed " + \
7652 "at the same time on the same system."
7653 prefix = colorize("BAD", " * ")
7654 from textwrap import wrap
7655 portage.writemsg("\n", noiselevel=-1)
7656 for line in wrap(msg, 70):
7657 portage.writemsg(prefix + line + "\n", noiselevel=-1)
7659 # Display the conflicting packages along with the packages
7660 # that pulled them in. This is helpful for troubleshooting
7661 # cases in which blockers don't solve automatically and
7662 # the reasons are not apparent from the normal merge list
7666 for blocker in blockers:
7667 for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7668 self._blocker_parents.parent_nodes(blocker)):
7669 parent_atoms = self._parent_atoms.get(pkg)
7670 if not parent_atoms:
# Fall back to the world-set atom recorded when the
# uninstall was rejected for a world package.
7671 atom = self._blocked_world_pkgs.get(pkg)
7672 if atom is not None:
7673 parent_atoms = set([("@world", atom)])
7675 conflict_pkgs[pkg] = parent_atoms
7678 # Reduce noise by pruning packages that are only
7679 # pulled in by other conflict packages.
7681 for pkg, parent_atoms in conflict_pkgs.iteritems():
7682 relevant_parent = False
7683 for parent, atom in parent_atoms:
7684 if parent not in conflict_pkgs:
7685 relevant_parent = True
7687 if not relevant_parent:
7688 pruned_pkgs.add(pkg)
7689 for pkg in pruned_pkgs:
7690 del conflict_pkgs[pkg]
7696 # Max number of parents shown, to avoid flooding the display.
7698 for pkg, parent_atoms in conflict_pkgs.iteritems():
7702 # Prefer packages that are not directly involved in a conflict.
7703 for parent_atom in parent_atoms:
7704 if len(pruned_list) >= max_parents:
7706 parent, atom = parent_atom
7707 if parent not in conflict_pkgs:
7708 pruned_list.add(parent_atom)
7710 for parent_atom in parent_atoms:
7711 if len(pruned_list) >= max_parents:
7713 pruned_list.add(parent_atom)
7715 omitted_parents = len(parent_atoms) - len(pruned_list)
7716 msg.append(indent + "%s pulled in by\n" % pkg)
7718 for parent_atom in pruned_list:
7719 parent, atom = parent_atom
7720 msg.append(2*indent)
7721 if isinstance(parent,
7722 (PackageArg, AtomArg)):
7723 # For PackageArg and AtomArg types, it's
7724 # redundant to display the atom attribute.
7725 msg.append(str(parent))
7727 # Display the specific atom from SetArg or
7729 msg.append("%s required by %s" % (atom, parent))
7733 msg.append(2*indent)
7734 msg.append("(and %d more)\n" % omitted_parents)
7738 sys.stderr.write("".join(msg))
7741 if "--quiet" not in self.myopts:
7742 show_blocker_docs_link()
7744 def display(self, mylist, favorites=[], verbosity=None):
7746 # This is used to prevent display_problems() from
7747 # redundantly displaying this exact same merge list
7748 # again via _show_merge_list().
7749 self._displayed_list = mylist
7751 if verbosity is None:
7752 verbosity = ("--quiet" in self.myopts and 1 or \
7753 "--verbose" in self.myopts and 3 or 2)
7754 favorites_set = InternalPackageSet(favorites)
7755 oneshot = "--oneshot" in self.myopts or \
7756 "--onlydeps" in self.myopts
7757 columns = "--columns" in self.myopts
7762 counters = PackageCounters()
7764 if verbosity == 1 and "--verbose" not in self.myopts:
7765 def create_use_string(*args):
7768 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7770 is_new, reinst_flags,
7771 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7772 alphabetical=("--alphabetical" in self.myopts)):
7780 cur_iuse = set(cur_iuse)
7781 enabled_flags = cur_iuse.intersection(cur_use)
7782 removed_iuse = set(old_iuse).difference(cur_iuse)
7783 any_iuse = cur_iuse.union(old_iuse)
7784 any_iuse = list(any_iuse)
7786 for flag in any_iuse:
7789 reinst_flag = reinst_flags and flag in reinst_flags
7790 if flag in enabled_flags:
7792 if is_new or flag in old_use and \
7793 (all_flags or reinst_flag):
7794 flag_str = red(flag)
7795 elif flag not in old_iuse:
7796 flag_str = yellow(flag) + "%*"
7797 elif flag not in old_use:
7798 flag_str = green(flag) + "*"
7799 elif flag in removed_iuse:
7800 if all_flags or reinst_flag:
7801 flag_str = yellow("-" + flag) + "%"
7804 flag_str = "(" + flag_str + ")"
7805 removed.append(flag_str)
7808 if is_new or flag in old_iuse and \
7809 flag not in old_use and \
7810 (all_flags or reinst_flag):
7811 flag_str = blue("-" + flag)
7812 elif flag not in old_iuse:
7813 flag_str = yellow("-" + flag)
7814 if flag not in iuse_forced:
7816 elif flag in old_use:
7817 flag_str = green("-" + flag) + "*"
7819 if flag in iuse_forced:
7820 flag_str = "(" + flag_str + ")"
7822 enabled.append(flag_str)
7824 disabled.append(flag_str)
7827 ret = " ".join(enabled)
7829 ret = " ".join(enabled + disabled + removed)
7831 ret = '%s="%s" ' % (name, ret)
7834 repo_display = RepoDisplay(self.roots)
7838 mygraph = self.digraph.copy()
7840 # If there are any Uninstall instances, add the corresponding
7841 # blockers to the digraph (useful for --tree display).
7843 executed_uninstalls = set(node for node in mylist \
7844 if isinstance(node, Package) and node.operation == "unmerge")
7846 for uninstall in self._blocker_uninstalls.leaf_nodes():
7847 uninstall_parents = \
7848 self._blocker_uninstalls.parent_nodes(uninstall)
7849 if not uninstall_parents:
7852 # Remove the corresponding "nomerge" node and substitute
7853 # the Uninstall node.
7854 inst_pkg = self._pkg_cache[
7855 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7857 mygraph.remove(inst_pkg)
7862 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7864 inst_pkg_blockers = []
7866 # Break the Package -> Uninstall edges.
7867 mygraph.remove(uninstall)
7869 # Resolution of a package's blockers
7870 # depend on it's own uninstallation.
7871 for blocker in inst_pkg_blockers:
7872 mygraph.add(uninstall, blocker)
7874 # Expand Package -> Uninstall edges into
7875 # Package -> Blocker -> Uninstall edges.
7876 for blocker in uninstall_parents:
7877 mygraph.add(uninstall, blocker)
7878 for parent in self._blocker_parents.parent_nodes(blocker):
7879 if parent != inst_pkg:
7880 mygraph.add(blocker, parent)
7882 # If the uninstall task did not need to be executed because
7883 # of an upgrade, display Blocker -> Upgrade edges since the
7884 # corresponding Blocker -> Uninstall edges will not be shown.
7886 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7887 if upgrade_node is not None and \
7888 uninstall not in executed_uninstalls:
7889 for blocker in uninstall_parents:
7890 mygraph.add(upgrade_node, blocker)
7892 unsatisfied_blockers = []
7897 if isinstance(x, Blocker) and not x.satisfied:
7898 unsatisfied_blockers.append(x)
7901 if "--tree" in self.myopts:
7902 depth = len(tree_nodes)
7903 while depth and graph_key not in \
7904 mygraph.child_nodes(tree_nodes[depth-1]):
7907 tree_nodes = tree_nodes[:depth]
7908 tree_nodes.append(graph_key)
7909 display_list.append((x, depth, True))
7910 shown_edges.add((graph_key, tree_nodes[depth-1]))
7912 traversed_nodes = set() # prevent endless circles
7913 traversed_nodes.add(graph_key)
7914 def add_parents(current_node, ordered):
7916 # Do not traverse to parents if this node is an
7917 # an argument or a direct member of a set that has
7918 # been specified as an argument (system or world).
7919 if current_node not in self._set_nodes:
7920 parent_nodes = mygraph.parent_nodes(current_node)
7922 child_nodes = set(mygraph.child_nodes(current_node))
7923 selected_parent = None
7924 # First, try to avoid a direct cycle.
7925 for node in parent_nodes:
7926 if not isinstance(node, (Blocker, Package)):
7928 if node not in traversed_nodes and \
7929 node not in child_nodes:
7930 edge = (current_node, node)
7931 if edge in shown_edges:
7933 selected_parent = node
7935 if not selected_parent:
7936 # A direct cycle is unavoidable.
7937 for node in parent_nodes:
7938 if not isinstance(node, (Blocker, Package)):
7940 if node not in traversed_nodes:
7941 edge = (current_node, node)
7942 if edge in shown_edges:
7944 selected_parent = node
7947 shown_edges.add((current_node, selected_parent))
7948 traversed_nodes.add(selected_parent)
7949 add_parents(selected_parent, False)
7950 display_list.append((current_node,
7951 len(tree_nodes), ordered))
7952 tree_nodes.append(current_node)
7954 add_parents(graph_key, True)
7956 display_list.append((x, depth, True))
7957 mylist = display_list
7958 for x in unsatisfied_blockers:
7959 mylist.append((x, 0, True))
7961 last_merge_depth = 0
7962 for i in xrange(len(mylist)-1,-1,-1):
7963 graph_key, depth, ordered = mylist[i]
7964 if not ordered and depth == 0 and i > 0 \
7965 and graph_key == mylist[i-1][0] and \
7966 mylist[i-1][1] == 0:
7967 # An ordered node got a consecutive duplicate when the tree was
7971 if ordered and graph_key[-1] != "nomerge":
7972 last_merge_depth = depth
7974 if depth >= last_merge_depth or \
7975 i < len(mylist) - 1 and \
7976 depth >= mylist[i+1][1]:
7979 from portage import flatten
7980 from portage.dep import use_reduce, paren_reduce
7981 # files to fetch list - avoids counting a same file twice
7982 # in size display (verbose mode)
7985 # Use this set to detect when all the "repoadd" strings are "[0]"
7986 # and disable the entire repo display in this case.
7989 for mylist_index in xrange(len(mylist)):
7990 x, depth, ordered = mylist[mylist_index]
7994 portdb = self.trees[myroot]["porttree"].dbapi
7995 bindb = self.trees[myroot]["bintree"].dbapi
7996 vardb = self.trees[myroot]["vartree"].dbapi
7997 vartree = self.trees[myroot]["vartree"]
7998 pkgsettings = self.pkgsettings[myroot]
8001 indent = " " * depth
8003 if isinstance(x, Blocker):
8005 blocker_style = "PKG_BLOCKER_SATISFIED"
8006 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
8008 blocker_style = "PKG_BLOCKER"
8009 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
8011 counters.blocks += 1
8013 counters.blocks_satisfied += 1
8014 resolved = portage.key_expand(
8015 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
8016 if "--columns" in self.myopts and "--quiet" in self.myopts:
8017 addl += " " + colorize(blocker_style, resolved)
8019 addl = "[%s %s] %s%s" % \
8020 (colorize(blocker_style, "blocks"),
8021 addl, indent, colorize(blocker_style, resolved))
8022 block_parents = self._blocker_parents.parent_nodes(x)
8023 block_parents = set([pnode[2] for pnode in block_parents])
8024 block_parents = ", ".join(block_parents)
8026 addl += colorize(blocker_style,
8027 " (\"%s\" is blocking %s)") % \
8028 (str(x.atom).lstrip("!"), block_parents)
8030 addl += colorize(blocker_style,
8031 " (is blocking %s)") % block_parents
8032 if isinstance(x, Blocker) and x.satisfied:
8037 blockers.append(addl)
8040 pkg_merge = ordered and pkg_status == "merge"
8041 if not pkg_merge and pkg_status == "merge":
8042 pkg_status = "nomerge"
8043 built = pkg_type != "ebuild"
8044 installed = pkg_type == "installed"
8046 metadata = pkg.metadata
8048 repo_name = metadata["repository"]
8049 if pkg_type == "ebuild":
8050 ebuild_path = portdb.findname(pkg_key)
8051 if not ebuild_path: # shouldn't happen
8052 raise portage.exception.PackageNotFound(pkg_key)
8053 repo_path_real = os.path.dirname(os.path.dirname(
8054 os.path.dirname(ebuild_path)))
8056 repo_path_real = portdb.getRepositoryPath(repo_name)
8057 pkg_use = list(pkg.use.enabled)
8059 restrict = flatten(use_reduce(paren_reduce(
8060 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8061 except portage.exception.InvalidDependString, e:
8062 if not pkg.installed:
8063 show_invalid_depstring_notice(x,
8064 pkg.metadata["RESTRICT"], str(e))
8068 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8069 "fetch" in restrict:
8072 counters.restrict_fetch += 1
8073 if portdb.fetch_check(pkg_key, pkg_use):
8076 counters.restrict_fetch_satisfied += 1
8078 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
8079 #param is used for -u, where you still *do* want to see when something is being upgraded.
8082 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8083 if vardb.cpv_exists(pkg_key):
8084 addl=" "+yellow("R")+fetch+" "
8087 counters.reinst += 1
8088 elif pkg_status == "uninstall":
8089 counters.uninst += 1
8090 # filter out old-style virtual matches
8091 elif installed_versions and \
8092 portage.cpv_getkey(installed_versions[0]) == \
8093 portage.cpv_getkey(pkg_key):
8094 myinslotlist = vardb.match(pkg.slot_atom)
8095 # If this is the first install of a new-style virtual, we
8096 # need to filter out old-style virtual matches.
8097 if myinslotlist and \
8098 portage.cpv_getkey(myinslotlist[0]) != \
8099 portage.cpv_getkey(pkg_key):
8102 myoldbest = myinslotlist[:]
8104 if not portage.dep.cpvequal(pkg_key,
8105 portage.best([pkg_key] + myoldbest)):
8107 addl += turquoise("U")+blue("D")
8109 counters.downgrades += 1
8112 addl += turquoise("U") + " "
8114 counters.upgrades += 1
8116 # New slot, mark it new.
8117 addl = " " + green("NS") + fetch + " "
8118 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8120 counters.newslot += 1
8122 if "--changelog" in self.myopts:
8123 inst_matches = vardb.match(pkg.slot_atom)
8125 changelogs.extend(self.calc_changelog(
8126 portdb.findname(pkg_key),
8127 inst_matches[0], pkg_key))
8129 addl = " " + green("N") + " " + fetch + " "
8138 forced_flags = set()
8139 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8140 forced_flags.update(pkgsettings.useforce)
8141 forced_flags.update(pkgsettings.usemask)
8143 cur_use = [flag for flag in pkg.use.enabled \
8144 if flag in pkg.iuse.all]
8145 cur_iuse = sorted(pkg.iuse.all)
8147 if myoldbest and myinslotlist:
8148 previous_cpv = myoldbest[0]
8150 previous_cpv = pkg.cpv
8151 if vardb.cpv_exists(previous_cpv):
8152 old_iuse, old_use = vardb.aux_get(
8153 previous_cpv, ["IUSE", "USE"])
8154 old_iuse = list(set(
8155 filter_iuse_defaults(old_iuse.split())))
8157 old_use = old_use.split()
8164 old_use = [flag for flag in old_use if flag in old_iuse]
8166 use_expand = pkgsettings["USE_EXPAND"].lower().split()
8168 use_expand.reverse()
8169 use_expand_hidden = \
8170 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8172 def map_to_use_expand(myvals, forcedFlags=False,
8176 for exp in use_expand:
8179 for val in myvals[:]:
8180 if val.startswith(exp.lower()+"_"):
8181 if val in forced_flags:
8182 forced[exp].add(val[len(exp)+1:])
8183 ret[exp].append(val[len(exp)+1:])
8186 forced["USE"] = [val for val in myvals \
8187 if val in forced_flags]
8189 for exp in use_expand_hidden:
8195 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8196 # are the only thing that triggered reinstallation.
8197 reinst_flags_map = {}
8198 reinstall_for_flags = self._reinstall_nodes.get(pkg)
8199 reinst_expand_map = None
8200 if reinstall_for_flags:
8201 reinst_flags_map = map_to_use_expand(
8202 list(reinstall_for_flags), removeHidden=False)
8203 for k in list(reinst_flags_map):
8204 if not reinst_flags_map[k]:
8205 del reinst_flags_map[k]
8206 if not reinst_flags_map.get("USE"):
8207 reinst_expand_map = reinst_flags_map.copy()
8208 reinst_expand_map.pop("USE", None)
8209 if reinst_expand_map and \
8210 not set(reinst_expand_map).difference(
8212 use_expand_hidden = \
8213 set(use_expand_hidden).difference(
8216 cur_iuse_map, iuse_forced = \
8217 map_to_use_expand(cur_iuse, forcedFlags=True)
8218 cur_use_map = map_to_use_expand(cur_use)
8219 old_iuse_map = map_to_use_expand(old_iuse)
8220 old_use_map = map_to_use_expand(old_use)
8223 use_expand.insert(0, "USE")
8225 for key in use_expand:
8226 if key in use_expand_hidden:
8228 verboseadd += create_use_string(key.upper(),
8229 cur_iuse_map[key], iuse_forced[key],
8230 cur_use_map[key], old_iuse_map[key],
8231 old_use_map[key], is_new,
8232 reinst_flags_map.get(key))
8237 if pkg_type == "ebuild" and pkg_merge:
8239 myfilesdict = portdb.getfetchsizes(pkg_key,
8240 useflags=pkg_use, debug=self.edebug)
8241 except portage.exception.InvalidDependString, e:
8242 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8243 show_invalid_depstring_notice(x, src_uri, str(e))
8246 if myfilesdict is None:
8247 myfilesdict="[empty/missing/bad digest]"
8249 for myfetchfile in myfilesdict:
8250 if myfetchfile not in myfetchlist:
8251 mysize+=myfilesdict[myfetchfile]
8252 myfetchlist.append(myfetchfile)
8254 counters.totalsize += mysize
8255 verboseadd += format_size(mysize)
8258 # assign index for a previous version in the same slot
8259 has_previous = False
8260 repo_name_prev = None
8261 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8263 slot_matches = vardb.match(slot_atom)
8266 repo_name_prev = vardb.aux_get(slot_matches[0],
8269 # now use the data to generate output
8270 if pkg.installed or not has_previous:
8271 repoadd = repo_display.repoStr(repo_path_real)
8273 repo_path_prev = None
8275 repo_path_prev = portdb.getRepositoryPath(
8277 if repo_path_prev == repo_path_real:
8278 repoadd = repo_display.repoStr(repo_path_real)
8280 repoadd = "%s=>%s" % (
8281 repo_display.repoStr(repo_path_prev),
8282 repo_display.repoStr(repo_path_real))
8284 repoadd_set.add(repoadd)
8286 xs = [portage.cpv_getkey(pkg_key)] + \
8287 list(portage.catpkgsplit(pkg_key)[2:])
8294 if "COLUMNWIDTH" in self.settings:
8296 mywidth = int(self.settings["COLUMNWIDTH"])
8297 except ValueError, e:
8298 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8300 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8301 self.settings["COLUMNWIDTH"], noiselevel=-1)
8303 oldlp = mywidth - 30
8306 # Convert myoldbest from a list to a string.
8310 for pos, key in enumerate(myoldbest):
8311 key = portage.catpkgsplit(key)[2] + \
8312 "-" + portage.catpkgsplit(key)[3]
8313 if key[-3:] == "-r0":
8315 myoldbest[pos] = key
8316 myoldbest = blue("["+", ".join(myoldbest)+"]")
8319 root_config = self.roots[myroot]
8320 system_set = root_config.sets["system"]
8321 world_set = root_config.sets["world"]
8326 pkg_system = system_set.findAtomForPackage(pkg)
8327 pkg_world = world_set.findAtomForPackage(pkg)
8328 if not (oneshot or pkg_world) and \
8329 myroot == self.target_root and \
8330 favorites_set.findAtomForPackage(pkg):
8331 # Maybe it will be added to world now.
8332 if create_world_atom(pkg, favorites_set, root_config):
8334 except portage.exception.InvalidDependString:
8335 # This is reported elsewhere if relevant.
8338 def pkgprint(pkg_str):
8341 return colorize("PKG_MERGE_SYSTEM", pkg_str)
8343 return colorize("PKG_MERGE_WORLD", pkg_str)
8345 return colorize("PKG_MERGE", pkg_str)
8346 elif pkg_status == "uninstall":
8347 return colorize("PKG_UNINSTALL", pkg_str)
8350 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8352 return colorize("PKG_NOMERGE_WORLD", pkg_str)
8354 return colorize("PKG_NOMERGE", pkg_str)
8357 properties = flatten(use_reduce(paren_reduce(
8358 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8359 except portage.exception.InvalidDependString, e:
8360 if not pkg.installed:
8361 show_invalid_depstring_notice(pkg,
8362 pkg.metadata["PROPERTIES"], str(e))
8366 interactive = "interactive" in properties
8367 if interactive and pkg.operation == "merge":
8368 addl = colorize("WARN", "I") + addl[1:]
8370 counters.interactive += 1
8375 if "--columns" in self.myopts:
8376 if "--quiet" in self.myopts:
8377 myprint=addl+" "+indent+pkgprint(pkg_cp)
8378 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8379 myprint=myprint+myoldbest
8380 myprint=myprint+darkgreen("to "+x[1])
8384 myprint = "[%s] %s%s" % \
8385 (pkgprint(pkg_status.ljust(13)),
8386 indent, pkgprint(pkg.cp))
8388 myprint = "[%s %s] %s%s" % \
8389 (pkgprint(pkg.type_name), addl,
8390 indent, pkgprint(pkg.cp))
8391 if (newlp-nc_len(myprint)) > 0:
8392 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8393 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8394 if (oldlp-nc_len(myprint)) > 0:
8395 myprint=myprint+" "*(oldlp-nc_len(myprint))
8396 myprint=myprint+myoldbest
8397 myprint += darkgreen("to " + pkg.root)
8400 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8402 myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8403 myprint += indent + pkgprint(pkg_key) + " " + \
8404 myoldbest + darkgreen("to " + myroot)
8406 if "--columns" in self.myopts:
8407 if "--quiet" in self.myopts:
8408 myprint=addl+" "+indent+pkgprint(pkg_cp)
8409 myprint=myprint+" "+green(xs[1]+xs[2])+" "
8410 myprint=myprint+myoldbest
8414 myprint = "[%s] %s%s" % \
8415 (pkgprint(pkg_status.ljust(13)),
8416 indent, pkgprint(pkg.cp))
8418 myprint = "[%s %s] %s%s" % \
8419 (pkgprint(pkg.type_name), addl,
8420 indent, pkgprint(pkg.cp))
8421 if (newlp-nc_len(myprint)) > 0:
8422 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8423 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8424 if (oldlp-nc_len(myprint)) > 0:
8425 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8426 myprint += myoldbest
8429 myprint = "[%s] %s%s %s" % \
8430 (pkgprint(pkg_status.ljust(13)),
8431 indent, pkgprint(pkg.cpv),
8434 myprint = "[%s %s] %s%s %s" % \
8435 (pkgprint(pkg_type), addl, indent,
8436 pkgprint(pkg.cpv), myoldbest)
8438 if columns and pkg.operation == "uninstall":
8440 p.append((myprint, verboseadd, repoadd))
8442 if "--tree" not in self.myopts and \
8443 "--quiet" not in self.myopts and \
8444 not self._opts_no_restart.intersection(self.myopts) and \
8445 pkg.root == self._running_root.root and \
8446 portage.match_from_list(
8447 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8448 not vardb.cpv_exists(pkg.cpv) and \
8449 "--quiet" not in self.myopts:
8450 if mylist_index < len(mylist) - 1:
8451 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8452 p.append(colorize("WARN", " then resume the merge."))
8455 show_repos = repoadd_set and repoadd_set != set(["0"])
8458 if isinstance(x, basestring):
8459 out.write("%s\n" % (x,))
8462 myprint, verboseadd, repoadd = x
8465 myprint += " " + verboseadd
8467 if show_repos and repoadd:
8468 myprint += " " + teal("[%s]" % repoadd)
8470 out.write("%s\n" % (myprint,))
8479 sys.stdout.write(str(repo_display))
8481 if "--changelog" in self.myopts:
8483 for revision,text in changelogs:
8484 print bold('*'+revision)
8485 sys.stdout.write(text)
def display_problems(self):
	"""
	Display problems with the dependency graph such as slot collisions.
	This is called internally by display() to show the problems _after_
	the merge list where it is most likely to be seen, but if display()
	is not going to be called then this method should be called explicitly
	to ensure that the user is notified of problems with the graph.

	All output goes to stderr, except for unsatisfied dependencies which
	go to stdout for parsing by programs such as autounmask.
	"""

	# Note that show_masked_packages() sends it's output to
	# stdout, and some programs such as autounmask parse the
	# output in cases when emerge bails out. However, when
	# show_masked_packages() is called for installed packages
	# here, the message is a warning that is more appropriate
	# to send to stderr, so temporarily redirect stdout to
	# stderr. TODO: Fix output code so there's a cleaner way
	# to redirect everything to stderr.
	stdout = sys.stdout
	try:
		sys.stdout = sys.stderr
		self._display_problems()
	finally:
		# Always restore stdout, even if _display_problems() raises.
		sys.stdout = stdout

	# This goes to stdout for parsing by programs like autounmask.
	for pargs, kwargs in self._unsatisfied_deps_for_display:
		self._show_unsatisfied_dep(*pargs, **kwargs)
8525 def _display_problems(self):
8526 if self._circular_deps_for_display is not None:
8527 self._show_circular_deps(
8528 self._circular_deps_for_display)
8530 # The user is only notified of a slot conflict if
8531 # there are no unresolvable blocker conflicts.
8532 if self._unsatisfied_blockers_for_display is not None:
8533 self._show_unsatisfied_blockers(
8534 self._unsatisfied_blockers_for_display)
8536 self._show_slot_collision_notice()
8538 # TODO: Add generic support for "set problem" handlers so that
8539 # the below warnings aren't special cases for world only.
8541 if self._missing_args:
8542 world_problems = False
8543 if "world" in self._sets:
8544 # Filter out indirect members of world (from nested sets)
8545 # since only direct members of world are desired here.
8546 world_set = self.roots[self.target_root].sets["world"]
8547 for arg, atom in self._missing_args:
8548 if arg.name == "world" and atom in world_set:
8549 world_problems = True
8553 sys.stderr.write("\n!!! Problems have been " + \
8554 "detected with your world file\n")
8555 sys.stderr.write("!!! Please run " + \
8556 green("emaint --check world")+"\n\n")
8558 if self._missing_args:
8559 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8560 " Ebuilds for the following packages are either all\n")
8561 sys.stderr.write(colorize("BAD", "!!!") + \
8562 " masked or don't exist:\n")
8563 sys.stderr.write(" ".join(str(atom) for arg, atom in \
8564 self._missing_args) + "\n")
8566 if self._pprovided_args:
8568 for arg, atom in self._pprovided_args:
8569 if isinstance(arg, SetArg):
8571 arg_atom = (atom, atom)
8574 arg_atom = (arg.arg, atom)
8575 refs = arg_refs.setdefault(arg_atom, [])
8576 if parent not in refs:
8579 msg.append(bad("\nWARNING: "))
8580 if len(self._pprovided_args) > 1:
8581 msg.append("Requested packages will not be " + \
8582 "merged because they are listed in\n")
8584 msg.append("A requested package will not be " + \
8585 "merged because it is listed in\n")
8586 msg.append("package.provided:\n\n")
8587 problems_sets = set()
8588 for (arg, atom), refs in arg_refs.iteritems():
8591 problems_sets.update(refs)
8593 ref_string = ", ".join(["'%s'" % name for name in refs])
8594 ref_string = " pulled in by " + ref_string
8595 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8597 if "world" in problems_sets:
8598 msg.append("This problem can be solved in one of the following ways:\n\n")
8599 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
8600 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
8601 msg.append(" C) Remove offending entries from package.provided.\n\n")
8602 msg.append("The best course of action depends on the reason that an offending\n")
8603 msg.append("package.provided entry exists.\n\n")
8604 sys.stderr.write("".join(msg))
8606 masked_packages = []
8607 for pkg in self._masked_installed:
8608 root_config = pkg.root_config
8609 pkgsettings = self.pkgsettings[pkg.root]
8610 mreasons = get_masking_status(pkg, pkgsettings, root_config)
8611 masked_packages.append((root_config, pkgsettings,
8612 pkg.cpv, pkg.metadata, mreasons))
8614 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8615 " The following installed packages are masked:\n")
8616 show_masked_packages(masked_packages)
def calc_changelog(self,ebuildpath,current,next):
	"""
	Collect ChangeLog entries between two versions of a package.

	@param ebuildpath: path to the ebuild whose ChangeLog is read
	@param current: installed cpv (entries up to, not including, it)
	@param next: cpv about to be merged (entries start at it)
	@return: list of (revision, text) tuples, oldest last; [] when the
		ebuild or ChangeLog is unavailable or the current revision is
		not found in the ChangeLog.
	"""
	if ebuildpath == None or not os.path.exists(ebuildpath):
		return []
	# Reduce both cpvs to version strings (drop category/package),
	# normalizing away a trailing -r0.
	current = '-'.join(portage.catpkgsplit(current)[1:])
	if current.endswith('-r0'):
		current = current[:-3]
	next = '-'.join(portage.catpkgsplit(next)[1:])
	if next.endswith('-r0'):
		next = next[:-3]
	changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
	try:
		changelog = open(changelogpath).read()
	except SystemExit:
		raise # Needed else can't exit
	except:
		# Best effort: a missing/unreadable ChangeLog just means
		# nothing to display.
		return []
	divisions = self.find_changelog_tags(changelog)
	# skip entries for all revisions above the one we are about to emerge
	for i in range(len(divisions)):
		if divisions[i][0]==next:
			divisions = divisions[i:]
			break
	# find out how many entries we are going to display
	for i in range(len(divisions)):
		if divisions[i][0]==current:
			divisions = divisions[:i]
			break
	else:
		# couldnt find the current revision in the list. display nothing
		return []
	return divisions
def find_changelog_tags(self,changelog):
	"""
	Split a ChangeLog body into per-release sections.

	@param changelog: full text of a ChangeLog file
	@return: list of (release, text) tuples in file order, where
		release is the version tag from a "*pkg-ver" header line
		(with any ".ebuild" suffix and trailing "-r0" stripped) and
		text is everything up to the next header. Text before the
		first header is discarded.
	"""
	divs = []
	release = None
	while 1:
		match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
		if match is None:
			# No more headers: the remainder belongs to the last
			# release seen (if any).
			if release is not None:
				divs.append((release,changelog))
			return divs
		if release is not None:
			divs.append((release,changelog[:match.start()]))
		changelog = changelog[match.end():]
		release = match.group(1)
		if release.endswith('.ebuild'):
			release = release[:-7]
		if release.endswith('-r0'):
			release = release[:-3]
8672 def saveNomergeFavorites(self):
8673 """Find atoms in favorites that are not in the mergelist and add them
8674 to the world file if necessary."""
8675 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8676 "--oneshot", "--onlydeps", "--pretend"):
8677 if x in self.myopts:
8679 root_config = self.roots[self.target_root]
8680 world_set = root_config.sets["world"]
8682 world_locked = False
8683 if hasattr(world_set, "lock"):
8687 if hasattr(world_set, "load"):
8688 world_set.load() # maybe it's changed on disk
8690 args_set = self._sets["args"]
8691 portdb = self.trees[self.target_root]["porttree"].dbapi
8692 added_favorites = set()
8693 for x in self._set_nodes:
8694 pkg_type, root, pkg_key, pkg_status = x
8695 if pkg_status != "nomerge":
8699 myfavkey = create_world_atom(x, args_set, root_config)
8701 if myfavkey in added_favorites:
8703 added_favorites.add(myfavkey)
8704 except portage.exception.InvalidDependString, e:
8705 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8706 (pkg_key, str(e)), noiselevel=-1)
8707 writemsg("!!! see '%s'\n\n" % os.path.join(
8708 root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8711 for k in self._sets:
8712 if k in ("args", "world") or not root_config.sets[k].world_candidate:
8717 all_added.append(SETPREFIX + k)
8718 all_added.extend(added_favorites)
8721 print ">>> Recording %s in \"world\" favorites file..." % \
8722 colorize("INFORM", str(a))
8724 world_set.update(all_added)
8729 def loadResumeCommand(self, resume_data, skip_masked=False):
8731 Add a resume command to the graph and validate it in the process. This
8732 will raise a PackageNotFound exception if a package is not available.
8735 if not isinstance(resume_data, dict):
8738 mergelist = resume_data.get("mergelist")
8739 if not isinstance(mergelist, list):
8742 fakedb = self.mydbapi
8744 serialized_tasks = []
8747 if not (isinstance(x, list) and len(x) == 4):
8749 pkg_type, myroot, pkg_key, action = x
8750 if pkg_type not in self.pkg_tree_map:
8752 if action != "merge":
8754 tree_type = self.pkg_tree_map[pkg_type]
8755 mydb = trees[myroot][tree_type].dbapi
8756 db_keys = list(self._trees_orig[myroot][
8757 tree_type].dbapi._aux_cache_keys)
8759 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8761 # It does no exist or it is corrupt.
8762 if action == "uninstall":
8764 raise portage.exception.PackageNotFound(pkg_key)
8765 installed = action == "uninstall"
8766 built = pkg_type != "ebuild"
8767 root_config = self.roots[myroot]
8768 pkg = Package(built=built, cpv=pkg_key,
8769 installed=installed, metadata=metadata,
8770 operation=action, root_config=root_config,
8772 if pkg_type == "ebuild":
8773 pkgsettings = self.pkgsettings[myroot]
8774 pkgsettings.setcpv(pkg)
8775 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8776 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8777 self._pkg_cache[pkg] = pkg
8779 root_config = self.roots[pkg.root]
8780 if "merge" == pkg.operation and \
8781 not visible(root_config.settings, pkg):
8783 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8785 self._unsatisfied_deps_for_display.append(
8786 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8788 fakedb[myroot].cpv_inject(pkg)
8789 serialized_tasks.append(pkg)
8790 self.spinner.update()
8792 if self._unsatisfied_deps_for_display:
8795 if not serialized_tasks or "--nodeps" in self.myopts:
8796 self._serialized_tasks_cache = serialized_tasks
8797 self._scheduler_graph = self.digraph
8799 self._select_package = self._select_pkg_from_graph
8800 self.myparams.add("selective")
8801 # Always traverse deep dependencies in order to account for
8802 # potentially unsatisfied dependencies of installed packages.
8803 # This is necessary for correct --keep-going or --resume operation
8804 # in case a package from a group of circularly dependent packages
8805 # fails. In this case, a package which has recently been installed
8806 # may have an unsatisfied circular dependency (pulled in by
8807 # PDEPEND, for example). So, even though a package is already
8808 # installed, it may not have all of it's dependencies satisfied, so
8809 # it may not be usable. If such a package is in the subgraph of
8810 # deep depenedencies of a scheduled build, that build needs to
8811 # be cancelled. In order for this type of situation to be
8812 # recognized, deep traversal of dependencies is required.
8813 self.myparams.add("deep")
8815 favorites = resume_data.get("favorites")
8816 args_set = self._sets["args"]
8817 if isinstance(favorites, list):
8818 args = self._load_favorites(favorites)
8822 for task in serialized_tasks:
8823 if isinstance(task, Package) and \
8824 task.operation == "merge":
8825 if not self._add_pkg(task, None):
8828 # Packages for argument atoms need to be explicitly
8829 # added via _add_pkg() so that they are included in the
8830 # digraph (needed at least for --tree display).
8832 for atom in arg.set:
8833 pkg, existing_node = self._select_package(
8834 arg.root_config.root, atom)
8835 if existing_node is None and \
8837 if not self._add_pkg(pkg, Dependency(atom=atom,
8838 root=pkg.root, parent=arg)):
8841 # Allow unsatisfied deps here to avoid showing a masking
8842 # message for an unsatisfied dep that isn't necessarily
8844 if not self._create_graph(allow_unsatisfied=True):
8847 unsatisfied_deps = []
8848 for dep in self._unsatisfied_deps:
8849 if not isinstance(dep.parent, Package):
8851 if dep.parent.operation == "merge":
8852 unsatisfied_deps.append(dep)
8855 # For unsatisfied deps of installed packages, only account for
8856 # them if they are in the subgraph of dependencies of a package
8857 # which is scheduled to be installed.
8858 unsatisfied_install = False
8860 dep_stack = self.digraph.parent_nodes(dep.parent)
8862 node = dep_stack.pop()
8863 if not isinstance(node, Package):
8865 if node.operation == "merge":
8866 unsatisfied_install = True
8868 if node in traversed:
8871 dep_stack.extend(self.digraph.parent_nodes(node))
8873 if unsatisfied_install:
8874 unsatisfied_deps.append(dep)
8876 if masked_tasks or unsatisfied_deps:
8877 # This probably means that a required package
8878 # was dropped via --skipfirst. It makes the
8879 # resume list invalid, so convert it to a
8880 # UnsatisfiedResumeDep exception.
8881 raise self.UnsatisfiedResumeDep(self,
8882 masked_tasks + unsatisfied_deps)
8883 self._serialized_tasks_cache = None
8886 except self._unknown_internal_error:
def _load_favorites(self, favorites):
	"""
	Use a list of favorites to resume state from a
	previous select_files() call. This creates similar
	DependencyArg instances to those that would have
	been created by the original select_files() call.
	This allows Package instances to be matched with
	DependencyArg instances during graph creation.

	@param favorites: list of favorite strings (set names with
		SETPREFIX, or plain atoms); non-string entries are ignored
	@return: list of SetArg/AtomArg instances that were registered
	"""
	root_config = self.roots[self.target_root]
	getSetAtoms = root_config.setconfig.getSetAtoms
	sets = root_config.sets
	args = []
	for x in favorites:
		if not isinstance(x, basestring):
			continue
		# Bare "system"/"world" are treated as set references.
		if x in ("system", "world"):
			x = SETPREFIX + x
		if x.startswith(SETPREFIX):
			s = x[len(SETPREFIX):]
			if s not in sets:
				continue
			if s in self._sets:
				continue
			# Recursively expand sets so that containment tests in
			# self._get_parent_sets() properly match atoms in nested
			# sets (like if world contains system).
			expanded_set = InternalPackageSet(
				initial_atoms=getSetAtoms(s))
			self._sets[s] = expanded_set
			args.append(SetArg(arg=x, set=expanded_set,
				root_config=root_config))
		else:
			if not portage.isvalidatom(x):
				continue
			args.append(AtomArg(arg=x, atom=x,
				root_config=root_config))

	self._set_args(args)
	return args
class UnsatisfiedResumeDep(portage.exception.PortageException):
	"""
	A dependency of a resume list is not installed. This
	can occur when a required package is dropped from the
	merge list via --skipfirst.
	"""
	def __init__(self, depgraph, value):
		portage.exception.PortageException.__init__(self, value)
		# Keep a reference to the depgraph so the handler of this
		# exception can inspect the failed dependency state.
		self.depgraph = depgraph
class _internal_exception(portage.exception.PortageException):
	"""Common base for exceptions the depgraph uses internally."""
	def __init__(self, value=""):
		portage.exception.PortageException.__init__(self, value)
class _unknown_internal_error(_internal_exception):
	"""
	Used by the depgraph internally to terminate graph creation.
	The specific reason for the failure should have been dumped
	to stderr, unfortunately, the exact reason for the failure
	may not be known.
	"""
class _serialize_tasks_retry(_internal_exception):
	"""
	This is raised by the _serialize_tasks() method when it needs to
	be called again for some reason. The only case that it's currently
	used for is when neglected dependencies need to be added to the
	graph in order to avoid making a potentially unsafe decision.
	"""
8962 class _dep_check_composite_db(portage.dbapi):
8964 A dbapi-like interface that is optimized for use in dep_check() calls.
8965 This is built on top of the existing depgraph package selection logic.
8966 Some packages that have been added to the graph may be masked from this
8967 view in order to influence the atom preference selection that occurs
def __init__(self, depgraph, root):
	"""
	@param depgraph: the depgraph whose package selection logic
		backs this dbapi-like view
	@param root: the ROOT for which packages are selected; used as a
		key into the depgraph's per-root structures
	"""
	portage.dbapi.__init__(self)
	self._depgraph = depgraph
	self._root = root
	# Memoized match() results keyed by atom.
	self._match_cache = {}
	# Maps cpv -> Package for results returned by match().
	self._cpv_pkg_map = {}
def _clear_cache(self):
	# Invalidate memoized match() results and the cpv -> Package map
	# so that later lookups reflect the current graph state.
	for cached in (self._match_cache, self._cpv_pkg_map):
		cached.clear()
8981 def match(self, atom):
8982 ret = self._match_cache.get(atom)
8987 atom = self._dep_expand(atom)
8988 pkg, existing = self._depgraph._select_package(self._root, atom)
8992 # Return the highest available from select_package() as well as
8993 # any matching slots in the graph db.
8995 slots.add(pkg.metadata["SLOT"])
8996 atom_cp = portage.dep_getkey(atom)
8997 if pkg.cp.startswith("virtual/"):
8998 # For new-style virtual lookahead that occurs inside
8999 # dep_check(), examine all slots. This is needed
9000 # so that newer slots will not unnecessarily be pulled in
9001 # when a satisfying lower slot is already installed. For
9002 # example, if virtual/jdk-1.4 is satisfied via kaffe then
9003 # there's no need to pull in a newer slot to satisfy a
9004 # virtual/jdk dependency.
9005 for db, pkg_type, built, installed, db_keys in \
9006 self._depgraph._filtered_trees[self._root]["dbs"]:
9007 for cpv in db.match(atom):
9008 if portage.cpv_getkey(cpv) != pkg.cp:
9010 slots.add(db.aux_get(cpv, ["SLOT"])[0])
9012 if self._visible(pkg):
9013 self._cpv_pkg_map[pkg.cpv] = pkg
9015 slots.remove(pkg.metadata["SLOT"])
9017 slot_atom = "%s:%s" % (atom_cp, slots.pop())
9018 pkg, existing = self._depgraph._select_package(
9019 self._root, slot_atom)
9022 if not self._visible(pkg):
9024 self._cpv_pkg_map[pkg.cpv] = pkg
9027 self._cpv_sort_ascending(ret)
9028 self._match_cache[orig_atom] = ret
9031 def _visible(self, pkg):
9032 if pkg.installed and "selective" not in self._depgraph.myparams:
9034 arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
9035 except (StopIteration, portage.exception.InvalidDependString):
9042 self._depgraph.pkgsettings[pkg.root], pkg):
9044 except portage.exception.InvalidDependString:
9046 in_graph = self._depgraph._slot_pkg_map[
9047 self._root].get(pkg.slot_atom)
9048 if in_graph is None:
9049 # Mask choices for packages which are not the highest visible
9050 # version within their slot (since they usually trigger slot
9052 highest_visible, in_graph = self._depgraph._select_package(
9053 self._root, pkg.slot_atom)
9054 if pkg != highest_visible:
9056 elif in_graph != pkg:
9057 # Mask choices for packages that would trigger a slot
9058 # conflict with a previously selected package.
9062 def _dep_expand(self, atom):
9064 This is only needed for old installed packages that may
9065 contain atoms that are not fully qualified with a specific
9066 category. Emulate the cpv_expand() function that's used by
9067 dbapi.match() in cases like this. If there are multiple
9068 matches, it's often due to a new-style virtual that has
9069 been added, so try to filter those out to avoid raising
9072 root_config = self._depgraph.roots[self._root]
9074 expanded_atoms = self._depgraph._dep_expand(root_config, atom)
9075 if len(expanded_atoms) > 1:
9076 non_virtual_atoms = []
9077 for x in expanded_atoms:
9078 if not portage.dep_getkey(x).startswith("virtual/"):
9079 non_virtual_atoms.append(x)
9080 if len(non_virtual_atoms) == 1:
9081 expanded_atoms = non_virtual_atoms
9082 if len(expanded_atoms) > 1:
9083 # compatible with portage.cpv_expand()
9084 raise portage.exception.AmbiguousPackageName(
9085 [portage.dep_getkey(x) for x in expanded_atoms])
9087 atom = expanded_atoms[0]
9089 null_atom = insert_category_into_atom(atom, "null")
9090 null_cp = portage.dep_getkey(null_atom)
9091 cat, atom_pn = portage.catsplit(null_cp)
9092 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9094 # Allow the resolver to choose which virtual.
9095 atom = insert_category_into_atom(atom, "virtual")
9097 atom = insert_category_into_atom(atom, "null")
9100 def aux_get(self, cpv, wants):
9101 metadata = self._cpv_pkg_map[cpv].metadata
9102 return [metadata.get(x, "") for x in wants]
# NOTE(review): elided listing excerpt; interior lines missing — comments only.
9104 class RepoDisplay(object):
# Maps repository paths (PORTDIR + overlays) to short display indices
# like "[0]", for annotating merge-list output.
9105 def __init__(self, roots):
9106 self._shown_repos = {}
9107 self._unknown_repo = False
9109 for root_config in roots.itervalues():
9110 portdir = root_config.settings.get("PORTDIR")
9112 repo_paths.add(portdir)
9113 overlays = root_config.settings.get("PORTDIR_OVERLAY")
9115 repo_paths.update(overlays.split())
9116 repo_paths = list(repo_paths)
9117 self._repo_paths = repo_paths
# Keep realpath()-resolved copies so repoStr() can match paths that
# reach us through symlinks.
9118 self._repo_paths_real = [ os.path.realpath(repo_path) \
9119 for repo_path in repo_paths ]
9121 # pre-allocate index for PORTDIR so that it always has index 0.
9122 for root_config in roots.itervalues():
9123 portdb = root_config.trees["porttree"].dbapi
9124 portdir = portdb.porttree_root
9126 self.repoStr(portdir)
9128 def repoStr(self, repo_path_real):
# Return the short index string for a repository path, assigning a new
# index on first sight; unknown paths set the "[?]" flag.
9131 real_index = self._repo_paths_real.index(repo_path_real)
9132 if real_index == -1:
9134 self._unknown_repo = True
9136 shown_repos = self._shown_repos
9137 repo_paths = self._repo_paths
9138 repo_path = repo_paths[real_index]
9139 index = shown_repos.get(repo_path)
9141 index = len(shown_repos)
9142 shown_repos[repo_path] = index
# (__str__ header elided) Render the legend: one line per shown repo,
# plus a "[?]" explanation when an unknown repo was encountered.
9148 shown_repos = self._shown_repos
9149 unknown_repo = self._unknown_repo
9150 if shown_repos or self._unknown_repo:
9151 output.append("Portage tree and overlays:\n")
9152 show_repo_paths = list(shown_repos)
9153 for repo_path, repo_index in shown_repos.iteritems():
9154 show_repo_paths[repo_index] = repo_path
9156 for index, repo_path in enumerate(show_repo_paths):
9157 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9159 output.append(" "+teal("[?]") + \
9160 " indicates that the source repository could not be determined\n")
9161 return "".join(output)
# NOTE(review): elided listing excerpt; interior lines missing — comments only.
9163 class PackageCounters(object):
# Tallies merge-list statistics (upgrades, downgrades, new slots, blocks,
# fetch restrictions, download size) and renders the "Total: ..." summary.
9173 self.blocks_satisfied = 0
9175 self.restrict_fetch = 0
9176 self.restrict_fetch_satisfied = 0
9177 self.interactive = 0
# (__str__ elided above) Build the human-readable totals line, with
# manual singular/plural handling for each counter.
9180 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9183 myoutput.append("Total: %s package" % total_installs)
9184 if total_installs != 1:
9185 myoutput.append("s")
9186 if total_installs != 0:
9187 myoutput.append(" (")
9188 if self.upgrades > 0:
9189 details.append("%s upgrade" % self.upgrades)
9190 if self.upgrades > 1:
9192 if self.downgrades > 0:
9193 details.append("%s downgrade" % self.downgrades)
9194 if self.downgrades > 1:
9197 details.append("%s new" % self.new)
9198 if self.newslot > 0:
9199 details.append("%s in new slot" % self.newslot)
9200 if self.newslot > 1:
9203 details.append("%s reinstall" % self.reinst)
9207 details.append("%s uninstall" % self.uninst)
9210 if self.interactive > 0:
9211 details.append("%s %s" % (self.interactive,
9212 colorize("WARN", "interactive")))
9213 myoutput.append(", ".join(details))
9214 if total_installs != 0:
9215 myoutput.append(")")
9216 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
9217 if self.restrict_fetch:
9218 myoutput.append("\nFetch Restriction: %s package" % \
9219 self.restrict_fetch)
9220 if self.restrict_fetch > 1:
9221 myoutput.append("s")
9222 if self.restrict_fetch_satisfied < self.restrict_fetch:
9223 myoutput.append(bad(" (%s unsatisfied)") % \
9224 (self.restrict_fetch - self.restrict_fetch_satisfied))
9226 myoutput.append("\nConflict: %s block" % \
9229 myoutput.append("s")
9230 if self.blocks_satisfied < self.blocks:
9231 myoutput.append(bad(" (%s unsatisfied)") % \
9232 (self.blocks - self.blocks_satisfied))
9233 return "".join(myoutput)
# NOTE(review): elided listing excerpt; interior lines missing — comments only.
9235 class UseFlagDisplay(object):
# Lightweight record for one USE flag in --verbose output; __slots__
# keeps per-instance memory small since many are created.
9237 __slots__ = ('name', 'enabled', 'forced')
9239 def __init__(self, name, enabled, forced):
9241 self.enabled = enabled
9242 self.forced = forced
# Two alternative comparators for cmp_sort_key(); the subtraction
# idiom returns -1/0/1 as a classic cmp-style result.
9256 def cmp_combined(cls, a, b):
9258 Sort by name, combining enabled and disabled flags.
9260 return (a.name > b.name) - (a.name < b.name)
9263 def cmp_separated(cls, a, b):
9265 Sort by name, separating enabled flags from disabled flags.
# enabled flags sort before disabled ones; ties fall back to name order.
9267 enabled_diff = b.enabled - a.enabled
9270 return (a.name > b.name) - (a.name < b.name)
# NOTE(review): elided listing excerpt; interior lines missing — comments only.
9272 class PollSelectAdapter(PollConstants):
# Emulates the select.poll() object API on top of select.select(), for
# platforms where poll() is missing or broken (e.g. ptys on Darwin).
9275 Use select to emulate a poll object, for
9276 systems that don't support poll().
9280 self._registered = {}
# Cached 3-list argument for select.select(); invalidated (set to None)
# whenever the registration set changes.
9281 self._select_args = [[], [], []]
9283 def register(self, fd, *args):
9285 Only POLLIN is currently supported!
9289 "register expected at most 2 arguments, got " + \
9290 repr(1 + len(args)))
9292 eventmask = PollConstants.POLLIN | \
9293 PollConstants.POLLPRI | PollConstants.POLLOUT
9297 self._registered[fd] = eventmask
9298 self._select_args = None
9300 def unregister(self, fd):
9301 self._select_args = None
9302 del self._registered[fd]
9304 def poll(self, *args):
9307 "poll expected at most 2 arguments, got " + \
9308 repr(1 + len(args))
9314 select_args = self._select_args
9315 if select_args is None:
9316 select_args = [self._registered.keys(), [], []]
9318 if timeout is not None:
9319 select_args = select_args[:]
9320 # Translate poll() timeout args to select() timeout args:
9322 # | units | value(s) for indefinite block
9323 # ---------|--------------|------------------------------
9324 # poll | milliseconds | omitted, negative, or None
9325 # ---------|--------------|------------------------------
9326 # select | seconds | omitted
9327 # ---------|--------------|------------------------------
9329 if timeout is not None and timeout < 0:
9331 if timeout is not None:
# poll() takes milliseconds; select() takes seconds.
9332 select_args.append(timeout / 1000)
9334 select_events = select.select(*select_args)
# Only readability is reported, as POLLIN events (see register()).
9336 for fd in select_events[0]:
9337 poll_events.append((fd, PollConstants.POLLIN))
# NOTE(review): elided listing excerpt; interior lines missing — comments only.
9340 class SequentialTaskQueue(SlotObject):
# FIFO queue of asynchronous tasks, started up to max_jobs at a time
# (max_jobs may be True, meaning unlimited).
9342 __slots__ = ("max_jobs", "running_tasks") + \
9343 ("_dirty", "_scheduling", "_task_queue")
9345 def __init__(self, **kwargs):
9346 SlotObject.__init__(self, **kwargs)
# deque gives O(1) popleft()/appendleft() for FIFO scheduling.
9347 self._task_queue = deque()
9348 self.running_tasks = set()
9349 if self.max_jobs is None:
9353 def add(self, task):
9354 self._task_queue.append(task)
9357 def addFront(self, task):
# High-priority insertion: task will be started before queued ones.
9358 self._task_queue.appendleft(task)
# (schedule() header elided) Start queued tasks while capacity remains;
# guarded against re-entrancy from exit listeners.
9369 if self._scheduling:
9370 # Ignore any recursive schedule() calls triggered via
9371 # self._task_exit().
9374 self._scheduling = True
9376 task_queue = self._task_queue
9377 running_tasks = self.running_tasks
9378 max_jobs = self.max_jobs
9379 state_changed = False
9381 while task_queue and \
9382 (max_jobs is True or len(running_tasks) < max_jobs):
9383 task = task_queue.popleft()
9384 cancelled = getattr(task, "cancelled", None)
9386 running_tasks.add(task)
9387 task.addExitListener(self._task_exit)
9389 state_changed = True
9392 self._scheduling = False
9394 return state_changed
9396 def _task_exit(self, task):
9398 Since we can always rely on exit listeners being called, the set of
9399 running tasks is always pruned automatically and there is never any need
9400 to actively prune it.
9402 self.running_tasks.remove(task)
9403 if self._task_queue:
# (clear() header elided) Drop queued tasks and detach exit listeners
# from everything still running.
9407 self._task_queue.clear()
9408 running_tasks = self.running_tasks
9409 while running_tasks:
9410 task = running_tasks.pop()
9411 task.removeExitListener(self._task_exit)
# Truthiness/len reflect total outstanding work (queued + running).
9415 def __nonzero__(self):
9416 return bool(self._task_queue or self.running_tasks)
9419 return len(self._task_queue) + len(self.running_tasks)
# Memoized result of can_poll_device(); None means "not yet probed".
9421 _can_poll_device = None
9423 def can_poll_device():
9425 Test if it's possible to use poll() on a device such as a pty. This
9426 is known to fail on Darwin.
9428 @returns: True if poll() on a device succeeds, False otherwise.
9431 global _can_poll_device
9432 if _can_poll_device is not None:
9433 return _can_poll_device
9435 if not hasattr(select, "poll"):
9436 _can_poll_device = False
9437 return _can_poll_device
# Probe: register /dev/null and see whether poll() reports POLLNVAL,
# which indicates a broken poll() implementation for devices.
# NOTE(review): elided listing — the try/except around open() and the
# poll-object construction are missing from this excerpt.
9440 dev_null = open('/dev/null', 'rb')
9442 _can_poll_device = False
9443 return _can_poll_device
9446 p.register(dev_null.fileno(), PollConstants.POLLIN)
9448 invalid_request = False
9449 for f, event in p.poll():
9450 if event & PollConstants.POLLNVAL:
9451 invalid_request = True
9455 _can_poll_device = not invalid_request
9456 return _can_poll_device
9458 def create_poll_instance():
9460 Create an instance of select.poll, or an instance of
9461 PollSelectAdapter there is no poll() implementation or
9462 it is broken somehow.
# Factory: prefer the native poll() when the probe says it works on
# devices; otherwise fall back to the select()-based adapter.
9464 if can_poll_device():
9465 return select.poll()
9466 return PollSelectAdapter()
# Use os.getloadavg() when available; otherwise install a /proc-based
# emulation (Linux) with the same OSError-raising contract.
9468 getloadavg = getattr(os, "getloadavg", None)
9469 if getloadavg is None:
9472 Uses /proc/loadavg to emulate os.getloadavg().
9473 Raises OSError if the load average was unobtainable.
# NOTE(review): elided listing — the def line and try/except around the
# open() call are missing from this excerpt.
9476 loadavg_str = open('/proc/loadavg').readline()
9478 # getloadavg() is only supposed to raise OSError, so convert
9479 raise OSError('unknown')
9480 loadavg_split = loadavg_str.split()
9481 if len(loadavg_split) < 3:
9482 raise OSError('unknown')
# First three fields are the 1/5/15-minute averages; any parse failure
# is normalized to OSError as well.
9486 loadavg_floats.append(float(loadavg_split[i]))
9488 raise OSError('unknown')
9489 return tuple(loadavg_floats)
# NOTE(review): elided listing excerpt; interior lines missing — comments only.
9491 class PollScheduler(object):
# Base class for event-driven schedulers: owns a poll object, a queue of
# pending poll events, and an fd -> handler registry keyed by ids.
9493 class _sched_iface_class(SlotObject):
9494 __slots__ = ("register", "schedule", "unregister")
9498 self._max_load = None
9500 self._poll_event_queue = []
9501 self._poll_event_handlers = {}
9502 self._poll_event_handler_ids = {}
9503 # Increment id for each new handler.
9504 self._event_handler_id = 0
9505 self._poll_obj = create_poll_instance()
9506 self._scheduling = False
9508 def _schedule(self):
9510 Calls _schedule_tasks() and automatically returns early from
9511 any recursive calls to this method that the _schedule_tasks()
9512 call might trigger. This makes _schedule() safe to call from
9513 inside exit listeners.
9515 if self._scheduling:
9517 self._scheduling = True
9519 return self._schedule_tasks()
9521 self._scheduling = False
# Subclass hook: number of currently running jobs (elided body).
9523 def _running_job_count(self):
9526 def _can_add_job(self):
# Gate new job starts on both the --jobs limit and, when parallelism is
# enabled, the 1-minute load average vs. max_load.
9527 max_jobs = self._max_jobs
9528 max_load = self._max_load
9530 if self._max_jobs is not True and \
9531 self._running_job_count() >= self._max_jobs:
9534 if max_load is not None and \
9535 (max_jobs is True or max_jobs > 1) and \
9536 self._running_job_count() >= 1:
9538 avg1, avg5, avg15 = getloadavg()
9542 if avg1 >= max_load:
9547 def _poll(self, timeout=None):
9549 All poll() calls pass through here. The poll events
9550 are added directly to self._poll_event_queue.
9551 In order to avoid endless blocking, this raises
9552 StopIteration if timeout is None and there are
9553 no file descriptors to poll.
9555 if not self._poll_event_handlers:
9557 if timeout is None and \
9558 not self._poll_event_handlers:
9559 raise StopIteration(
9560 "timeout is None and there are no poll() event handlers")
9562 # The following error is known to occur with Linux kernel versions
9565 # select.error: (4, 'Interrupted system call')
9567 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9568 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9569 # without any events.
9572 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9574 except select.error, e:
9575 writemsg_level("\n!!! select error: %s\n" % (e,),
9576 level=logging.ERROR, noiselevel=-1)
9578 if timeout is not None:
9581 def _next_poll_event(self, timeout=None):
9583 Since the _schedule_wait() loop is called by event
9584 handlers from _poll_loop(), maintain a central event
9585 queue for both of them to share events from a single
9586 poll() call. In order to avoid endless blocking, this
9587 raises StopIteration if timeout is None and there are
9588 no file descriptors to poll.
9590 if not self._poll_event_queue:
9592 return self._poll_event_queue.pop()
9594 def _poll_loop(self):
# Dispatch events to handlers until all handlers unregister; the
# assertion below guards against spinning without progress.
9596 event_handlers = self._poll_event_handlers
9597 event_handled = False
9600 while event_handlers:
9601 f, event = self._next_poll_event()
9602 handler, reg_id = event_handlers[f]
9604 event_handled = True
9605 except StopIteration:
9606 event_handled = True
9608 if not event_handled:
9609 raise AssertionError("tight loop")
9611 def _schedule_yield(self):
9613 Schedule for a short period of time chosen by the scheduler based
9614 on internal state. Synchronous tasks should call this periodically
9615 in order to allow the scheduler to service pending poll events. The
9616 scheduler will call poll() exactly once, without blocking, and any
9617 resulting poll events will be serviced.
9619 event_handlers = self._poll_event_handlers
9622 if not event_handlers:
9623 return bool(events_handled)
9625 if not self._poll_event_queue:
9629 while event_handlers and self._poll_event_queue:
9630 f, event = self._next_poll_event()
9631 handler, reg_id = event_handlers[f]
9634 except StopIteration:
9637 return bool(events_handled)
9639 def _register(self, f, eventmask, handler):
9642 @return: A unique registration id, for use in schedule() or
# Each fd registers exactly once; duplicate registration is a bug.
9645 if f in self._poll_event_handlers:
9646 raise AssertionError("fd %d is already registered" % f)
9647 self._event_handler_id += 1
9648 reg_id = self._event_handler_id
9649 self._poll_event_handler_ids[reg_id] = f
9650 self._poll_event_handlers[f] = (handler, reg_id)
9651 self._poll_obj.register(f, eventmask)
9654 def _unregister(self, reg_id):
# Reverse of _register(): drop both mappings and the poll registration.
9655 f = self._poll_event_handler_ids[reg_id]
9656 self._poll_obj.unregister(f)
9657 del self._poll_event_handlers[f]
9658 del self._poll_event_handler_ids[reg_id]
9660 def _schedule_wait(self, wait_ids):
9662 Schedule until wait_id is not longer registered
9665 @param wait_id: a task id to wait for
9667 event_handlers = self._poll_event_handlers
9668 handler_ids = self._poll_event_handler_ids
9669 event_handled = False
# Accept a single id for convenience; normalize to a set.
9671 if isinstance(wait_ids, int):
9672 wait_ids = frozenset([wait_ids])
9675 while wait_ids.intersection(handler_ids):
9676 f, event = self._next_poll_event()
9677 handler, reg_id = event_handlers[f]
9679 event_handled = True
9680 except StopIteration:
9681 event_handled = True
9683 return event_handled
# NOTE(review): elided listing excerpt; interior lines missing — comments only.
9685 class QueueScheduler(PollScheduler):
9688 Add instances of SequentialTaskQueue and then call run(). The
9689 run() method returns when no tasks remain.
9692 def __init__(self, max_jobs=None, max_load=None):
9693 PollScheduler.__init__(self)
9695 if max_jobs is None:
9698 self._max_jobs = max_jobs
9699 self._max_load = max_load
# Public scheduling interface handed to tasks (register/schedule/
# unregister bound to this scheduler's internals).
9700 self.sched_iface = self._sched_iface_class(
9701 register=self._register,
9702 schedule=self._schedule_wait,
9703 unregister=self._unregister)
9706 self._schedule_listeners = []
9709 self._queues.append(q)
9711 def remove(self, q):
9712 self._queues.remove(q)
# (run() header elided) Drive scheduling until no queue can start more
# work, then poll until all running jobs have exited.
9716 while self._schedule():
9719 while self._running_job_count():
9722 def _schedule_tasks(self):
9725 @returns: True if there may be remaining tasks to schedule,
# Start jobs while capacity remains; stop when _start_next_job() makes
# no progress. Remaining-queue check follows (body partly elided).
9728 while self._can_add_job():
9729 n = self._max_jobs - self._running_job_count()
9733 if not self._start_next_job(n):
9736 for q in self._queues:
9741 def _running_job_count(self):
# Sum running tasks across all managed queues; cached in self._jobs.
9743 for q in self._queues:
9744 job_count += len(q.running_tasks)
9745 self._jobs = job_count
9748 def _start_next_job(self, n=1):
# Ask each queue to schedule work until n new jobs have started;
# returns the number actually started (may be < n).
9750 for q in self._queues:
9751 initial_job_count = len(q.running_tasks)
9753 final_job_count = len(q.running_tasks)
9754 if final_job_count > initial_job_count:
9755 started_count += (final_job_count - initial_job_count)
9756 if started_count >= n:
9758 return started_count
# NOTE(review): elided listing excerpt — comments only.
9760 class TaskScheduler(object):
# Convenience facade: one SequentialTaskQueue driven by one
# QueueScheduler; run() is delegated directly to the scheduler.
9763 A simple way to handle scheduling of AsynchrousTask instances. Simply
9764 add tasks and call run(). The run() method returns when no tasks remain.
9767 def __init__(self, max_jobs=None, max_load=None):
9768 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9769 self._scheduler = QueueScheduler(
9770 max_jobs=max_jobs, max_load=max_load)
9771 self.sched_iface = self._scheduler.sched_iface
# Expose run() by attribute binding rather than a wrapper method.
9772 self.run = self._scheduler.run
9773 self._scheduler.add(self._queue)
9775 def add(self, task):
9776 self._queue.add(task)
# NOTE(review): elided listing excerpt; interior lines missing — comments only.
9778 class JobStatusDisplay(object):
# Renders the one-line "Jobs: x of y complete ... Load avg: ..." status,
# using terminal control codes when stdout is a tty.
# Changes to these attributes mark the display dirty (see __setattr__).
9780 _bound_properties = ("curval", "failed", "running")
9781 _jobs_column_width = 48
9783 # Don't update the display unless at least this much
9784 # time has passed, in units of seconds.
9785 _min_display_latency = 2
9787 _default_term_codes = {
9793 _termcap_name_map = {
9794 'carriage_return' : 'cr',
9799 def __init__(self, out=sys.stdout, quiet=False):
# object.__setattr__ is used throughout to bypass this class's own
# __setattr__ hook (which triggers display updates).
9800 object.__setattr__(self, "out", out)
9801 object.__setattr__(self, "quiet", quiet)
9802 object.__setattr__(self, "maxval", 0)
9803 object.__setattr__(self, "merges", 0)
9804 object.__setattr__(self, "_changed", False)
9805 object.__setattr__(self, "_displayed", False)
9806 object.__setattr__(self, "_last_display_time", 0)
9807 object.__setattr__(self, "width", 80)
9810 isatty = hasattr(out, "isatty") and out.isatty()
9811 object.__setattr__(self, "_isatty", isatty)
# Fall back to hard-coded codes when termcap initialization fails.
9812 if not isatty or not self._init_term():
9814 for k, capname in self._termcap_name_map.iteritems():
9815 term_codes[k] = self._default_term_codes[capname]
9816 object.__setattr__(self, "_term_codes", term_codes)
9817 encoding = sys.getdefaultencoding()
9818 for k, v in self._term_codes.items():
9819 if not isinstance(v, basestring):
9820 self._term_codes[k] = v.decode(encoding, 'replace')
9822 def _init_term(self):
9824 Initialize term control codes.
9826 @returns: True if term codes were successfully initialized,
9830 term_type = os.environ.get("TERM", "vt100")
9836 curses.setupterm(term_type, self.out.fileno())
9837 tigetstr = curses.tigetstr
9838 except curses.error:
9843 if tigetstr is None:
9847 for k, capname in self._termcap_name_map.iteritems():
9848 code = tigetstr(capname)
9850 code = self._default_term_codes[capname]
9851 term_codes[k] = code
9852 object.__setattr__(self, "_term_codes", term_codes)
9855 def _format_msg(self, msg):
9856 return ">>> %s" % msg
# (erase/line-reset helper elided) Moves to column 0 and clears the line.
9860 self._term_codes['carriage_return'] + \
9861 self._term_codes['clr_eol'])
9863 self._displayed = False
9865 def _display(self, line):
9866 self.out.write(line)
9868 self._displayed = True
9870 def _update(self, msg):
# Non-tty output is written plainly with a newline; tty output rewrites
# the status line in place.
9873 if not self._isatty:
9874 out.write(self._format_msg(msg) + self._term_codes['newline'])
9876 self._displayed = True
9882 self._display(self._format_msg(msg))
9884 def displayMessage(self, msg):
# Print a one-off message, temporarily clearing the status line so the
# message does not interleave with it.
9886 was_displayed = self._displayed
9888 if self._isatty and self._displayed:
9891 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9893 self._displayed = False
9896 self._changed = True
# (reset() elided above) Zero the bound counters and finish the line.
9902 for name in self._bound_properties:
9903 object.__setattr__(self, name, 0)
9906 self.out.write(self._term_codes['newline'])
9908 self._displayed = False
9910 def __setattr__(self, name, value):
# Intercept attribute writes so changes to bound properties mark the
# display dirty; no-op when the value is unchanged.
9911 old_value = getattr(self, name)
9912 if value == old_value:
9914 object.__setattr__(self, name, value)
9915 if name in self._bound_properties:
9916 self._property_change(name, old_value, value)
9918 def _property_change(self, name, old_value, new_value):
9919 self._changed = True
9922 def _load_avg_str(self):
# Format the load averages with a precision chosen above (elided).
9937 return ", ".join(("%%.%df" % digits ) % x for x in avg)
9941 Display status on stdout, but only if something has
9942 changed since the last call.
# Rate-limit redraws on ttys to _min_display_latency seconds.
9948 current_time = time.time()
9949 time_delta = current_time - self._last_display_time
9950 if self._displayed and \
9952 if not self._isatty:
9954 if time_delta < self._min_display_latency:
9957 self._last_display_time = current_time
9958 self._changed = False
9959 self._display_status()
9961 def _display_status(self):
9962 # Don't use len(self._completed_tasks) here since that also
9963 # can include uninstall tasks.
9964 curval_str = str(self.curval)
9965 maxval_str = str(self.maxval)
9966 running_str = str(self.running)
9967 failed_str = str(self.failed)
9968 load_avg_str = self._load_avg_str()
# Render twice in lockstep: colored output for the terminal, plain
# output for width measurement / truncation / xterm title.
9970 color_output = StringIO()
9971 plain_output = StringIO()
9972 style_file = portage.output.ConsoleStyleFile(color_output)
9973 style_file.write_listener = plain_output
9974 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9975 style_writer.style_listener = style_file.new_styles
9976 f = formatter.AbstractFormatter(style_writer)
9978 number_style = "INFORM"
9979 f.add_literal_data("Jobs: ")
9980 f.push_style(number_style)
9981 f.add_literal_data(curval_str)
9983 f.add_literal_data(" of ")
9984 f.push_style(number_style)
9985 f.add_literal_data(maxval_str)
9987 f.add_literal_data(" complete")
9990 f.add_literal_data(", ")
9991 f.push_style(number_style)
9992 f.add_literal_data(running_str)
9994 f.add_literal_data(" running")
9997 f.add_literal_data(", ")
9998 f.push_style(number_style)
9999 f.add_literal_data(failed_str)
10001 f.add_literal_data(" failed")
10003 padding = self._jobs_column_width - len(plain_output.getvalue())
10005 f.add_literal_data(padding * " ")
10007 f.add_literal_data("Load avg: ")
10008 f.add_literal_data(load_avg_str)
10010 # Truncate to fit width, to avoid making the terminal scroll if the
10011 # line overflows (happens when the load average is large).
10012 plain_output = plain_output.getvalue()
10013 if self._isatty and len(plain_output) > self.width:
10014 # Use plain_output here since it's easier to truncate
10015 # properly than the color output which contains console
10017 self._update(plain_output[:self.width])
10019 self._update(color_output.getvalue())
# Mirror the status into the xterm window title (whitespace collapsed).
10021 xtermTitle(" ".join(plain_output.split()))
10023 class Scheduler(PollScheduler):
10025 _opts_ignore_blockers = \
10026 frozenset(["--buildpkgonly",
10027 "--fetchonly", "--fetch-all-uri",
10028 "--nodeps", "--pretend"])
10030 _opts_no_background = \
10031 frozenset(["--pretend",
10032 "--fetchonly", "--fetch-all-uri"])
10034 _opts_no_restart = frozenset(["--buildpkgonly",
10035 "--fetchonly", "--fetch-all-uri", "--pretend"])
10037 _bad_resume_opts = set(["--ask", "--changelog",
10038 "--resume", "--skipfirst"])
10040 _fetch_log = "/var/log/emerge-fetch.log"
10042 class _iface_class(SlotObject):
10043 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
10044 "dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
10045 "scheduleSetup", "scheduleUnpack", "scheduleYield",
10048 class _fetch_iface_class(SlotObject):
10049 __slots__ = ("log_file", "schedule")
10051 _task_queues_class = slot_dict_class(
10052 ("merge", "jobs", "fetch", "unpack"), prefix="")
10054 class _build_opts_class(SlotObject):
10055 __slots__ = ("buildpkg", "buildpkgonly",
10056 "fetch_all_uri", "fetchonly", "pretend")
10058 class _binpkg_opts_class(SlotObject):
10059 __slots__ = ("fetchonly", "getbinpkg", "pretend")
10061 class _pkg_count_class(SlotObject):
10062 __slots__ = ("curval", "maxval")
10064 class _emerge_log_class(SlotObject):
10065 __slots__ = ("xterm_titles",)
10067 def log(self, *pargs, **kwargs):
10068 if not self.xterm_titles:
10069 # Avoid interference with the scheduler's status display.
10070 kwargs.pop("short_msg", None)
10071 emergelog(self.xterm_titles, *pargs, **kwargs)
10073 class _failed_pkg(SlotObject):
10074 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
10076 class _ConfigPool(object):
10077 """Interface for a task to temporarily allocate a config
10078 instance from a pool. This allows a task to be constructed
10079 long before the config instance actually becomes needed, like
10080 when prefetchers are constructed for the whole merge list."""
10081 __slots__ = ("_root", "_allocate", "_deallocate")
10082 def __init__(self, root, allocate, deallocate):
10084 self._allocate = allocate
10085 self._deallocate = deallocate
10086 def allocate(self):
10087 return self._allocate(self._root)
10088 def deallocate(self, settings):
10089 self._deallocate(settings)
10091 class _unknown_internal_error(portage.exception.PortageException):
10093 Used internally to terminate scheduling. The specific reason for
10094 the failure should have been dumped to stderr.
10096 def __init__(self, value=""):
10097 portage.exception.PortageException.__init__(self, value)
10099 def __init__(self, settings, trees, mtimedb, myopts,
10100 spinner, mergelist, favorites, digraph):
10101 PollScheduler.__init__(self)
10102 self.settings = settings
10103 self.target_root = settings["ROOT"]
10105 self.myopts = myopts
10106 self._spinner = spinner
10107 self._mtimedb = mtimedb
10108 self._mergelist = mergelist
10109 self._favorites = favorites
10110 self._args_set = InternalPackageSet(favorites)
10111 self._build_opts = self._build_opts_class()
10112 for k in self._build_opts.__slots__:
10113 setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10114 self._binpkg_opts = self._binpkg_opts_class()
10115 for k in self._binpkg_opts.__slots__:
10116 setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10119 self._logger = self._emerge_log_class()
10120 self._task_queues = self._task_queues_class()
10121 for k in self._task_queues.allowed_keys:
10122 setattr(self._task_queues, k,
10123 SequentialTaskQueue())
10125 # Holds merges that will wait to be executed when no builds are
10126 # executing. This is useful for system packages since dependencies
10127 # on system packages are frequently unspecified.
10128 self._merge_wait_queue = []
10129 # Holds merges that have been transfered from the merge_wait_queue to
10130 # the actual merge queue. They are removed from this list upon
10131 # completion. Other packages can start building only when this list is
10133 self._merge_wait_scheduled = []
10135 # Holds system packages and their deep runtime dependencies. Before
10136 # being merged, these packages go to merge_wait_queue, to be merged
10137 # when no other packages are building.
10138 self._deep_system_deps = set()
10140 # Holds packages to merge which will satisfy currently unsatisfied
10141 # deep runtime dependencies of system packages. If this is not empty
10142 # then no parallel builds will be spawned until it is empty. This
10143 # minimizes the possibility that a build will fail due to the system
10144 # being in a fragile state. For example, see bug #259954.
10145 self._unsatisfied_system_deps = set()
10147 self._status_display = JobStatusDisplay()
10148 self._max_load = myopts.get("--load-average")
10149 max_jobs = myopts.get("--jobs")
10150 if max_jobs is None:
10152 self._set_max_jobs(max_jobs)
10154 # The root where the currently running
10155 # portage instance is installed.
10156 self._running_root = trees["/"]["root_config"]
10158 if settings.get("PORTAGE_DEBUG", "") == "1":
10160 self.pkgsettings = {}
10161 self._config_pool = {}
10162 self._blocker_db = {}
10164 self._config_pool[root] = []
10165 self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10167 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10168 schedule=self._schedule_fetch)
10169 self._sched_iface = self._iface_class(
10170 dblinkEbuildPhase=self._dblink_ebuild_phase,
10171 dblinkDisplayMerge=self._dblink_display_merge,
10172 dblinkElog=self._dblink_elog,
10173 dblinkEmergeLog=self._dblink_emerge_log,
10174 fetch=fetch_iface, register=self._register,
10175 schedule=self._schedule_wait,
10176 scheduleSetup=self._schedule_setup,
10177 scheduleUnpack=self._schedule_unpack,
10178 scheduleYield=self._schedule_yield,
10179 unregister=self._unregister)
10181 self._prefetchers = weakref.WeakValueDictionary()
10182 self._pkg_queue = []
10183 self._completed_tasks = set()
10185 self._failed_pkgs = []
10186 self._failed_pkgs_all = []
10187 self._failed_pkgs_die_msgs = []
10188 self._post_mod_echo_msgs = []
10189 self._parallel_fetch = False
10190 merge_count = len([x for x in mergelist \
10191 if isinstance(x, Package) and x.operation == "merge"])
10192 self._pkg_count = self._pkg_count_class(
10193 curval=0, maxval=merge_count)
10194 self._status_display.maxval = self._pkg_count.maxval
10196 # The load average takes some time to respond when new
10197 # jobs are added, so we need to limit the rate of adding
10199 self._job_delay_max = 10
10200 self._job_delay_factor = 1.0
10201 self._job_delay_exp = 1.5
10202 self._previous_job_start_time = None
10204 self._set_digraph(digraph)
10206 # This is used to memoize the _choose_pkg() result when
10207 # no packages can be chosen until one of the existing
10209 self._choose_pkg_return_early = False
10211 features = self.settings.features
10212 if "parallel-fetch" in features and \
10213 not ("--pretend" in self.myopts or \
10214 "--fetch-all-uri" in self.myopts or \
10215 "--fetchonly" in self.myopts):
10216 if "distlocks" not in features:
10217 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10218 portage.writemsg(red("!!!")+" parallel-fetching " + \
10219 "requires the distlocks feature enabled"+"\n",
10221 portage.writemsg(red("!!!")+" you have it disabled, " + \
10222 "thus parallel-fetching is being disabled"+"\n",
10224 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10225 elif len(mergelist) > 1:
10226 self._parallel_fetch = True
10228 if self._parallel_fetch:
10229 # clear out existing fetch log if it exists
10231 open(self._fetch_log, 'w')
10232 except EnvironmentError:
10235 self._running_portage = None
10236 portage_match = self._running_root.trees["vartree"].dbapi.match(
10237 portage.const.PORTAGE_PACKAGE_ATOM)
10239 cpv = portage_match.pop()
10240 self._running_portage = self._pkg(cpv, "installed",
10241 self._running_root, installed=True)
10243 def _poll(self, timeout=None):
10245 PollScheduler._poll(self, timeout=timeout)
10247 def _set_max_jobs(self, max_jobs):
10248 self._max_jobs = max_jobs
10249 self._task_queues.jobs.max_jobs = max_jobs
	def _background_mode(self):
		"""
		Check if background mode is enabled and adjust states as necessary.

		@rtype: bool
		@returns: True if background mode is enabled, False otherwise.
		"""
		background = (self._max_jobs is True or \
			self._max_jobs > 1 or "--quiet" in self.myopts) and \
			not bool(self._opts_no_background.intersection(self.myopts))

		interactive_tasks = self._get_interactive_tasks()
		if interactive_tasks:
			# Interactive ebuilds need the tty, so they force output
			# to stdio instead of a background log.
			writemsg_level(">>> Sending package output to stdio due " + \
				"to interactive package(s):\n",
				level=logging.INFO, noiselevel=-1)
			# NOTE(review): the 'msg' list appended to below is
			# initialized on a line not visible in this view.
			for pkg in interactive_tasks:
				pkg_str = " " + colorize("INFORM", str(pkg.cpv))
				if pkg.root != "/":
					pkg_str += " for " + pkg.root
				msg.append(pkg_str)

			writemsg_level("".join("%s\n" % (l,) for l in msg),
				level=logging.INFO, noiselevel=-1)
			if self._max_jobs is True or self._max_jobs > 1:
				self._set_max_jobs(1)
				writemsg_level(">>> Setting --jobs=1 due " + \
					"to the above interactive package(s)\n",
					level=logging.INFO, noiselevel=-1)

		self._status_display.quiet = \
			not background or \
			("--quiet" in self.myopts and \
			"--verbose" not in self.myopts)

		self._logger.xterm_titles = \
			"notitles" not in self.settings.features and \
			self._status_display.quiet
	def _get_interactive_tasks(self):
		"""
		Collect merge tasks whose ebuilds declare
		PROPERTIES=interactive, since those require tty access.
		"""
		from portage import flatten
		from portage.dep import use_reduce, paren_reduce
		interactive_tasks = []
		for task in self._mergelist:
			# Only ebuild packages scheduled for merge can be interactive.
			if not (isinstance(task, Package) and \
				task.operation == "merge"):
			properties = flatten(use_reduce(paren_reduce(
				task.metadata["PROPERTIES"]), uselist=task.use.enabled))
			except portage.exception.InvalidDependString, e:
				show_invalid_depstring_notice(task,
					task.metadata["PROPERTIES"], str(e))
				raise self._unknown_internal_error()
			if "interactive" in properties:
				interactive_tasks.append(task)
		return interactive_tasks
	def _set_digraph(self, digraph):
		"""
		Store the scheduling digraph and derive helper state from it.
		"""
		# Graph-based scheduling has no benefit with --nodeps or when
		# at most one job runs at a time.
		if "--nodeps" in self.myopts or \
			(self._max_jobs is not True and self._max_jobs < 2):
			self._digraph = None

		self._digraph = digraph
		self._find_system_deps()
		self._prune_digraph()
		self._prevent_builddir_collisions()
	def _find_system_deps(self):
		"""
		Find system packages and their deep runtime dependencies. Before being
		merged, these packages go to merge_wait_queue, to be merged when no
		other packages are building.
		"""
		deep_system_deps = self._deep_system_deps
		deep_system_deps.clear()
		deep_system_deps.update(
			_find_deep_system_runtime_deps(self._digraph))
		# Only packages that will actually be merged are relevant here.
		deep_system_deps.difference_update([pkg for pkg in \
			deep_system_deps if pkg.operation != "merge"])
	def _prune_digraph(self):
		"""
		Prune any root nodes that are irrelevant.
		"""
		graph = self._digraph
		completed_tasks = self._completed_tasks
		removed_nodes = set()
		# Strip root nodes that no longer matter for scheduling:
		# non-Package nodes, installed no-op entries, finished tasks.
		for node in graph.root_nodes():
			if not isinstance(node, Package) or \
				(node.installed and node.operation == "nomerge") or \
				node in completed_tasks:
				removed_nodes.add(node)
		graph.difference_update(removed_nodes)
		if not removed_nodes:
		removed_nodes.clear()
	def _prevent_builddir_collisions(self):
		"""
		When building stages, sometimes the same exact cpv needs to be merged
		to both $ROOTs. Add edges to the digraph in order to avoid collisions
		in the builddir. Currently, normal file locks would be inappropriate
		for this purpose since emerge holds all of its build dir locks
		in the main process.
		"""
		for pkg in self._mergelist:
			if not isinstance(pkg, Package):
				# a satisfied blocker
			if pkg.cpv not in cpv_map:
				cpv_map[pkg.cpv] = [pkg]
			# Same cpv scheduled more than once: serialize the builds
			# via buildtime edges between the duplicates.
			for earlier_pkg in cpv_map[pkg.cpv]:
				self._digraph.add(earlier_pkg, pkg,
					priority=DepPriority(buildtime=True))
			cpv_map[pkg.cpv].append(pkg)
	class _pkg_failure(portage.exception.PortageException):
		"""
		An instance of this class is raised by unmerge() when
		an uninstallation fails.
		"""
		def __init__(self, *pargs):
			portage.exception.PortageException.__init__(self, pargs)
			# The first positional argument, when given, carries the
			# exit status of the failed uninstall.
			self.status = pargs[0]
	def _schedule_fetch(self, fetcher):
		"""
		Schedule a fetcher on the fetch queue, in order to
		serialize access to the fetch log.
		"""
		self._task_queues.fetch.addFront(fetcher)
	def _schedule_setup(self, setup_phase):
		"""
		Schedule a setup phase on the merge queue, in order to
		serialize unsandboxed access to the live filesystem.
		"""
		self._task_queues.merge.addFront(setup_phase)
	def _schedule_unpack(self, unpack_phase):
		"""
		Schedule an unpack phase on the unpack queue, in order
		to serialize $DISTDIR access for live ebuilds.
		"""
		self._task_queues.unpack.add(unpack_phase)
10416 def _find_blockers(self, new_pkg):
10418 Returns a callable which should be called only when
10419 the vdb lock has been acquired.
10421 def get_blockers():
10422 return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10423 return get_blockers
	def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
		# Build dblink instances for all installed packages that block
		# new_pkg; honored only when blockers are not ignored by opts.
		if self._opts_ignore_blockers.intersection(self.myopts):

		# Call gc.collect() here to avoid heap overflow that
		# triggers 'Cannot allocate memory' errors (reported
		# with python-2.5).
		blocker_db = self._blocker_db[new_pkg.root]

		blocker_dblinks = []
		for blocking_pkg in blocker_db.findInstalledBlockers(
			new_pkg, acquire_lock=acquire_lock):
			# A package never blocks its own slot or an identical cpv.
			if new_pkg.slot_atom == blocking_pkg.slot_atom:
			if new_pkg.cpv == blocking_pkg.cpv:
			blocker_dblinks.append(portage.dblink(
				blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
				self.pkgsettings[blocking_pkg.root], treetype="vartree",
				vartree=self.trees[blocking_pkg.root]["vartree"]))

		return blocker_dblinks
10453 def _dblink_pkg(self, pkg_dblink):
10454 cpv = pkg_dblink.mycpv
10455 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
10456 root_config = self.trees[pkg_dblink.myroot]["root_config"]
10457 installed = type_name == "installed"
10458 return self._pkg(cpv, type_name, root_config, installed=installed)
	def _append_to_log_path(self, log_path, msg):
		# Open in append mode so prior log contents are preserved.
		f = open(log_path, 'a')
	def _dblink_elog(self, pkg_dblink, phase, func, msgs):
		# Route elog output to the build log when running in the
		# background, otherwise to the default output stream.
		log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
		background = self._background
		if background and log_path is not None:
			log_file = open(log_path, 'a')
		func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
		if log_file is not None:
	def _dblink_emerge_log(self, msg):
		# Forward dblink log messages to the emerge logger.
		self._logger.log(msg)
	def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
		"""
		Display a message from a dblink merge, writing to the console
		and/or the build log depending on background mode.
		"""
		log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
		background = self._background

		if log_path is None:
			# No log file: print unless backgrounded and below WARN.
			if not (background and level < logging.WARN):
				portage.util.writemsg_level(msg,
					level=level, noiselevel=noiselevel)
			portage.util.writemsg_level(msg,
				level=level, noiselevel=noiselevel)
			self._append_to_log_path(log_path, msg)
	def _dblink_ebuild_phase(self,
		pkg_dblink, pkg_dbapi, ebuild_path, phase):
		"""
		Using this callback for merge phases allows the scheduler
		to run while these phases execute asynchronously, and allows
		the scheduler control output handling.
		"""
		scheduler = self._sched_iface
		settings = pkg_dblink.settings
		pkg = self._dblink_pkg(pkg_dblink)
		background = self._background
		# NOTE(review): log_path appears unused in this view — confirm
		# before removing.
		log_path = settings.get("PORTAGE_LOG_FILE")

		ebuild_phase = EbuildPhase(background=background,
			pkg=pkg, phase=phase, scheduler=scheduler,
			settings=settings, tree=pkg_dblink.treetype)
		ebuild_phase.start()
		ebuild_phase.wait()

		return ebuild_phase.returncode
	def _generate_digests(self):
		"""
		Generate digests if necessary for --digests or FEATURES=digest.
		In order to avoid interference, this must done before parallel
		tasks are started.
		"""
		if '--fetchonly' in self.myopts:

		digest = '--digest' in self.myopts

		# FEATURES=digest on any root also enables generation.
		for pkgsettings in self.pkgsettings.itervalues():
			if 'digest' in pkgsettings.features:

		for x in self._mergelist:
			# Only ebuilds that will actually be merged need digests.
			if not isinstance(x, Package) or \
				x.type_name != 'ebuild' or \
				x.operation != 'merge':
			pkgsettings = self.pkgsettings[x.root]
			if '--digest' not in self.myopts and \
				'digest' not in pkgsettings.features:
			portdb = x.root_config.trees['porttree'].dbapi
			ebuild_path = portdb.findname(x.cpv)
			if not ebuild_path:
				"!!! Could not locate ebuild for '%s'.\n" \
				% x.cpv, level=logging.ERROR, noiselevel=-1)
			pkgsettings['O'] = os.path.dirname(ebuild_path)
			if not portage.digestgen([], pkgsettings, myportdb=portdb):
				"!!! Unable to generate manifest for '%s'.\n" \
				% x.cpv, level=logging.ERROR, noiselevel=-1)
	def _check_manifests(self):
		# Verify all the manifests now so that the user is notified of failure
		# as soon as possible.
		if "strict" not in self.settings.features or \
			"--fetchonly" in self.myopts or \
			"--fetch-all-uri" in self.myopts:

		shown_verifying_msg = False
		quiet_settings = {}
		# Clone each root's config with PORTAGE_QUIET so digestcheck
		# doesn't spam output for every package.
		for myroot, pkgsettings in self.pkgsettings.iteritems():
			quiet_config = portage.config(clone=pkgsettings)
			quiet_config["PORTAGE_QUIET"] = "1"
			quiet_config.backup_changes("PORTAGE_QUIET")
			quiet_settings[myroot] = quiet_config

		for x in self._mergelist:
			if not isinstance(x, Package) or \
				x.type_name != "ebuild":

			if not shown_verifying_msg:
				shown_verifying_msg = True
				self._status_msg("Verifying ebuild manifests")

			root_config = x.root_config
			portdb = root_config.trees["porttree"].dbapi
			quiet_config = quiet_settings[root_config.root]
			quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
			if not portage.digestcheck([], quiet_config, strict=True):
10604 def _add_prefetchers(self):
10606 if not self._parallel_fetch:
10609 if self._parallel_fetch:
10610 self._status_msg("Starting parallel fetch")
10612 prefetchers = self._prefetchers
10613 getbinpkg = "--getbinpkg" in self.myopts
10615 # In order to avoid "waiting for lock" messages
10616 # at the beginning, which annoy users, never
10617 # spawn a prefetcher for the first package.
10618 for pkg in self._mergelist[1:]:
10619 prefetcher = self._create_prefetcher(pkg)
10620 if prefetcher is not None:
10621 self._task_queues.fetch.add(prefetcher)
10622 prefetchers[pkg] = prefetcher
	def _create_prefetcher(self, pkg):
		"""
		@return: a prefetcher, or None if not applicable
		"""
		if not isinstance(pkg, Package):

		elif pkg.type_name == "ebuild":

			prefetcher = EbuildFetcher(background=True,
				config_pool=self._ConfigPool(pkg.root,
				self._allocate_config, self._deallocate_config),
				fetchonly=1, logfile=self._fetch_log,
				pkg=pkg, prefetch=True, scheduler=self._sched_iface)

		# Remote binary packages can also be prefetched.
		elif pkg.type_name == "binary" and \
			"--getbinpkg" in self.myopts and \
			pkg.root_config.trees["bintree"].isremote(pkg.cpv):

			prefetcher = BinpkgPrefetcher(background=True,
				pkg=pkg, scheduler=self._sched_iface)
	def _is_restart_scheduled(self):
		"""
		Check if the merge list contains a replacement
		for the current running instance, that will result
		in restart after merge.
		@rtype: bool
		@returns: True if a restart is scheduled, False otherwise.
		"""
		if self._opts_no_restart.intersection(self.myopts):

		mergelist = self._mergelist

		# A restart only happens when the portage upgrade is not the
		# final entry in the merge list.
		for i, pkg in enumerate(mergelist):
			if self._is_restart_necessary(pkg) and \
				i != len(mergelist) - 1:
	def _is_restart_necessary(self, pkg):
		"""
		@return: True if merging the given package
			requires restart, False otherwise.
		"""
		# Figure out if we need a restart.
		if pkg.root == self._running_root.root and \
			portage.match_from_list(
			portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
			# A different portage version than the running one
			# triggers a restart.
			if self._running_portage:
				return pkg.cpv != self._running_portage.cpv
	def _restart_if_necessary(self, pkg):
		"""
		Use execv() to restart emerge. This happens
		if portage upgrades itself and there are
		remaining packages in the list.
		"""
		if self._opts_no_restart.intersection(self.myopts):

		if not self._is_restart_necessary(pkg):

		if pkg == self._mergelist[-1]:

		self._main_loop_cleanup()

		logger = self._logger
		pkg_count = self._pkg_count
		mtimedb = self._mtimedb
		bad_resume_opts = self._bad_resume_opts

		logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))

		logger.log(" *** RESTARTING " + \
			"emerge via exec() after change of " + \
			"portage version.")

		# Drop the just-merged package so --resume picks up after it.
		mtimedb["resume"]["mergelist"].remove(list(pkg))
		portage.run_exitfuncs()
		mynewargv = [sys.argv[0], "--resume"]
		resume_opts = self.myopts.copy()
		# For automatic resume, we need to prevent
		# any of bad_resume_opts from leaking in
		# via EMERGE_DEFAULT_OPTS.
		resume_opts["--ignore-default-opts"] = True
		for myopt, myarg in resume_opts.iteritems():
			if myopt not in bad_resume_opts:
				mynewargv.append(myopt)
				mynewargv.append(myopt +"="+ str(myarg))
		# priority only needs to be adjusted on the first run
		os.environ["PORTAGE_NICENESS"] = "0"
		os.execv(mynewargv[0], mynewargv)
		# (interior of the merge entry point; its def line is not
		# visible in this view)
		if "--resume" in self.myopts:
			# We're resuming a previously interrupted merge.
			portage.writemsg_stdout(
				colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
			self._logger.log(" *** Resuming merge...")

		self._save_resume_list()

			self._background = self._background_mode()
		except self._unknown_internal_error:

		for root in self.trees:
			root_config = self.trees[root]["root_config"]

			# Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
			# since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
			# for ensuring sane $PWD (bug #239560) and storing elog messages.
			tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
			if not tmpdir or not os.path.isdir(tmpdir):
				msg = "The directory specified in your " + \
					"PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
					"does not exist. Please create this " + \
					"directory or correct your PORTAGE_TMPDIR setting."
				msg = textwrap.wrap(msg, 70)
				out = portage.output.EOutput()

			# Background mode must be recorded in each root's settings
			# so spawned phases inherit it.
			if self._background:
				root_config.settings.unlock()
				root_config.settings["PORTAGE_BACKGROUND"] = "1"
				root_config.settings.backup_changes("PORTAGE_BACKGROUND")
				root_config.settings.lock()

			self.pkgsettings[root] = portage.config(
				clone=root_config.settings)

		rval = self._generate_digests()
		if rval != os.EX_OK:

		rval = self._check_manifests()
		if rval != os.EX_OK:

		keep_going = "--keep-going" in self.myopts
		fetchonly = self._build_opts.fetchonly
		mtimedb = self._mtimedb
		failed_pkgs = self._failed_pkgs

			# --keep-going loop: drop failed packages from the resume
			# list and retry until nothing is left or merging succeeds.
			rval = self._merge()
			if rval == os.EX_OK or fetchonly or not keep_going:
			if "resume" not in mtimedb:
			mergelist = self._mtimedb["resume"].get("mergelist")
			if not failed_pkgs:
			for failed_pkg in failed_pkgs:
				mergelist.remove(list(failed_pkg.pkg))
			self._failed_pkgs_all.extend(failed_pkgs)
			if not self._calc_resume_list():
			clear_caches(self.trees)
			if not self._mergelist:
			self._save_resume_list()
			self._pkg_count.curval = 0
			self._pkg_count.maxval = len([x for x in self._mergelist \
				if isinstance(x, Package) and x.operation == "merge"])
			self._status_display.maxval = self._pkg_count.maxval

		self._logger.log(" *** Finished. Cleaning up...")

			self._failed_pkgs_all.extend(failed_pkgs)

		background = self._background
		failure_log_shown = False
		if background and len(self._failed_pkgs_all) == 1:
			# If only one package failed then just show its
			# whole log for easy viewing.
			failed_pkg = self._failed_pkgs_all[-1]
			build_dir = failed_pkg.build_dir

			log_paths = [failed_pkg.build_log]

			log_path = self._locate_failure_log(failed_pkg)
			if log_path is not None:
					log_file = open(log_path)

			if log_file is not None:
					for line in log_file:
						writemsg_level(line, noiselevel=-1)
				failure_log_shown = True

		# Dump mod_echo output now since it tends to flood the terminal.
		# This allows us to avoid having more important output, generated
		# later, from being swept away by the mod_echo output.
		mod_echo_output = _flush_elog_mod_echo()

		if background and not failure_log_shown and \
			self._failed_pkgs_all and \
			self._failed_pkgs_die_msgs and \
			not mod_echo_output:

			printer = portage.output.EOutput()
			for mysettings, key, logentries in self._failed_pkgs_die_msgs:
				if mysettings["ROOT"] != "/":
					root_msg = " merged to %s" % mysettings["ROOT"]

				printer.einfo("Error messages for package %s%s:" % \
					(colorize("INFORM", key), root_msg))

				# Replay recorded die messages in ebuild phase order.
				for phase in portage.const.EBUILD_PHASES:
					if phase not in logentries:
					for msgtype, msgcontent in logentries[phase]:
						if isinstance(msgcontent, basestring):
							msgcontent = [msgcontent]
						for line in msgcontent:
							printer.eerror(line.strip("\n"))

		if self._post_mod_echo_msgs:
			for msg in self._post_mod_echo_msgs:

		# Final summary of every failed package.
		if len(self._failed_pkgs_all) > 1 or \
			(self._failed_pkgs_all and "--keep-going" in self.myopts):
			if len(self._failed_pkgs_all) > 1:
				msg = "The following %d packages have " % \
					len(self._failed_pkgs_all) + \
					"failed to build or install:"
				msg = "The following package has " + \
					"failed to build or install:"
			prefix = bad(" * ")
			writemsg(prefix + "\n", noiselevel=-1)
			from textwrap import wrap
			for line in wrap(msg, 72):
				writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
			writemsg(prefix + "\n", noiselevel=-1)
			for failed_pkg in self._failed_pkgs_all:
				writemsg("%s\t%s\n" % (prefix,
					colorize("INFORM", str(failed_pkg.pkg))),
			writemsg(prefix + "\n", noiselevel=-1)
	def _elog_listener(self, mysettings, key, logentries, fulltext):
		# Keep only ERROR-level elog entries, for the failure summary
		# printed at the end of the merge.
		errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
		self._failed_pkgs_die_msgs.append(
			(mysettings, key, errors))
	def _locate_failure_log(self, failed_pkg):
		# Find a useful (existing, non-empty) log file for the failed
		# package, if any.
		build_dir = failed_pkg.build_dir

		log_paths = [failed_pkg.build_log]

		for log_path in log_paths:
			log_size = os.stat(log_path).st_size
	def _add_packages(self):
		# Seed the scheduling queue from the merge list; blockers are
		# tracked separately from real package tasks.
		pkg_queue = self._pkg_queue
		for pkg in self._mergelist:
			if isinstance(pkg, Package):
				pkg_queue.append(pkg)
			elif isinstance(pkg, Blocker):
	def _system_merge_started(self, merge):
		"""
		Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
		"""
		graph = self._digraph

		pkg = merge.merge.pkg

		# Skip this if $ROOT != / since it shouldn't matter if there
		# are unsatisfied system runtime deps in this case.
		if pkg.root != '/':

		completed_tasks = self._completed_tasks
		unsatisfied = self._unsatisfied_system_deps

		def ignore_non_runtime_or_satisfied(priority):
			"""
			Ignore non-runtime and satisfied runtime priorities.
			"""
			if isinstance(priority, DepPriority) and \
				not priority.satisfied and \
				(priority.runtime or priority.runtime_post):

		# When checking for unsatisfied runtime deps, only check
		# direct deps since indirect deps are checked when the
		# corresponding parent is merged.
		for child in graph.child_nodes(pkg,
			ignore_priority=ignore_non_runtime_or_satisfied):
			if not isinstance(child, Package) or \
				child.operation == 'uninstall':
			if child.operation == 'merge' and \
				child not in completed_tasks:
				unsatisfied.add(child)
	def _merge_wait_exit_handler(self, task):
		# The deferred merge has finished waiting; handle its exit
		# like any other merge.
		self._merge_wait_scheduled.remove(task)
		self._merge_exit(task)
	def _merge_exit(self, merge):
		# Finalize bookkeeping for a completed merge and return its
		# config instance to the pool.
		self._do_merge_exit(merge)
		self._deallocate_config(merge.merge.settings)
		if merge.returncode == os.EX_OK and \
			not merge.merge.pkg.installed:
			self._status_display.curval += 1
		self._status_display.merges = len(self._task_queues.merge)
	def _do_merge_exit(self, merge):
		pkg = merge.merge.pkg
		if merge.returncode != os.EX_OK:
			# Record the failure so it can be reported later and so
			# --keep-going can prune it from the resume list.
			settings = merge.merge.settings
			build_dir = settings.get("PORTAGE_BUILDDIR")
			build_log = settings.get("PORTAGE_LOG_FILE")

			self._failed_pkgs.append(self._failed_pkg(
				build_dir=build_dir, build_log=build_log,
				returncode=merge.returncode))
			self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")

			self._status_display.failed = len(self._failed_pkgs)

		self._task_complete(pkg)
		pkg_to_replace = merge.merge.pkg_to_replace
		if pkg_to_replace is not None:
			# When a package is replaced, mark its uninstall
			# task complete (if any).
			uninst_hash_key = \
				("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
			self._task_complete(uninst_hash_key)

		self._restart_if_necessary(pkg)

		# Call mtimedb.commit() after each merge so that
		# --resume still works after being interrupted
		# by reboot, sigkill or similar.
		mtimedb = self._mtimedb
		mtimedb["resume"]["mergelist"].remove(list(pkg))
		if not mtimedb["resume"]["mergelist"]:
			del mtimedb["resume"]
	def _build_exit(self, build):
		if build.returncode == os.EX_OK:
			# Build succeeded: queue the merge, deferring deep system
			# dependencies until no other builds are running.
			merge = PackageMerge(merge=build)
			if not build.build_opts.buildpkgonly and \
				build.pkg in self._deep_system_deps:
				# Since dependencies on system packages are frequently
				# unspecified, merge them only when no builds are executing.
				self._merge_wait_queue.append(merge)
				merge.addStartListener(self._system_merge_started)
				merge.addExitListener(self._merge_exit)
				self._task_queues.merge.add(merge)
				self._status_display.merges = len(self._task_queues.merge)
			# Build failed: record the failure for later reporting.
			settings = build.settings
			build_dir = settings.get("PORTAGE_BUILDDIR")
			build_log = settings.get("PORTAGE_LOG_FILE")

			self._failed_pkgs.append(self._failed_pkg(
				build_dir=build_dir, build_log=build_log,
				returncode=build.returncode))
			self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")

			self._status_display.failed = len(self._failed_pkgs)
			self._deallocate_config(build.settings)

		self._status_display.running = self._jobs
	def _extract_exit(self, build):
		# Binary package extraction shares the exit handling of
		# ebuild builds.
		self._build_exit(build)
11075 def _task_complete(self, pkg):
11076 self._completed_tasks.add(pkg)
11077 self._unsatisfied_system_deps.discard(pkg)
11078 self._choose_pkg_return_early = False
		# (interior of the inner merge pass; its def line is not
		# visible in this view)
		self._add_prefetchers()
		self._add_packages()
		pkg_queue = self._pkg_queue
		failed_pkgs = self._failed_pkgs
		# Silence lock messages and capture elog failures while the
		# main loop runs.
		portage.locks._quiet = self._background
		portage.elog._emerge_elog_listener = self._elog_listener
			self._main_loop_cleanup()
			portage.locks._quiet = False
			portage.elog._emerge_elog_listener = None
			rval = failed_pkgs[-1].returncode
11101 def _main_loop_cleanup(self):
11102 del self._pkg_queue[:]
11103 self._completed_tasks.clear()
11104 self._deep_system_deps.clear()
11105 self._unsatisfied_system_deps.clear()
11106 self._choose_pkg_return_early = False
11107 self._status_display.reset()
11108 self._digraph = None
11109 self._task_queues.fetch.clear()
	def _choose_pkg(self):
		"""
		Choose a task that has all its dependencies satisfied.
		"""
		if self._choose_pkg_return_early:
		if self._digraph is None:
			# Without a graph, fall back to FIFO order; with --nodeps
			# and multiple jobs, keep handing out packages eagerly.
			if (self._jobs or self._task_queues.merge) and \
				not ("--nodeps" in self.myopts and \
				(self._max_jobs is True or self._max_jobs > 1)):
				self._choose_pkg_return_early = True
			return self._pkg_queue.pop(0)

		if not (self._jobs or self._task_queues.merge):
			return self._pkg_queue.pop(0)

		self._prune_digraph()

		# Prefer the first queued package that does not depend on a
		# still-scheduled merge.
		later = set(self._pkg_queue)
		for pkg in self._pkg_queue:
			if not self._dependent_on_scheduled_merges(pkg, later):
		if chosen_pkg is not None:
			self._pkg_queue.remove(chosen_pkg)
		if chosen_pkg is None:
			# There's no point in searching for a package to
			# choose until at least one of the existing jobs
			# completes.
			self._choose_pkg_return_early = True
	def _dependent_on_scheduled_merges(self, pkg, later):
		"""
		Traverse the subgraph of the given packages deep dependencies
		to see if it contains any scheduled merges.
		@param pkg: a package to check dependencies for
		@param later: packages for which dependence should be ignored
			since they will be merged later than pkg anyway and therefore
			delaying the merge of pkg will not result in a more optimal
			merge order
		@rtype: bool
		@returns: True if the package is dependent, False otherwise.
		"""
		graph = self._digraph
		completed_tasks = self._completed_tasks

		# Depth-first traversal starting from pkg's direct deps.
		traversed_nodes = set([pkg])
		direct_deps = graph.child_nodes(pkg)
		node_stack = direct_deps
		direct_deps = frozenset(direct_deps)
			node = node_stack.pop()
			if node in traversed_nodes:
			traversed_nodes.add(node)
			if not ((node.installed and node.operation == "nomerge") or \
				(node.operation == "uninstall" and \
				node not in direct_deps) or \
				node in completed_tasks or \
			node_stack.extend(graph.child_nodes(node))
	def _allocate_config(self, root):
		"""
		Allocate a unique config instance for a task in order
		to prevent interference between parallel tasks.
		"""
		if self._config_pool[root]:
			temp_settings = self._config_pool[root].pop()
			# NOTE(review): this fallback clone normally sits on an
			# else branch not visible in this view.
			temp_settings = portage.config(clone=self.pkgsettings[root])
		# Since config.setcpv() isn't guaranteed to call config.reset() due to
		# performance reasons, call it here to make sure all settings from the
		# previous package get flushed out (such as PORTAGE_LOG_FILE).
		temp_settings.reload()
		temp_settings.reset()
		return temp_settings
	def _deallocate_config(self, settings):
		# Return the config instance to the per-root pool for reuse.
		self._config_pool[settings["ROOT"]].append(settings)
	def _main_loop(self):
		# Only allow 1 job max if a restart is scheduled
		# due to portage update.
		if self._is_restart_scheduled() or \
			self._opts_no_background.intersection(self.myopts):
			self._set_max_jobs(1)

		merge_queue = self._task_queues.merge

		# Keep scheduling and polling until nothing remains queued
		# and no jobs or merges are in flight.
		while self._schedule():
			if self._poll_event_handlers:

			if not (self._jobs or merge_queue):
			if self._poll_event_handlers:
11230 def _keep_scheduling(self):
11231 return bool(self._pkg_queue and \
11232 not (self._failed_pkgs and not self._build_opts.fetchonly))
	def _schedule_tasks(self):
		# When the number of jobs drops to zero, process all waiting merges.
		if not self._jobs and self._merge_wait_queue:
			for task in self._merge_wait_queue:
				task.addExitListener(self._merge_wait_exit_handler)
				self._task_queues.merge.add(task)
			self._status_display.merges = len(self._task_queues.merge)
			self._merge_wait_scheduled.extend(self._merge_wait_queue)
			del self._merge_wait_queue[:]

		self._schedule_tasks_imp()
		self._status_display.display()

		for q in self._task_queues.values():

		# Cancel prefetchers if they're the only reason
		# the main poll loop is still running.
		if self._failed_pkgs and not self._build_opts.fetchonly and \
			not (self._jobs or self._task_queues.merge) and \
			self._task_queues.fetch:
			self._task_queues.fetch.clear()

			self._schedule_tasks_imp()
			self._status_display.display()

		return self._keep_scheduling()
	def _job_delay(self):
		"""
		@rtype: bool
		@returns: True if job scheduling should be delayed, False otherwise.
		"""
		if self._jobs and self._max_load is not None:

			current_time = time.time()

			# Exponential backoff, capped at _job_delay_max.
			delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
			if delay > self._job_delay_max:
				delay = self._job_delay_max
			if (current_time - self._previous_job_start_time) < delay:
	def _schedule_tasks_imp(self):
		"""
		@rtype: bool
		@returns: True if state changed, False otherwise.
		"""
			if not self._keep_scheduling():
				return bool(state_change)

			# Hold off when a restart is pending, system merges are
			# waiting, the load is too high, or a delay is in effect.
			if self._choose_pkg_return_early or \
				self._merge_wait_scheduled or \
				(self._jobs and self._unsatisfied_system_deps) or \
				not self._can_add_job() or \
				return bool(state_change)

			pkg = self._choose_pkg()
				return bool(state_change)

			if not pkg.installed:
				self._pkg_count.curval += 1

			task = self._task(pkg)

			# Installed packages go straight to the merge queue; built
			# and to-be-built packages become jobs.
				merge = PackageMerge(merge=task)
				merge.addExitListener(self._merge_exit)
				self._task_queues.merge.add(merge)

				self._previous_job_start_time = time.time()
				self._status_display.running = self._jobs
				task.addExitListener(self._extract_exit)
				self._task_queues.jobs.add(task)

				self._previous_job_start_time = time.time()
				self._status_display.running = self._jobs
				task.addExitListener(self._build_exit)
				self._task_queues.jobs.add(task)

		return bool(state_change)
	def _task(self, pkg):
		"""
		Build a MergeListItem task for the given package, locating any
		installed instance that it will replace.
		"""
		pkg_to_replace = None
		if pkg.operation != "uninstall":
			vardb = pkg.root_config.trees["vartree"].dbapi
			previous_cpv = vardb.match(pkg.slot_atom)
				previous_cpv = previous_cpv.pop()
				pkg_to_replace = self._pkg(previous_cpv,
					"installed", pkg.root_config, installed=True)

		task = MergeListItem(args_set=self._args_set,
			background=self._background, binpkg_opts=self._binpkg_opts,
			build_opts=self._build_opts,
			config_pool=self._ConfigPool(pkg.root,
			self._allocate_config, self._deallocate_config),
			emerge_opts=self.myopts,
			find_blockers=self._find_blockers(pkg), logger=self._logger,
			mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
			pkg_to_replace=pkg_to_replace,
			prefetcher=self._prefetchers.get(pkg),
			scheduler=self._sched_iface,
			settings=self._allocate_config(pkg.root),
			statusMessage=self._status_msg,
			world_atom=self._world_atom)
11365 def _failed_pkg_msg(self, failed_pkg, action, preposition):
11366 pkg = failed_pkg.pkg
11367 msg = "%s to %s %s" % \
11368 (bad("Failed"), action, colorize("INFORM", pkg.cpv))
11369 if pkg.root != "/":
11370 msg += " %s %s" % (preposition, pkg.root)
11372 log_path = self._locate_failure_log(failed_pkg)
11373 if log_path is not None:
11374 msg += ", Log file:"
11375 self._status_msg(msg)
11377 if log_path is not None:
11378 self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
	def _status_msg(self, msg):
		"""
		Display a brief status message (no newlines) in the status display.
		This is called by tasks to provide feedback to the user. This
		delegates the responsibility of generating \r and \n control
		characters, to guarantee that lines are created or erased when
		necessary and appropriate.

		@param msg: a brief status message (no newlines allowed)
		"""
		if not self._background:
			writemsg_level("\n")
		self._status_display.displayMessage(msg)
	def _save_resume_list(self):
		"""
		Do this before verifying the ebuild Manifests since it might
		be possible for the user to use --resume --skipfirst get past
		a non-essential package with a broken digest.
		"""
		mtimedb = self._mtimedb
		# Only real merge operations belong in the resume list.
		mtimedb["resume"]["mergelist"] = [list(x) \
			for x in self._mergelist \
			if isinstance(x, Package) and x.operation == "merge"]
11408 def _calc_resume_list(self):
11410 Use the current resume list to calculate a new one,
11411 dropping any packages with unsatisfied deps.
11413 @returns: True if successful, False otherwise.
11415 print colorize("GOOD", "*** Resuming merge...")
11417 if self._show_list():
11418 if "--tree" in self.myopts:
11419 portage.writemsg_stdout("\n" + \
11420 darkgreen("These are the packages that " + \
11421 "would be merged, in reverse order:\n\n"))
11424 portage.writemsg_stdout("\n" + \
11425 darkgreen("These are the packages that " + \
11426 "would be merged, in order:\n\n"))
11428 show_spinner = "--quiet" not in self.myopts and \
11429 "--nodeps" not in self.myopts
11432 print "Calculating dependencies ",
11434 myparams = create_depgraph_params(self.myopts, None)
11438 success, mydepgraph, dropped_tasks = resume_depgraph(
11439 self.settings, self.trees, self._mtimedb, self.myopts,
11440 myparams, self._spinner)
11441 except depgraph.UnsatisfiedResumeDep, exc:
11442 # rename variable to avoid python-3.0 error:
11443 # SyntaxError: can not delete variable 'e' referenced in nested
11446 mydepgraph = e.depgraph
11447 dropped_tasks = set()
11450 print "\b\b... done!"
11453 def unsatisfied_resume_dep_msg():
11454 mydepgraph.display_problems()
11455 out = portage.output.EOutput()
11456 out.eerror("One or more packages are either masked or " + \
11457 "have missing dependencies:")
11460 show_parents = set()
11461 for dep in e.value:
11462 if dep.parent in show_parents:
11464 show_parents.add(dep.parent)
11465 if dep.atom is None:
11466 out.eerror(indent + "Masked package:")
11467 out.eerror(2 * indent + str(dep.parent))
11470 out.eerror(indent + str(dep.atom) + " pulled in by:")
11471 out.eerror(2 * indent + str(dep.parent))
11473 msg = "The resume list contains packages " + \
11474 "that are either masked or have " + \
11475 "unsatisfied dependencies. " + \
11476 "Please restart/continue " + \
11477 "the operation manually, or use --skipfirst " + \
11478 "to skip the first package in the list and " + \
11479 "any other packages that may be " + \
11480 "masked or have missing dependencies."
11481 for line in textwrap.wrap(msg, 72):
11483 self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
11486 if success and self._show_list():
11487 mylist = mydepgraph.altlist()
11489 if "--tree" in self.myopts:
11491 mydepgraph.display(mylist, favorites=self._favorites)
11494 self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11496 mydepgraph.display_problems()
11498 mylist = mydepgraph.altlist()
11499 mydepgraph.break_refs(mylist)
11500 mydepgraph.break_refs(dropped_tasks)
11501 self._mergelist = mylist
11502 self._set_digraph(mydepgraph.schedulerGraph())
11505 for task in dropped_tasks:
11506 if not (isinstance(task, Package) and task.operation == "merge"):
11509 msg = "emerge --keep-going:" + \
11511 if pkg.root != "/":
11512 msg += " for %s" % (pkg.root,)
11513 msg += " dropped due to unsatisfied dependency."
11514 for line in textwrap.wrap(msg, msg_width):
11515 eerror(line, phase="other", key=pkg.cpv)
11516 settings = self.pkgsettings[pkg.root]
11517 # Ensure that log collection from $T is disabled inside
11518 # elog_process(), since any logs that might exist are
11520 settings.pop("T", None)
11521 portage.elog.elog_process(pkg.cpv, settings)
11522 self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
11526 def _show_list(self):
11527 myopts = self.myopts
11528 if "--quiet" not in myopts and \
11529 ("--ask" in myopts or "--tree" in myopts or \
11530 "--verbose" in myopts):
11534 def _world_atom(self, pkg):
11536 Add the package to the world file, but only if
11537 it's supposed to be added. Otherwise, do nothing.
11540 if set(("--buildpkgonly", "--fetchonly",
11542 "--oneshot", "--onlydeps",
11543 "--pretend")).intersection(self.myopts):
11546 if pkg.root != self.target_root:
11549 args_set = self._args_set
11550 if not args_set.findAtomForPackage(pkg):
11553 logger = self._logger
11554 pkg_count = self._pkg_count
11555 root_config = pkg.root_config
11556 world_set = root_config.sets["world"]
11557 world_locked = False
11558 if hasattr(world_set, "lock"):
11560 world_locked = True
11563 if hasattr(world_set, "load"):
11564 world_set.load() # maybe it's changed on disk
11566 atom = create_world_atom(pkg, args_set, root_config)
11568 if hasattr(world_set, "add"):
11569 self._status_msg(('Recording %s in "world" ' + \
11570 'favorites file...') % atom)
11571 logger.log(" === (%s of %s) Updating world file (%s)" % \
11572 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11573 world_set.add(atom)
11575 writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11576 (atom,), level=logging.WARN, noiselevel=-1)
11581 def _pkg(self, cpv, type_name, root_config, installed=False):
11583 Get a package instance from the cache, or create a new
11584 one if necessary. Raises KeyError from aux_get if it
11585 failures for some reason (package does not exist or is
11588 operation = "merge"
11590 operation = "nomerge"
11592 if self._digraph is not None:
11593 # Reuse existing instance when available.
11594 pkg = self._digraph.get(
11595 (type_name, root_config.root, cpv, operation))
11596 if pkg is not None:
11599 tree_type = depgraph.pkg_tree_map[type_name]
11600 db = root_config.trees[tree_type].dbapi
11601 db_keys = list(self.trees[root_config.root][
11602 tree_type].dbapi._aux_cache_keys)
11603 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11604 pkg = Package(cpv=cpv, metadata=metadata,
11605 root_config=root_config, installed=installed)
11606 if type_name == "ebuild":
11607 settings = self.pkgsettings[root_config.root]
11608 settings.setcpv(pkg)
11609 pkg.metadata["USE"] = settings["PORTAGE_USE"]
11610 pkg.metadata['CHOST'] = settings.get('CHOST', '')
11614 class MetadataRegen(PollScheduler):
11616 def __init__(self, portdb, cp_iter=None, consumer=None,
11617 max_jobs=None, max_load=None):
11618 PollScheduler.__init__(self)
11619 self._portdb = portdb
11620 self._global_cleanse = False
11621 if cp_iter is None:
11622 cp_iter = self._iter_every_cp()
11623 # We can globally cleanse stale cache only if we
11624 # iterate over every single cp.
11625 self._global_cleanse = True
11626 self._cp_iter = cp_iter
11627 self._consumer = consumer
11629 if max_jobs is None:
11632 self._max_jobs = max_jobs
11633 self._max_load = max_load
11634 self._sched_iface = self._sched_iface_class(
11635 register=self._register,
11636 schedule=self._schedule_wait,
11637 unregister=self._unregister)
11639 self._valid_pkgs = set()
11640 self._cp_set = set()
11641 self._process_iter = self._iter_metadata_processes()
11642 self.returncode = os.EX_OK
11643 self._error_count = 0
11645 def _iter_every_cp(self):
11646 every_cp = self._portdb.cp_all()
11647 every_cp.sort(reverse=True)
11650 yield every_cp.pop()
11654 def _iter_metadata_processes(self):
11655 portdb = self._portdb
11656 valid_pkgs = self._valid_pkgs
11657 cp_set = self._cp_set
11658 consumer = self._consumer
11660 for cp in self._cp_iter:
11662 portage.writemsg_stdout("Processing %s\n" % cp)
11663 cpv_list = portdb.cp_list(cp)
11664 for cpv in cpv_list:
11665 valid_pkgs.add(cpv)
11666 ebuild_path, repo_path = portdb.findname2(cpv)
11667 metadata, st, emtime = portdb._pull_valid_cache(
11668 cpv, ebuild_path, repo_path)
11669 if metadata is not None:
11670 if consumer is not None:
11671 consumer(cpv, ebuild_path,
11672 repo_path, metadata)
11675 yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
11676 ebuild_mtime=emtime,
11677 metadata_callback=portdb._metadata_callback,
11678 portdb=portdb, repo_path=repo_path,
11679 settings=portdb.doebuild_settings)
11683 portdb = self._portdb
11684 from portage.cache.cache_errors import CacheError
11687 while self._schedule():
11693 if self._global_cleanse:
11694 for mytree in portdb.porttrees:
11696 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11697 except CacheError, e:
11698 portage.writemsg("Error listing cache entries for " + \
11699 "'%s': %s, continuing...\n" % (mytree, e),
11705 cp_set = self._cp_set
11706 cpv_getkey = portage.cpv_getkey
11707 for mytree in portdb.porttrees:
11709 dead_nodes[mytree] = set(cpv for cpv in \
11710 portdb.auxdb[mytree].iterkeys() \
11711 if cpv_getkey(cpv) in cp_set)
11712 except CacheError, e:
11713 portage.writemsg("Error listing cache entries for " + \
11714 "'%s': %s, continuing...\n" % (mytree, e),
11721 for y in self._valid_pkgs:
11722 for mytree in portdb.porttrees:
11723 if portdb.findname2(y, mytree=mytree)[0]:
11724 dead_nodes[mytree].discard(y)
11726 for mytree, nodes in dead_nodes.iteritems():
11727 auxdb = portdb.auxdb[mytree]
11731 except (KeyError, CacheError):
11734 def _schedule_tasks(self):
11737 @returns: True if there may be remaining tasks to schedule,
11740 while self._can_add_job():
11742 metadata_process = self._process_iter.next()
11743 except StopIteration:
11747 metadata_process.scheduler = self._sched_iface
11748 metadata_process.addExitListener(self._metadata_exit)
11749 metadata_process.start()
11752 def _metadata_exit(self, metadata_process):
11754 if metadata_process.returncode != os.EX_OK:
11755 self.returncode = 1
11756 self._error_count += 1
11757 self._valid_pkgs.discard(metadata_process.cpv)
11758 portage.writemsg("Error processing %s, continuing...\n" % \
11759 (metadata_process.cpv,), noiselevel=-1)
11761 if self._consumer is not None:
11762 # On failure, still notify the consumer (in this case the metadata
11763 # argument is None).
11764 self._consumer(metadata_process.cpv,
11765 metadata_process.ebuild_path,
11766 metadata_process.repo_path,
11767 metadata_process.metadata)
11771 class UninstallFailure(portage.exception.PortageException):
11773 An instance of this class is raised by unmerge() when
11774 an uninstallation fails.
11777 def __init__(self, *pargs):
11778 portage.exception.PortageException.__init__(self, pargs)
11780 self.status = pargs[0]
11782 def unmerge(root_config, myopts, unmerge_action,
11783 unmerge_files, ldpath_mtimes, autoclean=0,
11784 clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11785 scheduler=None, writemsg_level=portage.util.writemsg_level):
11787 quiet = "--quiet" in myopts
11788 settings = root_config.settings
11789 sets = root_config.sets
11790 vartree = root_config.trees["vartree"]
11791 candidate_catpkgs=[]
11793 xterm_titles = "notitles" not in settings.features
11794 out = portage.output.EOutput()
11796 db_keys = list(vartree.dbapi._aux_cache_keys)
11799 pkg = pkg_cache.get(cpv)
11801 pkg = Package(cpv=cpv, installed=True,
11802 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11803 root_config=root_config,
11804 type_name="installed")
11805 pkg_cache[cpv] = pkg
11808 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11810 # At least the parent needs to exist for the lock file.
11811 portage.util.ensure_dirs(vdb_path)
11812 except portage.exception.PortageException:
11816 if os.access(vdb_path, os.W_OK):
11817 vdb_lock = portage.locks.lockdir(vdb_path)
11818 realsyslist = sets["system"].getAtoms()
11820 for x in realsyslist:
11821 mycp = portage.dep_getkey(x)
11822 if mycp in settings.getvirtuals():
11824 for provider in settings.getvirtuals()[mycp]:
11825 if vartree.dbapi.match(provider):
11826 providers.append(provider)
11827 if len(providers) == 1:
11828 syslist.extend(providers)
11830 syslist.append(mycp)
11832 mysettings = portage.config(clone=settings)
11834 if not unmerge_files:
11835 if unmerge_action == "unmerge":
11837 print bold("emerge unmerge") + " can only be used with specific package names"
11843 localtree = vartree
11844 # process all arguments and add all
11845 # valid db entries to candidate_catpkgs
11847 if not unmerge_files:
11848 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11850 #we've got command-line arguments
11851 if not unmerge_files:
11852 print "\nNo packages to unmerge have been provided.\n"
11854 for x in unmerge_files:
11855 arg_parts = x.split('/')
11856 if x[0] not in [".","/"] and \
11857 arg_parts[-1][-7:] != ".ebuild":
11858 #possible cat/pkg or dep; treat as such
11859 candidate_catpkgs.append(x)
11860 elif unmerge_action in ["prune","clean"]:
11861 print "\n!!! Prune and clean do not accept individual" + \
11862 " ebuilds as arguments;\n skipping.\n"
11865 # it appears that the user is specifying an installed
11866 # ebuild and we're in "unmerge" mode, so it's ok.
11867 if not os.path.exists(x):
11868 print "\n!!! The path '"+x+"' doesn't exist.\n"
11871 absx = os.path.abspath(x)
11872 sp_absx = absx.split("/")
11873 if sp_absx[-1][-7:] == ".ebuild":
11875 absx = "/".join(sp_absx)
11877 sp_absx_len = len(sp_absx)
11879 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11880 vdb_len = len(vdb_path)
11882 sp_vdb = vdb_path.split("/")
11883 sp_vdb_len = len(sp_vdb)
11885 if not os.path.exists(absx+"/CONTENTS"):
11886 print "!!! Not a valid db dir: "+str(absx)
11889 if sp_absx_len <= sp_vdb_len:
11890 # The Path is shorter... so it can't be inside the vdb.
11893 print "\n!!!",x,"cannot be inside "+ \
11894 vdb_path+"; aborting.\n"
11897 for idx in range(0,sp_vdb_len):
11898 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11901 print "\n!!!", x, "is not inside "+\
11902 vdb_path+"; aborting.\n"
11905 print "="+"/".join(sp_absx[sp_vdb_len:])
11906 candidate_catpkgs.append(
11907 "="+"/".join(sp_absx[sp_vdb_len:]))
11910 if (not "--quiet" in myopts):
11912 if settings["ROOT"] != "/":
11913 writemsg_level(darkgreen(newline+ \
11914 ">>> Using system located in ROOT tree %s\n" % \
11917 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11918 not ("--quiet" in myopts):
11919 writemsg_level(darkgreen(newline+\
11920 ">>> These are the packages that would be unmerged:\n"))
11922 # Preservation of order is required for --depclean and --prune so
11923 # that dependencies are respected. Use all_selected to eliminate
11924 # duplicate packages since the same package may be selected by
11927 all_selected = set()
11928 for x in candidate_catpkgs:
11929 # cycle through all our candidate deps and determine
11930 # what will and will not get unmerged
11932 mymatch = vartree.dbapi.match(x)
11933 except portage.exception.AmbiguousPackageName, errpkgs:
11934 print "\n\n!!! The short ebuild name \"" + \
11935 x + "\" is ambiguous. Please specify"
11936 print "!!! one of the following fully-qualified " + \
11937 "ebuild names instead:\n"
11938 for i in errpkgs[0]:
11939 print " " + green(i)
11943 if not mymatch and x[0] not in "<>=~":
11944 mymatch = localtree.dep_match(x)
11946 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11947 (x, unmerge_action), noiselevel=-1)
11951 {"protected": set(), "selected": set(), "omitted": set()})
11952 mykey = len(pkgmap) - 1
11953 if unmerge_action=="unmerge":
11955 if y not in all_selected:
11956 pkgmap[mykey]["selected"].add(y)
11957 all_selected.add(y)
11958 elif unmerge_action == "prune":
11959 if len(mymatch) == 1:
11961 best_version = mymatch[0]
11962 best_slot = vartree.getslot(best_version)
11963 best_counter = vartree.dbapi.cpv_counter(best_version)
11964 for mypkg in mymatch[1:]:
11965 myslot = vartree.getslot(mypkg)
11966 mycounter = vartree.dbapi.cpv_counter(mypkg)
11967 if (myslot == best_slot and mycounter > best_counter) or \
11968 mypkg == portage.best([mypkg, best_version]):
11969 if myslot == best_slot:
11970 if mycounter < best_counter:
11971 # On slot collision, keep the one with the
11972 # highest counter since it is the most
11973 # recently installed.
11975 best_version = mypkg
11977 best_counter = mycounter
11978 pkgmap[mykey]["protected"].add(best_version)
11979 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11980 if mypkg != best_version and mypkg not in all_selected)
11981 all_selected.update(pkgmap[mykey]["selected"])
11983 # unmerge_action == "clean"
11985 for mypkg in mymatch:
11986 if unmerge_action == "clean":
11987 myslot = localtree.getslot(mypkg)
11989 # since we're pruning, we don't care about slots
11990 # and put all the pkgs in together
11992 if myslot not in slotmap:
11993 slotmap[myslot] = {}
11994 slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11996 for mypkg in vartree.dbapi.cp_list(
11997 portage.dep_getkey(mymatch[0])):
11998 myslot = vartree.getslot(mypkg)
11999 if myslot not in slotmap:
12000 slotmap[myslot] = {}
12001 slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
12003 for myslot in slotmap:
12004 counterkeys = slotmap[myslot].keys()
12005 if not counterkeys:
12008 pkgmap[mykey]["protected"].add(
12009 slotmap[myslot][counterkeys[-1]])
12010 del counterkeys[-1]
12012 for counter in counterkeys[:]:
12013 mypkg = slotmap[myslot][counter]
12014 if mypkg not in mymatch:
12015 counterkeys.remove(counter)
12016 pkgmap[mykey]["protected"].add(
12017 slotmap[myslot][counter])
12019 #be pretty and get them in order of merge:
12020 for ckey in counterkeys:
12021 mypkg = slotmap[myslot][ckey]
12022 if mypkg not in all_selected:
12023 pkgmap[mykey]["selected"].add(mypkg)
12024 all_selected.add(mypkg)
12025 # ok, now the last-merged package
12026 # is protected, and the rest are selected
12027 numselected = len(all_selected)
12028 if global_unmerge and not numselected:
12029 portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
12032 if not numselected:
12033 portage.writemsg_stdout(
12034 "\n>>> No packages selected for removal by " + \
12035 unmerge_action + "\n")
12039 vartree.dbapi.flush_cache()
12040 portage.locks.unlockdir(vdb_lock)
12042 from portage.sets.base import EditablePackageSet
12044 # generate a list of package sets that are directly or indirectly listed in "world",
12045 # as there is no persistent list of "installed" sets
12046 installed_sets = ["world"]
12051 pos = len(installed_sets)
12052 for s in installed_sets[pos - 1:]:
12055 candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
12058 installed_sets += candidates
12059 installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
12062 # we don't want to unmerge packages that are still listed in user-editable package sets
12063 # listed in "world" as they would be remerged on the next update of "world" or the
12064 # relevant package sets.
12065 unknown_sets = set()
12066 for cp in xrange(len(pkgmap)):
12067 for cpv in pkgmap[cp]["selected"].copy():
12071 # It could have been uninstalled
12072 # by a concurrent process.
12075 if unmerge_action != "clean" and \
12076 root_config.root == "/" and \
12077 portage.match_from_list(
12078 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
12079 msg = ("Not unmerging package %s since there is no valid " + \
12080 "reason for portage to unmerge itself.") % (pkg.cpv,)
12081 for line in textwrap.wrap(msg, 75):
12083 # adjust pkgmap so the display output is correct
12084 pkgmap[cp]["selected"].remove(cpv)
12085 all_selected.remove(cpv)
12086 pkgmap[cp]["protected"].add(cpv)
12090 for s in installed_sets:
12091 # skip sets that the user requested to unmerge, and skip world
12092 # unless we're unmerging a package set (as the package would be
12093 # removed from "world" later on)
12094 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
12098 if s in unknown_sets:
12100 unknown_sets.add(s)
12101 out = portage.output.EOutput()
12102 out.eerror(("Unknown set '@%s' in " + \
12103 "%svar/lib/portage/world_sets") % \
12104 (s, root_config.root))
12107 # only check instances of EditablePackageSet as other classes are generally used for
12108 # special purposes and can be ignored here (and are usually generated dynamically, so the
12109 # user can't do much about them anyway)
12110 if isinstance(sets[s], EditablePackageSet):
12112 # This is derived from a snippet of code in the
12113 # depgraph._iter_atoms_for_pkg() method.
12114 for atom in sets[s].iterAtomsForPackage(pkg):
12115 inst_matches = vartree.dbapi.match(atom)
12116 inst_matches.reverse() # descending order
12118 for inst_cpv in inst_matches:
12120 inst_pkg = _pkg(inst_cpv)
12122 # It could have been uninstalled
12123 # by a concurrent process.
12126 if inst_pkg.cp != atom.cp:
12128 if pkg >= inst_pkg:
12129 # This is descending order, and we're not
12130 # interested in any versions <= pkg given.
12132 if pkg.slot_atom != inst_pkg.slot_atom:
12133 higher_slot = inst_pkg
12135 if higher_slot is None:
12139 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
12140 #print colorize("WARN", "but still listed in the following package sets:")
12141 #print " %s\n" % ", ".join(parents)
12142 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
12143 print colorize("WARN", "still referenced by the following package sets:")
12144 print " %s\n" % ", ".join(parents)
12145 # adjust pkgmap so the display output is correct
12146 pkgmap[cp]["selected"].remove(cpv)
12147 all_selected.remove(cpv)
12148 pkgmap[cp]["protected"].add(cpv)
12152 numselected = len(all_selected)
12153 if not numselected:
12155 "\n>>> No packages selected for removal by " + \
12156 unmerge_action + "\n")
12159 # Unmerge order only matters in some cases
12163 selected = d["selected"]
12166 cp = portage.cpv_getkey(iter(selected).next())
12167 cp_dict = unordered.get(cp)
12168 if cp_dict is None:
12170 unordered[cp] = cp_dict
12173 for k, v in d.iteritems():
12174 cp_dict[k].update(v)
12175 pkgmap = [unordered[cp] for cp in sorted(unordered)]
12177 for x in xrange(len(pkgmap)):
12178 selected = pkgmap[x]["selected"]
12181 for mytype, mylist in pkgmap[x].iteritems():
12182 if mytype == "selected":
12184 mylist.difference_update(all_selected)
12185 cp = portage.cpv_getkey(iter(selected).next())
12186 for y in localtree.dep_match(cp):
12187 if y not in pkgmap[x]["omitted"] and \
12188 y not in pkgmap[x]["selected"] and \
12189 y not in pkgmap[x]["protected"] and \
12190 y not in all_selected:
12191 pkgmap[x]["omitted"].add(y)
12192 if global_unmerge and not pkgmap[x]["selected"]:
12193 #avoid cluttering the preview printout with stuff that isn't getting unmerged
12195 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
12196 writemsg_level(colorize("BAD","\a\n\n!!! " + \
12197 "'%s' is part of your system profile.\n" % cp),
12198 level=logging.WARNING, noiselevel=-1)
12199 writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
12200 "be damaging to your system.\n\n"),
12201 level=logging.WARNING, noiselevel=-1)
12202 if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
12203 countdown(int(settings["EMERGE_WARNING_DELAY"]),
12204 colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
12206 writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
12208 writemsg_level(bold(cp) + ": ", noiselevel=-1)
12209 for mytype in ["selected","protected","omitted"]:
12211 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
12212 if pkgmap[x][mytype]:
12213 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
12214 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12215 for pn, ver, rev in sorted_pkgs:
12219 myversion = ver + "-" + rev
12220 if mytype == "selected":
12222 colorize("UNMERGE_WARN", myversion + " "),
12226 colorize("GOOD", myversion + " "), noiselevel=-1)
12228 writemsg_level("none ", noiselevel=-1)
12230 writemsg_level("\n", noiselevel=-1)
12232 writemsg_level("\n", noiselevel=-1)
12234 writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12235 " packages are slated for removal.\n")
12236 writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12237 " and " + colorize("GOOD", "'omitted'") + \
12238 " packages will not be removed.\n\n")
12240 if "--pretend" in myopts:
12241 #we're done... return
12243 if "--ask" in myopts:
12244 if userquery("Would you like to unmerge these packages?")=="No":
12245 # enter pretend mode for correct formatting of results
12246 myopts["--pretend"] = True
12251 #the real unmerging begins, after a short delay....
12252 if clean_delay and not autoclean:
12253 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12255 for x in xrange(len(pkgmap)):
12256 for y in pkgmap[x]["selected"]:
12257 writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12258 emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12259 mysplit = y.split("/")
12261 retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12262 mysettings, unmerge_action not in ["clean","prune"],
12263 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12264 scheduler=scheduler)
12266 if retval != os.EX_OK:
12267 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12269 raise UninstallFailure(retval)
12272 if clean_world and hasattr(sets["world"], "cleanPackage"):
12273 sets["world"].cleanPackage(vartree.dbapi, y)
12274 emergelog(xterm_titles, " >>> unmerge success: "+y)
12275 if clean_world and hasattr(sets["world"], "remove"):
12276 for s in root_config.setconfig.active:
12277 sets["world"].remove(SETPREFIX+s)
12280 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12282 if os.path.exists("/usr/bin/install-info"):
12283 out = portage.output.EOutput()
12288 inforoot=normpath(root+z)
12289 if os.path.isdir(inforoot):
12290 infomtime = long(os.stat(inforoot).st_mtime)
12291 if inforoot not in prev_mtimes or \
12292 prev_mtimes[inforoot] != infomtime:
12293 regen_infodirs.append(inforoot)
12295 if not regen_infodirs:
12296 portage.writemsg_stdout("\n")
12297 out.einfo("GNU info directory index is up-to-date.")
12299 portage.writemsg_stdout("\n")
12300 out.einfo("Regenerating GNU info directory index...")
12302 dir_extensions = ("", ".gz", ".bz2")
12306 for inforoot in regen_infodirs:
12310 if not os.path.isdir(inforoot) or \
12311 not os.access(inforoot, os.W_OK):
12314 file_list = os.listdir(inforoot)
12316 dir_file = os.path.join(inforoot, "dir")
12317 moved_old_dir = False
12318 processed_count = 0
12319 for x in file_list:
12320 if x.startswith(".") or \
12321 os.path.isdir(os.path.join(inforoot, x)):
12323 if x.startswith("dir"):
12325 for ext in dir_extensions:
12326 if x == "dir" + ext or \
12327 x == "dir" + ext + ".old":
12332 if processed_count == 0:
12333 for ext in dir_extensions:
12335 os.rename(dir_file + ext, dir_file + ext + ".old")
12336 moved_old_dir = True
12337 except EnvironmentError, e:
12338 if e.errno != errno.ENOENT:
12341 processed_count += 1
12342 myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12343 existsstr="already exists, for file `"
12345 if re.search(existsstr,myso):
12346 # Already exists... Don't increment the count for this.
12348 elif myso[:44]=="install-info: warning: no info dir entry in ":
12349 # This info file doesn't contain a DIR-header: install-info produces this
12350 # (harmless) warning (the --quiet switch doesn't seem to work).
12351 # Don't increment the count for this.
12354 badcount=badcount+1
12355 errmsg += myso + "\n"
12358 if moved_old_dir and not os.path.exists(dir_file):
12359 # We didn't generate a new dir file, so put the old file
12360 # back where it was originally found.
12361 for ext in dir_extensions:
12363 os.rename(dir_file + ext + ".old", dir_file + ext)
12364 except EnvironmentError, e:
12365 if e.errno != errno.ENOENT:
12369 # Clean dir.old cruft so that they don't prevent
12370 # unmerge of otherwise empty directories.
12371 for ext in dir_extensions:
12373 os.unlink(dir_file + ext + ".old")
12374 except EnvironmentError, e:
12375 if e.errno != errno.ENOENT:
12379 #update mtime so we can potentially avoid regenerating.
12380 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12383 out.eerror("Processed %d info files; %d errors." % \
12384 (icount, badcount))
12385 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12388 out.einfo("Processed %d info files." % (icount,))
12391 def display_news_notification(root_config, myopts):
12392 target_root = root_config.root
12393 trees = root_config.trees
12394 settings = trees["vartree"].settings
12395 portdb = trees["porttree"].dbapi
12396 vardb = trees["vartree"].dbapi
12397 NEWS_PATH = os.path.join("metadata", "news")
12398 UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12399 newsReaderDisplay = False
12400 update = "--pretend" not in myopts
12402 for repo in portdb.getRepositories():
12403 unreadItems = checkUpdatedNewsItems(
12404 portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12406 if not newsReaderDisplay:
12407 newsReaderDisplay = True
12409 print colorize("WARN", " * IMPORTANT:"),
12410 print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12413 if newsReaderDisplay:
12414 print colorize("WARN", " *"),
12415 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
12418 def display_preserved_libs(vardbapi):
12421 # Ensure the registry is consistent with existing files.
12422 vardbapi.plib_registry.pruneNonExisting()
12424 if vardbapi.plib_registry.hasEntries():
12426 print colorize("WARN", "!!!") + " existing preserved libs:"
12427 plibdata = vardbapi.plib_registry.getPreservedLibs()
12428 linkmap = vardbapi.linkmap
12431 linkmap_broken = False
12435 except portage.exception.CommandNotFound, e:
12436 writemsg_level("!!! Command Not Found: %s\n" % (e,),
12437 level=logging.ERROR, noiselevel=-1)
12439 linkmap_broken = True
12441 search_for_owners = set()
12442 for cpv in plibdata:
12443 internal_plib_keys = set(linkmap._obj_key(f) \
12444 for f in plibdata[cpv])
12445 for f in plibdata[cpv]:
12446 if f in consumer_map:
12449 for c in linkmap.findConsumers(f):
12450 # Filter out any consumers that are also preserved libs
12451 # belonging to the same package as the provider.
12452 if linkmap._obj_key(c) not in internal_plib_keys:
12453 consumers.append(c)
12455 consumer_map[f] = consumers
12456 search_for_owners.update(consumers[:MAX_DISPLAY+1])
12458 owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12460 for cpv in plibdata:
12461 print colorize("WARN", ">>>") + " package: %s" % cpv
12463 for f in plibdata[cpv]:
12464 obj_key = linkmap._obj_key(f)
12465 alt_paths = samefile_map.get(obj_key)
12466 if alt_paths is None:
12468 samefile_map[obj_key] = alt_paths
12471 for alt_paths in samefile_map.itervalues():
12472 alt_paths = sorted(alt_paths)
12473 for p in alt_paths:
12474 print colorize("WARN", " * ") + " - %s" % (p,)
12476 consumers = consumer_map.get(f, [])
12477 for c in consumers[:MAX_DISPLAY]:
12478 print colorize("WARN", " * ") + " used by %s (%s)" % \
12479 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12480 if len(consumers) == MAX_DISPLAY + 1:
12481 print colorize("WARN", " * ") + " used by %s (%s)" % \
12482 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12483 for x in owners.get(consumers[MAX_DISPLAY], [])))
12484 elif len(consumers) > MAX_DISPLAY:
12485 print colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY)
12486 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12489 def _flush_elog_mod_echo():
12491 Dump the mod_echo output now so that our other
12492 notifications are shown last.
12494 @returns: True if messages were shown, False otherwise.
12496 messages_shown = False
12498 from portage.elog import mod_echo
12499 except ImportError:
12500 pass # happens during downgrade to a version without the module
12502 messages_shown = bool(mod_echo._items)
12503 mod_echo.finalize()
12504 return messages_shown
# End-of-merge housekeeping: regenerate settings, log the exit status,
# flush elog output, and (when the vdb actually changed) update info files,
# warn about CONFIG_PROTECT'ed files, show news and preserved-libs notices.
12506 def post_emerge(root_config, myopts, mtimedb, retval):
12508 Misc. things to run at the end of a merge session.
12511 Update Config Files
12514 Display preserved libs warnings
12517 @param trees: A dictionary mapping each ROOT to it's package databases
12519 @param mtimedb: The mtimeDB to store data needed across merge invocations
12520 @type mtimedb: MtimeDB class instance
12521 @param retval: Emerge's return value
12525 1. Calls sys.exit(retval)
12528 target_root = root_config.root
12529 trees = { target_root : root_config.trees }
12530 vardbapi = trees[target_root]["vartree"].dbapi
12531 settings = vardbapi.settings
# Per-ROOT mtimes of processed GNU info dirs, persisted across runs.
12532 info_mtimes = mtimedb["info"]
12534 # Load the most current variables from ${ROOT}/etc/profile.env
12537 settings.regenerate()
12540 config_protect = settings.get("CONFIG_PROTECT","").split()
12541 infodirs = settings.get("INFOPATH","").split(":") + \
12542 settings.get("INFODIR","").split(":")
12546 if retval == os.EX_OK:
12547 exit_msg = " *** exiting successfully."
12549 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12550 emergelog("notitles" not in settings.features, exit_msg)
12552 _flush_elog_mod_echo()
# Fast path: if the vdb counter hash is unchanged (or this was a
# --pretend run), nothing was merged, so only show the news notice.
12554 counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12555 if "--pretend" in myopts or (counter_hash is not None and \
12556 counter_hash == vardbapi._counter_hash()):
12557 display_news_notification(root_config, myopts)
12558 # If vdb state has not changed then there's nothing else to do.
12561 vdb_path = os.path.join(target_root, portage.VDB_PATH)
12562 portage.util.ensure_dirs(vdb_path)
# Only take the vdb lock when we can write and this is not --pretend;
# the unlock below presumably sits in an elided try/finally.
12564 if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
12565 vdb_lock = portage.locks.lockdir(vdb_path)
12569 if "noinfo" not in settings.features:
12570 chk_updated_info_files(target_root,
12571 infodirs, info_mtimes, retval)
12575 portage.locks.unlockdir(vdb_lock)
12577 chk_updated_cfg_files(target_root, config_protect)
12579 display_news_notification(root_config, myopts)
12580 if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
12581 display_preserved_libs(vardbapi)
# Scan each CONFIG_PROTECT path under target_root for pending ._cfg????_*
# update files (via find(1)) and print a warning telling the user how many
# config files need updating.
12586 def chk_updated_cfg_files(target_root, config_protect):
12588 #number of directories with some protect files in them
12590 for x in config_protect:
# CONFIG_PROTECT entries are relative to ROOT; re-anchor them.
12591 x = os.path.join(target_root, x.lstrip(os.path.sep))
12592 if not os.access(x, os.W_OK):
12593 # Avoid Permission denied errors generated
12597 mymode = os.lstat(x).st_mode
12600 if stat.S_ISLNK(mymode):
12601 # We want to treat it like a directory if it
12602 # is a symlink to an existing directory.
12604 real_mode = os.stat(x).st_mode
12605 if stat.S_ISDIR(real_mode):
# For a directory, recurse (pruning hidden dirs); for a single file,
# look only in its parent for a ._cfg????_<name> twin.
12609 if stat.S_ISDIR(mymode):
12610 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12612 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12613 os.path.split(x.rstrip(os.path.sep))
12614 mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12615 a = commands.getstatusoutput(mycommand)
12617 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12619 # Show the error message alone, sending stdout to /dev/null.
12620 os.system(mycommand + " 1>/dev/null")
# Output is NUL-delimited (-print0), so split on '\0'.
12622 files = a[1].split('\0')
12623 # split always produces an empty string as the last element
12624 if files and not files[-1]:
12627 print "\n"+colorize("WARN", " * IMPORTANT:"),
12628 if stat.S_ISDIR(mymode):
12629 print "%d config files in '%s' need updating." % \
12632 print "config file '%s' needs updating." % x
# Footer pointing at the emerge(1) man page; presumably printed once
# after the loop (the guard line is elided from this view).
12635 print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12636 " section of the " + bold("emerge")
12637 print " "+yellow("*")+" man page to learn how to update config files."
# Thin wrapper around portage.news.NewsManager: count (and optionally
# update) the unread news items for one repository.
12640 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12643 Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
12644 Returns the number of unread (yet relevent) items.
12646 @param portdb: a portage tree database
12647 @type portdb: pordbapi
12648 @param vardb: an installed package database
12649 @type vardb: vardbapi
12652 @param UNREAD_PATH:
12658 1. The number of unread but relevant news items.
# Imported here (not at module scope) so the news machinery is only
# loaded when actually needed.
12661 from portage.news import NewsManager
12662 manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12663 return manager.getUnreadItems( repo_id, update=update )
# Splice "<category>/" into a category-less atom, just before its first
# word character (so operator prefixes like ">=" stay in front).
# NOTE(review): the surrounding if/else and return statements are elided
# from this view; presumably returns None when no word char is found.
12665 def insert_category_into_atom(atom, category):
12666 alphanum = re.search(r'\w', atom)
12668 ret = atom[:alphanum.start()] + "%s/" % category + \
12669 atom[alphanum.start():]
# Validate a user-supplied package atom, tolerating a missing category by
# temporarily inserting a dummy "cat/" before the first word character.
# NOTE(review): the guard that decides when to insert "cat/" (presumably
# only when no category is present) is elided from this view.
12674 def is_valid_package_atom(x):
12676 alphanum = re.search(r'\w', x)
12678 x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12679 return portage.isvalidatom(x)
# Print a pointer to the Gentoo Handbook section about blocked packages.
12681 def show_blocker_docs_link():
12683 print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12684 print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12686 print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
# Print a pointer to the documentation about masked packages.
12689 def show_mask_docs():
12690 print "For more information, see the MASKED PACKAGES section in the emerge"
12691 print "man page or refer to the Gentoo Handbook."
# Implements `emerge --sync` (and the sync part of `emerge --metadata`).
# Dispatches on the SYNC URI / tree state: existing git checkout -> git pull;
# rsync:// -> timestamp-probed, retrying rsync; cvs:// -> cvs checkout/update.
# Afterwards it reloads the config, transfers metadata cache, applies global
# package moves, runs the user's post_sync hook and nags about portage
# updates. Many interior lines (try/except/else, returns) are elided from
# this review view — comments below are hedged accordingly.
12693 def action_sync(settings, trees, mtimedb, myopts, myaction):
12694 xterm_titles = "notitles" not in settings.features
12695 emergelog(xterm_titles, " === sync")
12696 myportdir = settings.get("PORTDIR", None)
12697 out = portage.output.EOutput()
12699 sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
# Normalize: strip a single trailing slash from PORTDIR.
12701 if myportdir[-1]=="/":
12702 myportdir=myportdir[:-1]
12704 st = os.stat(myportdir)
12708 print ">>>",myportdir,"not found, creating it."
12709 os.makedirs(myportdir,0755)
12710 st = os.stat(myportdir)
12713 spawn_kwargs["env"] = settings.environ()
# FEATURES=usersync: when running privileged but the tree is owned by a
# different uid/gid with owner/group write bits set, drop privileges for
# the spawned sync commands so ownership stays consistent.
12714 if 'usersync' in settings.features and \
12715 portage.data.secpass >= 2 and \
12716 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12717 st.st_gid != os.getgid() and st.st_mode & 0070):
12719 homedir = pwd.getpwuid(st.st_uid).pw_dir
12723 # Drop privileges when syncing, in order to match
12724 # existing uid/gid settings.
12725 spawn_kwargs["uid"] = st.st_uid
12726 spawn_kwargs["gid"] = st.st_gid
12727 spawn_kwargs["groups"] = [st.st_gid]
12728 spawn_kwargs["env"]["HOME"] = homedir
# If the tree is not group-writable, mask group write in the umask too.
12730 if not st.st_mode & 0020:
12731 umask = umask | 0020
12732 spawn_kwargs["umask"] = umask
12734 syncuri = settings.get("SYNC", "").strip()
12736 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12737 noiselevel=-1, level=logging.ERROR)
# Detect an existing VCS checkout inside PORTDIR; this steers the
# dispatch below (git pull) and aborts rsync over a working copy.
12740 vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12741 vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12744 dosyncuri = syncuri
12745 updatecache_flg = False
12746 if myaction == "metadata":
# --metadata: no network sync, just (possibly) regenerate the cache.
12747 print "skipping sync"
12748 updatecache_flg = True
12749 elif ".git" in vcs_dirs:
12750 # Update existing git repository, and ignore the syncuri. We are
12751 # going to trust the user and assume that the user is in the branch
12752 # that he/she wants updated. We'll let the user manage branches with
12754 if portage.process.find_binary("git") is None:
12755 msg = ["Command not found: git",
12756 "Type \"emerge dev-util/git\" to enable git support."]
12758 writemsg_level("!!! %s\n" % l,
12759 level=logging.ERROR, noiselevel=-1)
12761 msg = ">>> Starting git pull in %s..." % myportdir
12762 emergelog(xterm_titles, msg )
12763 writemsg_level(msg + "\n")
12764 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12765 (portage._shell_quote(myportdir),), **spawn_kwargs)
12766 if exitcode != os.EX_OK:
12767 msg = "!!! git pull error in %s." % myportdir
12768 emergelog(xterm_titles, msg)
12769 writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12771 msg = ">>> Git pull in %s successful" % myportdir
12772 emergelog(xterm_titles, msg)
12773 writemsg_level(msg + "\n")
# git discards mtimes, so resynchronize them from the metadata cache.
12774 exitcode = git_sync_timestamps(settings, myportdir)
12775 if exitcode == os.EX_OK:
12776 updatecache_flg = True
12777 elif syncuri[:8]=="rsync://":
# Refuse to rsync over a VCS working copy — --delete would destroy it.
12778 for vcs_dir in vcs_dirs:
12779 writemsg_level(("!!! %s appears to be under revision " + \
12780 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12781 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12783 if not os.path.exists("/usr/bin/rsync"):
12784 print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12785 print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
# Build the rsync option list: hardcoded defaults unless the user set
# PORTAGE_RSYNC_OPTS, in which case required options are re-added.
12790 if settings["PORTAGE_RSYNC_OPTS"] == "":
12791 portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12792 rsync_opts.extend([
12793 "--recursive", # Recurse directories
12794 "--links", # Consider symlinks
12795 "--safe-links", # Ignore links outside of tree
12796 "--perms", # Preserve permissions
12797 "--times", # Preserive mod times
12798 "--compress", # Compress the data transmitted
12799 "--force", # Force deletion on non-empty dirs
12800 "--whole-file", # Don't do block transfers, only entire files
12801 "--delete", # Delete files that aren't in the master tree
12802 "--stats", # Show final statistics about what was transfered
12803 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12804 "--exclude=/distfiles", # Exclude distfiles from consideration
12805 "--exclude=/local", # Exclude local from consideration
12806 "--exclude=/packages", # Exclude packages from consideration
12810 # The below validation is not needed when using the above hardcoded
12813 portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12815 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12816 for opt in ("--recursive", "--times"):
12817 if opt not in rsync_opts:
12818 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12819 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12820 rsync_opts.append(opt)
12822 for exclude in ("distfiles", "local", "packages"):
12823 opt = "--exclude=/%s" % exclude
12824 if opt not in rsync_opts:
12825 portage.writemsg(yellow("WARNING:") + \
12826 " adding required option %s not included in " % opt + \
12827 "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12828 rsync_opts.append(opt)
# Official gentoo.org mirrors additionally require timeout/compress/
# whole-file to be present.
12830 if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12831 def rsync_opt_startswith(opt_prefix):
12832 for x in rsync_opts:
12833 if x.startswith(opt_prefix):
12837 if not rsync_opt_startswith("--timeout="):
12838 rsync_opts.append("--timeout=%d" % mytimeout)
12840 for opt in ("--compress", "--whole-file"):
12841 if opt not in rsync_opts:
12842 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12843 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12844 rsync_opts.append(opt)
12846 if "--quiet" in myopts:
12847 rsync_opts.append("--quiet") # Shut up a lot
12849 rsync_opts.append("--verbose") # Print filelist
12851 if "--verbose" in myopts:
12852 rsync_opts.append("--progress") # Progress meter for each file
12854 if "--debug" in myopts:
12855 rsync_opts.append("--checksum") # Force checksum on all files
12857 # Real local timestamp file.
12858 servertimestampfile = os.path.join(
12859 myportdir, "metadata", "timestamp.chk")
12861 content = portage.util.grabfile(servertimestampfile)
# Parse the local tree's timestamp; bad/absent content leaves it 0
# (the except branch's assignment is elided from this view).
12865 mytimestamp = time.mktime(time.strptime(content[0],
12866 "%a, %d %b %Y %H:%M:%S +0000"))
12867 except (OverflowError, ValueError):
12872 rsync_initial_timeout = \
12873 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12875 rsync_initial_timeout = 15
12878 maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12879 except SystemExit, e:
12880 raise # Needed else can't exit
12882 maxretries=3 #default number of retries
# Split the URI into optional user@, hostname and optional :port so the
# host can later be replaced by a resolved IP.
12885 user_name, hostname, port = re.split(
12886 "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12889 if user_name is None:
12891 updatecache_flg=True
12892 all_rsync_opts = set(rsync_opts)
12893 extra_rsync_opts = shlex.split(
12894 settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12895 all_rsync_opts.update(extra_rsync_opts)
# Honor -4/--ipv4 and -6/--ipv6 when resolving the mirror hostname.
12896 family = socket.AF_INET
12897 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12898 family = socket.AF_INET
12899 elif socket.has_ipv6 and \
12900 ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12901 family = socket.AF_INET6
# Sentinel exit codes used by the retry loop below (distinct from any
# real rsync exit status).
12903 SERVER_OUT_OF_DATE = -1
12904 EXCEEDED_MAX_RETRIES = -2
# Resolve all mirror addresses and shuffle them, so retries rotate
# through mirrors; resolution failures are non-fatal ("Notice").
12910 for addrinfo in socket.getaddrinfo(
12911 hostname, None, family, socket.SOCK_STREAM):
12912 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12913 # IPv6 addresses need to be enclosed in square brackets
12914 ips.append("[%s]" % addrinfo[4][0])
12916 ips.append(addrinfo[4][0])
12917 from random import shuffle
12919 except SystemExit, e:
12920 raise # Needed else can't exit
12921 except Exception, e:
12922 print "Notice:",str(e)
12927 dosyncuri = syncuri.replace(
12928 "//" + user_name + hostname + port + "/",
12929 "//" + user_name + ips[0] + port + "/", 1)
12930 except SystemExit, e:
12931 raise # Needed else can't exit
12932 except Exception, e:
12933 print "Notice:",str(e)
12937 if "--ask" in myopts:
12938 if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12943 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12944 if "--quiet" not in myopts:
12945 print ">>> Starting rsync with "+dosyncuri+"..."
12947 emergelog(xterm_titles,
12948 ">>> Starting retry %d of %d with %s" % \
12949 (retries,maxretries,dosyncuri))
12950 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12952 if mytimestamp != 0 and "--quiet" not in myopts:
12953 print ">>> Checking server timestamp ..."
12955 rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12957 if "--debug" in myopts:
12960 exitcode = os.EX_OK
12961 servertimestamp = 0
12962 # Even if there's no timestamp available locally, fetch the
12963 # timestamp anyway as an initial probe to verify that the server is
12964 # responsive. This protects us from hanging indefinitely on a
12965 # connection attempt to an unresponsive server which rsync's
12966 # --timeout option does not prevent.
12968 # Temporary file for remote server timestamp comparison.
12969 from tempfile import mkstemp
12970 fd, tmpservertimestampfile = mkstemp()
12972 mycommand = rsynccommand[:]
12973 mycommand.append(dosyncuri.rstrip("/") + \
12974 "/metadata/timestamp.chk")
12975 mycommand.append(tmpservertimestampfile)
# SIGALRM guards the initial connection attempt, which rsync's own
# --timeout does not cover.
12979 def timeout_handler(signum, frame):
12980 raise portage.exception.PortageException("timed out")
12981 signal.signal(signal.SIGALRM, timeout_handler)
12982 # Timeout here in case the server is unresponsive. The
12983 # --timeout rsync option doesn't apply to the initial
12984 # connection attempt.
12985 if rsync_initial_timeout:
12986 signal.alarm(rsync_initial_timeout)
12988 mypids.extend(portage.process.spawn(
12989 mycommand, env=settings.environ(), returnpid=True))
12990 exitcode = os.waitpid(mypids[0], 0)[1]
12991 content = portage.grabfile(tmpservertimestampfile)
12993 if rsync_initial_timeout:
12996 os.unlink(tmpservertimestampfile)
# Alarm fired: kill the probe rsync if it is still running.
12999 except portage.exception.PortageException, e:
13003 if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
13004 os.kill(mypids[0], signal.SIGTERM)
13005 os.waitpid(mypids[0], 0)
13006 # This is the same code rsync uses for timeout.
# Convert the raw waitpid status into a shell-style exit code
# (signal deaths shifted into the high byte).
13009 if exitcode != os.EX_OK:
13010 if exitcode & 0xff:
13011 exitcode = (exitcode & 0xff) << 8
13013 exitcode = exitcode >> 8
13015 portage.process.spawned_pids.remove(mypids[0])
13018 servertimestamp = time.mktime(time.strptime(
13019 content[0], "%a, %d %b %Y %H:%M:%S +0000"))
13020 except (OverflowError, ValueError):
13022 del mycommand, mypids, content
13023 if exitcode == os.EX_OK:
13024 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
# Server and local tree are identical: skip the full transfer.
13025 emergelog(xterm_titles,
13026 ">>> Cancelling sync -- Already current.")
13029 print ">>> Timestamps on the server and in the local repository are the same."
13030 print ">>> Cancelling all further sync action. You are already up to date."
13032 print ">>> In order to force sync, remove '%s'." % servertimestampfile
13036 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
13037 emergelog(xterm_titles,
13038 ">>> Server out of date: %s" % dosyncuri)
13041 print ">>> SERVER OUT OF DATE: %s" % dosyncuri
13043 print ">>> In order to force sync, remove '%s'." % servertimestampfile
13046 exitcode = SERVER_OUT_OF_DATE
13047 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
# Server is newer (or no timestamps): run the real full-tree rsync.
13049 mycommand = rsynccommand + [dosyncuri+"/", myportdir]
13050 exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
# These rsync exit codes are treated as terminal (0 = success,
# the rest are hard errors); others fall through to a retry.
13051 if exitcode in [0,1,3,4,11,14,20,21]:
13053 elif exitcode in [1,3,4,11,14,20,21]:
13056 # Code 2 indicates protocol incompatibility, which is expected
13057 # for servers with protocol < 29 that don't support
13058 # --prune-empty-directories. Retry for a server that supports
13059 # at least rsync protocol version 29 (>=rsync-2.6.4).
13064 if retries<=maxretries:
13065 print ">>> Retrying..."
13070 updatecache_flg=False
13071 exitcode = EXCEEDED_MAX_RETRIES
13075 emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
13076 elif exitcode == SERVER_OUT_OF_DATE:
13078 elif exitcode == EXCEEDED_MAX_RETRIES:
13080 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
# Human-readable diagnosis of the final rsync failure (the exit-code
# dispatch lines are elided from this view).
13085 msg.append("Rsync has reported that there is a syntax error. Please ensure")
13086 msg.append("that your SYNC statement is proper.")
13087 msg.append("SYNC=" + settings["SYNC"])
13089 msg.append("Rsync has reported that there is a File IO error. Normally")
13090 msg.append("this means your disk is full, but can be caused by corruption")
13091 msg.append("on the filesystem that contains PORTDIR. Please investigate")
13092 msg.append("and try again after the problem has been fixed.")
13093 msg.append("PORTDIR=" + settings["PORTDIR"])
13095 msg.append("Rsync was killed before it finished.")
13097 msg.append("Rsync has not successfully finished. It is recommended that you keep")
13098 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
13099 msg.append("to use rsync due to firewall or other restrictions. This should be a")
13100 msg.append("temporary problem unless complications exist with your network")
13101 msg.append("(and possibly your system's filesystem) configuration.")
13105 elif syncuri[:6]=="cvs://":
13106 if not os.path.exists("/usr/bin/cvs"):
13107 print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
13108 print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
13110 cvsroot=syncuri[6:]
13111 cvsdir=os.path.dirname(myportdir)
13112 if not os.path.exists(myportdir+"/CVS"):
# First sync: fresh checkout of gentoo-x86, then rename into PORTDIR.
13114 print ">>> Starting initial cvs checkout with "+syncuri+"..."
13115 if os.path.exists(cvsdir+"/gentoo-x86"):
13116 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
13119 os.rmdir(myportdir)
13121 if e.errno != errno.ENOENT:
13123 "!!! existing '%s' directory; exiting.\n" % myportdir)
13126 if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
13127 print "!!! cvs checkout error; exiting."
13129 os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
13132 print ">>> Starting cvs update with "+syncuri+"..."
13133 retval = portage.process.spawn_bash(
13134 "cd %s; cvs -z0 -q update -dP" % \
13135 (portage._shell_quote(myportdir),), **spawn_kwargs)
13136 if retval != os.EX_OK:
13138 dosyncuri = syncuri
13140 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
13141 noiselevel=-1, level=logging.ERROR)
# Post-sync: metadata cache transfer honors FEATURES=metadata-transfer.
13144 if updatecache_flg and \
13145 myaction != "metadata" and \
13146 "metadata-transfer" not in settings.features:
13147 updatecache_flg = False
13149 # Reload the whole config from scratch.
13150 settings, trees, mtimedb = load_emerge_config(trees=trees)
13151 root_config = trees[settings["ROOT"]]["root_config"]
13152 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13154 if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
13155 action_metadata(settings, portdb, myopts)
# Apply global package moves/updates; reload config again if any applied.
13157 if portage._global_updates(trees, mtimedb["updates"]):
13159 # Reload the whole config from scratch.
13160 settings, trees, mtimedb = load_emerge_config(trees=trees)
13161 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13162 root_config = trees[settings["ROOT"]]["root_config"]
# Compare the best visible portage version against the installed one to
# decide whether to nag the user below.
13164 mybestpv = portdb.xmatch("bestmatch-visible",
13165 portage.const.PORTAGE_PACKAGE_ATOM)
13166 mypvs = portage.best(
13167 trees[settings["ROOT"]]["vartree"].dbapi.match(
13168 portage.const.PORTAGE_PACKAGE_ATOM))
13170 chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
13172 if myaction != "metadata":
# Run the user's executable post_sync hook, passing the sync URI.
13173 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
13174 retval = portage.process.spawn(
13175 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
13176 dosyncuri], env=settings.environ())
13177 if retval != os.EX_OK:
13178 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
13180 if(mybestpv != mypvs) and not "--quiet" in myopts:
13182 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
13183 print red(" * ")+"that you update portage now, before any other packages are updated."
13185 print red(" * ")+"To update portage, run 'emerge portage' now."
13188 display_news_notification(root_config, myopts)
# After a git sync, restore meaningful mtimes on ebuilds and eclasses from
# the metadata cache (git does not preserve timestamps). Files modified
# relative to HEAD are skipped, as are entries whose cache data is missing
# or inconsistent.
13191 def git_sync_timestamps(settings, portdir):
13193 Since git doesn't preserve timestamps, synchronize timestamps between
13194 entries and ebuilds/eclasses. Assume the cache has the correct timestamp
13195 for a given file as long as the file in the working tree is not modified
13196 (relative to HEAD).
13198 cache_dir = os.path.join(portdir, "metadata", "cache")
13199 if not os.path.isdir(cache_dir):
13201 writemsg_level(">>> Synchronizing timestamps...\n")
13203 from portage.cache.cache_errors import CacheError
13205 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
13206 portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13207 except CacheError, e:
13208 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
13209 level=logging.ERROR, noiselevel=-1)
13212 ec_dir = os.path.join(portdir, "eclass")
# Known eclass names, with the ".eclass" suffix (7 chars) stripped.
13214 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13215 if f.endswith(".eclass"))
13217 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13218 level=logging.ERROR, noiselevel=-1)
# Ask git which tracked files are modified (diff-filter=M) vs HEAD;
# their cache timestamps cannot be trusted.
13221 args = [portage.const.BASH_BINARY, "-c",
13222 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13223 portage._shell_quote(portdir)]
13225 proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13226 modified_files = set(l.rstrip("\n") for l in proc.stdout)
13228 if rval != os.EX_OK:
13231 modified_eclasses = set(ec for ec in ec_names \
13232 if os.path.join("eclass", ec + ".eclass") in modified_files)
# ec -> mtime already applied, to detect cross-entry inconsistencies.
13234 updated_ec_mtimes = {}
13236 for cpv in cache_db:
13237 cpv_split = portage.catpkgsplit(cpv)
13238 if cpv_split is None:
13239 writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13240 level=logging.ERROR, noiselevel=-1)
13243 cat, pn, ver, rev = cpv_split
13244 cat, pf = portage.catsplit(cpv)
13245 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
# Skip ebuilds with uncommitted modifications.
13246 if relative_eb_path in modified_files:
13250 cache_entry = cache_db[cpv]
13251 eb_mtime = cache_entry.get("_mtime_")
13252 ec_mtimes = cache_entry.get("_eclasses_")
13254 writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13255 level=logging.ERROR, noiselevel=-1)
13257 except CacheError, e:
13258 writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13259 (cpv, e), level=logging.ERROR, noiselevel=-1)
13262 if eb_mtime is None:
13263 writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13264 level=logging.ERROR, noiselevel=-1)
13268 eb_mtime = long(eb_mtime)
13270 writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13271 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13274 if ec_mtimes is None:
13275 writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13276 level=logging.ERROR, noiselevel=-1)
# Entry depends on a locally-modified eclass: its timestamps are stale.
13279 if modified_eclasses.intersection(ec_mtimes):
13282 missing_eclasses = set(ec_mtimes).difference(ec_names)
13283 if missing_eclasses:
13284 writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13285 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13289 eb_path = os.path.join(portdir, relative_eb_path)
13291 current_eb_mtime = os.stat(eb_path)
13293 writemsg_level("!!! Missing ebuild: %s\n" % \
13294 (cpv,), level=logging.ERROR, noiselevel=-1)
# Reject the entry if any of its eclasses was already set to a
# different mtime by a previous cache entry.
13297 inconsistent = False
13298 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13299 updated_mtime = updated_ec_mtimes.get(ec)
13300 if updated_mtime is not None and updated_mtime != ec_mtime:
13301 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13302 (cpv, ec), level=logging.ERROR, noiselevel=-1)
13303 inconsistent = True
# Apply the cached mtimes to the ebuild and to each eclass not yet
# updated in this pass.
13309 if current_eb_mtime != eb_mtime:
13310 os.utime(eb_path, (eb_mtime, eb_mtime))
13312 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13313 if ec in updated_ec_mtimes:
13315 ec_path = os.path.join(ec_dir, ec + ".eclass")
13316 current_mtime = long(os.stat(ec_path).st_mtime)
13317 if current_mtime != ec_mtime:
13318 os.utime(ec_path, (ec_mtime, ec_mtime))
13319 updated_ec_mtimes[ec] = ec_mtime
# Implements `emerge --metadata`: mirror the tree's metadata/cache into the
# local depcache via portage.cache.util.mirror_cache, with an optional
# percentage progress display.
13323 def action_metadata(settings, portdb, myopts):
13324 portage.writemsg_stdout("\n>>> Updating Portage cache: ")
13325 old_umask = os.umask(0002)
13326 cachedir = os.path.normpath(settings.depcachedir)
# Safety net: refuse to operate if PORTAGE_DEPCACHEDIR points at a
# primary system directory (cache maintenance could be destructive).
13327 if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
13328 "/lib", "/opt", "/proc", "/root", "/sbin",
13329 "/sys", "/tmp", "/usr", "/var"]:
13330 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13331 "ROOT DIRECTORY ON YOUR SYSTEM."
13332 print >> sys.stderr, \
13333 "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13335 if not os.path.exists(cachedir):
# Source cache: the tree's metadata/cache, opened read-only through the
# configured metadb module; ec maps eclasses for validation.
13338 ec = portage.eclass_cache.cache(portdb.porttree_root)
13339 myportdir = os.path.realpath(settings["PORTDIR"])
13340 cm = settings.load_best_module("portdbapi.metadbmodule")(
13341 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13343 from portage.cache import util
# Progress reporter: iterates all cpvs while printing a percentage,
# overwriting in place with backspaces.
13345 class percentage_noise_maker(util.quiet_mirroring):
13346 def __init__(self, dbapi):
13348 self.cp_all = dbapi.cp_all()
13349 l = len(self.cp_all)
13350 self.call_update_min = 100000000
13351 self.min_cp_all = l/100.0
13355 def __iter__(self):
13356 for x in self.cp_all:
13358 if self.count > self.min_cp_all:
13359 self.call_update_min = 0
13361 for y in self.dbapi.cp_list(x):
13363 self.call_update_mine = 0
13365 def update(self, *arg):
13367 self.pstr = int(self.pstr) + 1
13370 sys.stdout.write("%s%i%%" % \
13371 ("\b" * (len(str(self.pstr))+1), self.pstr))
13373 self.call_update_min = 10000000
13375 def finish(self, *arg):
13376 sys.stdout.write("\b\b\b\b100%\n")
# --quiet uses a silent generator instead of the percentage display.
13379 if "--quiet" in myopts:
13380 def quicky_cpv_generator(cp_all_list):
13381 for x in cp_all_list:
13382 for y in portdb.cp_list(x):
13384 source = quicky_cpv_generator(portdb.cp_all())
13385 noise_maker = portage.cache.util.quiet_mirroring()
13387 noise_maker = source = percentage_noise_maker(portdb)
13388 portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
13389 eclass_cache=ec, verbose_instance=noise_maker)
# Restore the umask changed at function entry.
13392 os.umask(old_umask)
# Implements `emerge --regen`: regenerate all metadata cache entries using
# the parallel MetadataRegen driver; returns its exit code.
13394 def action_regen(settings, portdb, max_jobs, max_load):
13395 xterm_titles = "notitles" not in settings.features
13396 emergelog(xterm_titles, " === regen")
13397 #regenerate cache entries
13398 portage.writemsg_stdout("Regenerating cache entries...\n")
# Close stdin — regeneration is non-interactive. Failure handling lines
# are elided from this view.
13400 os.close(sys.stdin.fileno())
13401 except SystemExit, e:
13402 raise # Needed else can't exit
13407 regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13410 portage.writemsg_stdout("done!\n")
13411 return regen.returncode
# Implements `emerge --config <atom>`: resolve exactly one installed
# package (interactively disambiguating with --ask) and run its ebuild's
# "config" phase, followed by "clean" on success.
13413 def action_config(settings, trees, myopts, myfiles):
13414 if len(myfiles) != 1:
13415 print red("!!! config can only take a single package atom at this time\n")
13417 if not is_valid_package_atom(myfiles[0]):
13418 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13420 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13421 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
# Match against the installed-package database (vartree) only.
13425 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13426 except portage.exception.AmbiguousPackageName, e:
13427 # Multiple matches thrown from cpv_expand
13430 print "No packages found.\n"
13432 elif len(pkgs) > 1:
# Ambiguous atom: with --ask, present a numbered menu (X = cancel);
# without it, just list the matches and bail.
13433 if "--ask" in myopts:
13435 print "Please select a package to configure:"
13439 options.append(str(idx))
13440 print options[-1]+") "+pkg
13442 options.append("X")
13443 idx = userquery("Selection?", options)
13446 pkg = pkgs[int(idx)-1]
13448 print "The following packages available:"
13451 print "\nPlease use a specific atom or the --ask option."
13457 if "--ask" in myopts:
13458 if userquery("Ready to configure "+pkg+"?") == "No":
13461 print "Configuring pkg..."
13463 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13464 mysettings = portage.config(clone=settings)
13465 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13466 debug = mysettings.get("PORTAGE_DEBUG") == "1"
# Run the ebuild "config" phase against the installed package.
# NOTE(review): the debug kwarg here compares a string to int 1
# (== 1, not == "1" as the `debug` local above does) — looks like a
# latent bug; confirm before relying on --debug behavior here.
13467 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13469 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
13470 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13471 if retval == os.EX_OK:
13472 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13473 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
13476 def action_info(settings, trees, myopts, myfiles):
13477 print getportageversion(settings["PORTDIR"], settings["ROOT"],
13478 settings.profile_path, settings["CHOST"],
13479 trees[settings["ROOT"]]["vartree"].dbapi)
13481 header_title = "System Settings"
13483 print header_width * "="
13484 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13485 print header_width * "="
13486 print "System uname: "+platform.platform(aliased=1)
13488 lastSync = portage.grabfile(os.path.join(
13489 settings["PORTDIR"], "metadata", "timestamp.chk"))
13490 print "Timestamp of tree:",
13496 output=commands.getstatusoutput("distcc --version")
13498 print str(output[1].split("\n",1)[0]),
13499 if "distcc" in settings.features:
13504 output=commands.getstatusoutput("ccache -V")
13506 print str(output[1].split("\n",1)[0]),
13507 if "ccache" in settings.features:
13512 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13513 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
13514 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13515 myvars = portage.util.unique_array(myvars)
13519 if portage.isvalidatom(x):
13520 pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13521 pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13522 pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13524 for pn, ver, rev in pkg_matches:
13526 pkgs.append(ver + "-" + rev)
13530 pkgs = ", ".join(pkgs)
13531 print "%-20s %s" % (x+":", pkgs)
13533 print "%-20s %s" % (x+":", "[NOT VALID]")
13535 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
13537 if "--verbose" in myopts:
13538 myvars=settings.keys()
13540 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13541 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13542 'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13543 'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13545 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13547 myvars = portage.util.unique_array(myvars)
13548 use_expand = settings.get('USE_EXPAND', '').split()
13550 use_expand_hidden = set(
13551 settings.get('USE_EXPAND_HIDDEN', '').upper().split())
13552 alphabetical_use = '--alphabetical' in myopts
13553 root_config = trees[settings["ROOT"]]['root_config']
13559 print '%s="%s"' % (x, settings[x])
13561 use = set(settings["USE"].split())
13562 for varname in use_expand:
13563 flag_prefix = varname.lower() + "_"
13564 for f in list(use):
13565 if f.startswith(flag_prefix):
13569 print 'USE="%s"' % " ".join(use),
13570 for varname in use_expand:
13571 myval = settings.get(varname)
13573 print '%s="%s"' % (varname, myval),
13576 unset_vars.append(x)
13578 print "Unset: "+", ".join(unset_vars)
13581 if "--debug" in myopts:
13582 for x in dir(portage):
13583 module = getattr(portage, x)
13584 if "cvs_id_string" in dir(module):
13585 print "%s: %s" % (str(x), str(module.cvs_id_string))
13587 # See if we can find any packages installed matching the strings
13588 # passed on the command line
13590 vardb = trees[settings["ROOT"]]["vartree"].dbapi
13591 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13593 mypkgs.extend(vardb.match(x))
13595 # If some packages were found...
13597 # Get our global settings (we only print stuff if it varies from
13598 # the current config)
13599 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13600 auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
13602 pkgsettings = portage.config(clone=settings)
13604 for myvar in mydesiredvars:
13605 global_vals[myvar] = set(settings.get(myvar, "").split())
13607 # Loop through each package
13608 # Only print settings if they differ from global settings
13609 header_title = "Package Settings"
13610 print header_width * "="
13611 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13612 print header_width * "="
13613 from portage.output import EOutput
13616 # Get all package specific variables
13617 metadata = dict(izip(auxkeys, vardb.aux_get(cpv, auxkeys)))
13618 pkg = Package(built=True, cpv=cpv,
13619 installed=True, metadata=izip(Package.metadata_keys,
13620 (metadata.get(x, '') for x in Package.metadata_keys)),
13621 root_config=root_config, type_name='installed')
13624 valuesmap[k] = set(metadata[k].split())
13627 for myvar in mydesiredvars:
13628 # If the package variable doesn't match the
13629 # current global variable, something has changed
13630 # so set diff_found so we know to print
13631 if valuesmap[myvar] != global_vals[myvar]:
13632 diff_values[myvar] = valuesmap[myvar]
13634 # If a difference was found, print the info for
13637 # Print package info
13638 print "%s was built with the following:" % pkg.cpv
13639 for myvar in mydesiredvars:
13640 if myvar in diff_values:
13641 mylist = list(diff_values[myvar])
13643 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13645 pkgsettings.setcpv(pkg)
13646 forced_flags = set(chain(pkgsettings.useforce,
13647 pkgsettings.usemask))
13648 use = set(pkg.use.enabled)
13649 use.discard(pkgsettings.get('ARCH'))
13650 use_expand_flags = set()
13653 for varname in use_expand:
13654 flag_prefix = varname.lower() + "_"
13656 if f.startswith(flag_prefix):
13657 use_expand_flags.add(f)
13658 use_enabled.setdefault(
13659 varname.upper(), []).append(f[len(flag_prefix):])
13661 for f in pkg.iuse.all:
13662 if f.startswith(flag_prefix):
13663 use_expand_flags.add(f)
13665 use_disabled.setdefault(
13666 varname.upper(), []).append(f[len(flag_prefix):])
13668 var_order = set(use_enabled)
13669 var_order.update(use_disabled)
13670 var_order = sorted(var_order)
13671 var_order.insert(0, 'USE')
13672 use.difference_update(use_expand_flags)
13673 use_enabled['USE'] = list(use)
13674 use_disabled['USE'] = []
13676 for f in pkg.iuse.all:
13677 if f not in use and \
13678 f not in use_expand_flags:
13679 use_disabled['USE'].append(f)
13681 for varname in var_order:
13682 if varname in use_expand_hidden:
13685 for f in use_enabled.get(varname, []):
13686 flags.append(UseFlagDisplay(f, True, f in forced_flags))
13687 for f in use_disabled.get(varname, []):
13688 flags.append(UseFlagDisplay(f, False, f in forced_flags))
13689 if alphabetical_use:
13690 flags.sort(key=cmp_sort_key(UseFlagDisplay.cmp_combined))
13692 flags.sort(key=cmp_sort_key(UseFlagDisplay.cmp_separated))
13693 print '%s="%s"' % (varname, ' '.join(str(f) for f in flags)),
13696 print ">>> Attempting to run pkg_info() for '%s'" % pkg.cpv
13697 ebuildpath = vardb.findname(pkg.cpv)
13698 if not ebuildpath or not os.path.exists(ebuildpath):
13699 out.ewarn("No ebuild found for '%s'" % pkg.cpv)
13701 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13702 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
13703 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
13706 def action_search(root_config, myopts, myfiles, spinner):
# Run `emerge --search`: build a search instance configured from the
# command-line options, execute each search term, then print results.
# NOTE(review): this listing is sampled -- the guard that prints the
# "no search terms" message and the try: opener around execute() are
# among the lines not visible here.
13708 print "emerge: no search terms provided."
13710 searchinstance = search(root_config,
13711 spinner, "--searchdesc" in myopts,
13712 "--quiet" not in myopts, "--usepkg" in myopts,
13713 "--usepkgonly" in myopts)
13714 for mysearch in myfiles:
13716 searchinstance.execute(mysearch)
# Python 2 comma-style except; re.error means the user's search term
# was an invalid regular expression.
13717 except re.error, comment:
13718 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13720 searchinstance.output()
13722 def action_depclean(settings, trees, ldpath_mtimes,
13723 myopts, action, myfiles, spinner):
# Implements `emerge --depclean` and `emerge --prune`: compute the set
# of installed packages not required by the world/system sets (or by
# the argument atoms), then unmerge them in a dependency-safe order.
# NOTE(review): this listing is sampled -- some original lines (loop
# openers, try:/if: headers, returns) are not visible here; the code
# below is annotated as-is, byte-identical.
13724 # Kill packages that aren't explicitly merged or are required as a
13725 # dependency of another package. World file is explicit.
13727 # Global depclean or prune operations are not very safe when there are
13728 # missing dependencies since it's unknown how badly incomplete
13729 # the dependency graph is, and we might accidentally remove packages
13730 # that should have been pulled into the graph. On the other hand, it's
13731 # relatively safe to ignore missing deps when only asked to remove
13732 # specific packages.
13733 allow_missing_deps = len(myfiles) > 0
# Safety advisory shown for a bare `emerge --depclean` (suppressed by
# --quiet or when specific atoms are given; see the check at 13750).
13736 msg.append("Always study the list of packages to be cleaned for any obvious\n")
13737 msg.append("mistakes. Packages that are part of the world set will always\n")
13738 msg.append("be kept. They can be manually added to this set with\n")
13739 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
13740 msg.append("package.provided (see portage(5)) will be removed by\n")
13741 msg.append("depclean, even if they are part of the world set.\n")
13743 msg.append("As a safety measure, depclean will not remove any packages\n")
13744 msg.append("unless *all* required dependencies have been resolved. As a\n")
13745 msg.append("consequence, it is often necessary to run %s\n" % \
13746 good("`emerge --update"))
13747 msg.append(good("--newuse --deep @system @world`") + \
13748 " prior to depclean.\n")
13750 if action == "depclean" and "--quiet" not in myopts and not myfiles:
13751 portage.writemsg_stdout("\n")
13753 portage.writemsg_stdout(colorize("WARN", " * ") + x)
13755 xterm_titles = "notitles" not in settings.features
13756 myroot = settings["ROOT"]
13757 root_config = trees[myroot]["root_config"]
13758 getSetAtoms = root_config.setconfig.getSetAtoms
13759 vardb = trees[myroot]["vartree"].dbapi
13761 required_set_names = ("system", "world")
# Snapshot the system and world sets; membership in these protects a
# package from removal.
13765 for s in required_set_names:
13766 required_sets[s] = InternalPackageSet(
13767 initial_atoms=getSetAtoms(s))
13770 # When removing packages, use a temporary version of world
13771 # which excludes packages that are intended to be eligible for
13773 world_temp_set = required_sets["world"]
13774 system_set = required_sets["system"]
13776 if not system_set or not world_temp_set:
13779 writemsg_level("!!! You have no system list.\n",
13780 level=logging.ERROR, noiselevel=-1)
13782 if not world_temp_set:
13783 writemsg_level("!!! You have no world file.\n",
13784 level=logging.WARNING, noiselevel=-1)
13786 writemsg_level("!!! Proceeding is likely to " + \
13787 "break your installation.\n",
13788 level=logging.WARNING, noiselevel=-1)
13789 if "--pretend" not in myopts:
# Give the user a chance to abort before a risky operation.
13790 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13792 if action == "depclean":
13793 emergelog(xterm_titles, " >>> depclean")
# Validate and expand any package atoms given on the command line.
13796 args_set = InternalPackageSet()
13799 if not is_valid_package_atom(x):
13800 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13801 level=logging.ERROR, noiselevel=-1)
13802 writemsg_level("!!! Please check ebuild(5) for full details.\n")
13805 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13806 except portage.exception.AmbiguousPackageName, e:
13807 msg = "The short ebuild name \"" + x + \
13808 "\" is ambiguous. Please specify " + \
13809 "one of the following " + \
13810 "fully-qualified ebuild names instead:"
13811 for line in textwrap.wrap(msg, 70):
13812 writemsg_level("!!! %s\n" % (line,),
13813 level=logging.ERROR, noiselevel=-1)
13815 writemsg_level("  %s\n" % colorize("INFORM", i),
13816 level=logging.ERROR, noiselevel=-1)
13817 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13820 matched_packages = False
13823 matched_packages = True
13825 if not matched_packages:
13826 writemsg_level(">>> No packages selected for removal by %s\n" % \
13830 writemsg_level("\nCalculating dependencies  ")
13831 resolver_params = create_depgraph_params(myopts, "remove")
13832 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13833 vardb = resolver.trees[myroot]["vartree"].dbapi
13835 if action == "depclean":
13838 # Pull in everything that's installed but not matched
13839 # by an argument atom since we don't want to clean any
13840 # package if something depends on it.
13842 world_temp_set.clear()
13847 if args_set.findAtomForPackage(pkg) is None:
13848 world_temp_set.add("=" + pkg.cpv)
13850 except portage.exception.InvalidDependString, e:
13851 show_invalid_depstring_notice(pkg,
13852 pkg.metadata["PROVIDE"], str(e))
# Keep packages with an invalid PROVIDE rather than risk removing them.
13854 world_temp_set.add("=" + pkg.cpv)
13857 elif action == "prune":
13859 # Pull in everything that's installed since we don't
13860 # to prune a package if something depends on it.
13861 world_temp_set.clear()
13862 world_temp_set.update(vardb.cp_all())
13866 # Try to prune everything that's slotted.
13867 for cp in vardb.cp_all():
13868 if len(vardb.cp_list(cp)) > 1:
13871 # Remove atoms from world that match installed packages
13872 # that are also matched by argument atoms, but do not remove
13873 # them if they match the highest installed version.
13876 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13877 if not pkgs_for_cp or pkg not in pkgs_for_cp:
13878 raise AssertionError("package expected in matches: " + \
13879 "cp = %s, cpv = %s matches = %s" % \
13880 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
# vardb matches are ordered, so the last element is the highest version.
13882 highest_version = pkgs_for_cp[-1]
13883 if pkg == highest_version:
13884 # pkg is the highest version
13885 world_temp_set.add("=" + pkg.cpv)
13888 if len(pkgs_for_cp) <= 1:
13889 raise AssertionError("more packages expected: " + \
13890 "cp = %s, cpv = %s matches = %s" % \
13891 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13894 if args_set.findAtomForPackage(pkg) is None:
13895 world_temp_set.add("=" + pkg.cpv)
13897 except portage.exception.InvalidDependString, e:
13898 show_invalid_depstring_notice(pkg,
13899 pkg.metadata["PROVIDE"], str(e))
13901 world_temp_set.add("=" + pkg.cpv)
# Seed the resolver with the (possibly trimmed) system/world sets so
# everything they require gets pulled into the dependency graph.
13905 for s, package_set in required_sets.iteritems():
13906 set_atom = SETPREFIX + s
13907 set_arg = SetArg(arg=set_atom, set=package_set,
13908 root_config=resolver.roots[myroot])
13909 set_args[s] = set_arg
13910 for atom in set_arg.set:
13911 resolver._dep_stack.append(
13912 Dependency(atom=atom, root=myroot, parent=set_arg))
13913 resolver.digraph.add(set_arg, None)
13915 success = resolver._complete_graph()
13916 writemsg_level("\b\b... done!\n")
13918 resolver.display_problems()
13923 def unresolved_deps():
# Report dependencies the resolver could not satisfy; used below to
# decide whether depclean must abort (unless allow_missing_deps).
13925 unresolvable = set()
13926 for dep in resolver._initially_unsatisfied_deps:
13927 if isinstance(dep.parent, Package) and \
13928 (dep.priority > UnmergeDepPriority.SOFT):
13929 unresolvable.add((dep.atom, dep.parent.cpv))
13931 if not unresolvable:
13934 if unresolvable and not allow_missing_deps:
13935 prefix = bad(" * ")
13937 msg.append("Dependencies could not be completely resolved due to")
13938 msg.append("the following required packages not being installed:")
13940 for atom, parent in unresolvable:
13941 msg.append("  %s pulled in by:" % (atom,))
13942 msg.append("    %s" % (parent,))
13944 msg.append("Have you forgotten to run " + \
13945 good("`emerge --update --newuse --deep @system @world`") + " prior")
13946 msg.append(("to %s? It may be necessary to manually " + \
13947 "uninstall packages that no longer") % action)
13948 msg.append("exist in the portage tree since " + \
13949 "it may not be possible to satisfy their")
13950 msg.append("dependencies. Also, be aware of " + \
13951 "the --with-bdeps option that is documented")
13952 msg.append("in " + good("`man emerge`") + ".")
13953 if action == "prune":
13955 msg.append("If you would like to ignore " + \
13956 "dependencies then use %s." % good("--nodeps"))
13957 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13958 level=logging.ERROR, noiselevel=-1)
13962 if unresolved_deps():
13965 graph = resolver.digraph.copy()
13966 required_pkgs_total = 0
13968 if isinstance(node, Package):
13969 required_pkgs_total += 1
13971 def show_parents(child_node):
# Print the reverse dependencies that keep child_node installed
# (used with --verbose).
13972 parent_nodes = graph.parent_nodes(child_node)
13973 if not parent_nodes:
13974 # With --prune, the highest version can be pulled in without any
13975 # real parent since all installed packages are pulled in. In that
13976 # case there's nothing to show here.
13979 for node in parent_nodes:
13980 parent_strs.append(str(getattr(node, "cpv", node)))
13983 msg.append("  %s pulled in by:\n" % (child_node.cpv,))
13984 for parent_str in parent_strs:
13985 msg.append("    %s\n" % (parent_str,))
13987 portage.writemsg_stdout("".join(msg), noiselevel=-1)
13989 def cmp_pkg_cpv(pkg1, pkg2):
13990 """Sort Package instances by cpv."""
13991 if pkg1.cpv > pkg2.cpv:
13993 elif pkg1.cpv == pkg2.cpv:
13998 def create_cleanlist():
# Select installed packages absent from the dependency graph; those
# are the removal candidates.
13999 pkgs_to_remove = []
14001 if action == "depclean":
14004 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
14007 arg_atom = args_set.findAtomForPackage(pkg)
14008 except portage.exception.InvalidDependString:
14009 # this error has already been displayed by now
14013 if pkg not in graph:
14014 pkgs_to_remove.append(pkg)
14015 elif "--verbose" in myopts:
14019 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
14020 if pkg not in graph:
14021 pkgs_to_remove.append(pkg)
14022 elif "--verbose" in myopts:
14025 elif action == "prune":
14026 # Prune really uses all installed instead of world. It's not
14027 # a real reverse dependency so don't display it as such.
14028 graph.remove(set_args["world"])
14030 for atom in args_set:
14031 for pkg in vardb.match_pkgs(atom):
14032 if pkg not in graph:
14033 pkgs_to_remove.append(pkg)
14034 elif "--verbose" in myopts:
14037 if not pkgs_to_remove:
14039 ">>> No packages selected for removal by %s\n" % action)
14040 if "--verbose" not in myopts:
14042 ">>> To see reverse dependencies, use %s\n" % \
14044 if action == "prune":
14046 ">>> To ignore dependencies, use %s\n" % \
14049 return pkgs_to_remove
14051 cleanlist = create_cleanlist()
14054 clean_set = set(cleanlist)
14056 # Check if any of these package are the sole providers of libraries
14057 # with consumers that have not been selected for removal. If so, these
14058 # packages and any dependencies need to be added to the graph.
14059 real_vardb = trees[myroot]["vartree"].dbapi
14060 linkmap = real_vardb.linkmap
14061 liblist = linkmap.listLibraryObjects()
14062 consumer_cache = {}
14063 provider_cache = {}
14067 writemsg_level(">>> Checking for lib consumers...\n")
14069 for pkg in cleanlist:
14070 pkg_dblink = real_vardb._dblink(pkg.cpv)
14071 provided_libs = set()
14073 for lib in liblist:
14074 if pkg_dblink.isowner(lib, myroot):
14075 provided_libs.add(lib)
14077 if not provided_libs:
14081 for lib in provided_libs:
# consumer_cache avoids repeating the expensive findConsumers()
# call for the same library across packages.
14082 lib_consumers = consumer_cache.get(lib)
14083 if lib_consumers is None:
14084 lib_consumers = linkmap.findConsumers(lib)
14085 consumer_cache[lib] = lib_consumers
14087 consumers[lib] = lib_consumers
# A package consuming its own library is not an external consumer,
# so filter those entries out.
14092 for lib, lib_consumers in consumers.items():
14093 for consumer_file in list(lib_consumers):
14094 if pkg_dblink.isowner(consumer_file, myroot):
14095 lib_consumers.remove(consumer_file)
14096 if not lib_consumers:
14102 for lib, lib_consumers in consumers.iteritems():
14104 soname = soname_cache.get(lib)
14106 soname = linkmap.getSoname(lib)
14107 soname_cache[lib] = soname
14109 consumer_providers = []
14110 for lib_consumer in lib_consumers:
# NOTE(review): lookup key is `lib` while the store key below is
# `lib_consumer` -- looks asymmetric; confirm against upstream.
14111 providers = provider_cache.get(lib)
14112 if providers is None:
14113 providers = linkmap.findProviders(lib_consumer)
14114 provider_cache[lib_consumer] = providers
14115 if soname not in providers:
14116 # Why does this happen?
14118 consumer_providers.append(
14119 (lib_consumer, providers[soname]))
14121 consumers[lib] = consumer_providers
14123 consumer_map[pkg] = consumers
14127 search_files = set()
14128 for consumers in consumer_map.itervalues():
14129 for lib, consumer_providers in consumers.iteritems():
14130 for lib_consumer, providers in consumer_providers:
14131 search_files.add(lib_consumer)
14132 search_files.update(providers)
14134 writemsg_level(">>> Assigning files to packages...\n")
14135 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
14137 for pkg, consumers in consumer_map.items():
14138 for lib, consumer_providers in consumers.items():
14139 lib_consumers = set()
14141 for lib_consumer, providers in consumer_providers:
14142 owner_set = file_owners.get(lib_consumer)
14143 provider_dblinks = set()
14144 provider_pkgs = set()
14146 if len(providers) > 1:
14147 for provider in providers:
14148 provider_set = file_owners.get(provider)
14149 if provider_set is not None:
14150 provider_dblinks.update(provider_set)
14152 if len(provider_dblinks) > 1:
14153 for provider_dblink in provider_dblinks:
14154 pkg_key = ("installed", myroot,
14155 provider_dblink.mycpv, "nomerge")
14156 if pkg_key not in clean_set:
14157 provider_pkgs.add(vardb.get(pkg_key))
14162 if owner_set is not None:
14163 lib_consumers.update(owner_set)
# Consumers that are themselves scheduled for removal don't block it.
14165 for consumer_dblink in list(lib_consumers):
14166 if ("installed", myroot, consumer_dblink.mycpv,
14167 "nomerge") in clean_set:
14168 lib_consumers.remove(consumer_dblink)
14172 consumers[lib] = lib_consumers
14176 del consumer_map[pkg]
14179 # TODO: Implement a package set for rebuilding consumer packages.
14181 msg = "In order to avoid breakage of link level " + \
14182 "dependencies, one or more packages will not be removed. " + \
14183 "This can be solved by rebuilding " + \
14184 "the packages that pulled them in."
14186 prefix = bad(" * ")
14187 from textwrap import wrap
14188 writemsg_level("".join(prefix + "%s\n" % line for \
14189 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
14192 for pkg, consumers in consumer_map.iteritems():
14193 unique_consumers = set(chain(*consumers.values()))
14194 unique_consumers = sorted(consumer.mycpv \
14195 for consumer in unique_consumers)
14197 msg.append("  %s pulled in by:" % (pkg.cpv,))
14198 for consumer in unique_consumers:
14199 msg.append("    %s" % (consumer,))
14201 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
14202 level=logging.WARNING, noiselevel=-1)
14204 # Add lib providers to the graph as children of lib consumers,
14205 # and also add any dependencies pulled in by the provider.
14206 writemsg_level(">>> Adding lib providers to graph...\n")
14208 for pkg, consumers in consumer_map.iteritems():
14209 for consumer_dblink in set(chain(*consumers.values())):
14210 consumer_pkg = vardb.get(("installed", myroot,
14211 consumer_dblink.mycpv, "nomerge"))
14212 if not resolver._add_pkg(pkg,
14213 Dependency(parent=consumer_pkg,
14214 priority=UnmergeDepPriority(runtime=True),
14216 resolver.display_problems()
# The graph changed, so re-resolve and rebuild the clean list.
14219 writemsg_level("\nCalculating dependencies  ")
14220 success = resolver._complete_graph()
14221 writemsg_level("\b\b... done!\n")
14222 resolver.display_problems()
14225 if unresolved_deps():
14228 graph = resolver.digraph.copy()
14229 required_pkgs_total = 0
14231 if isinstance(node, Package):
14232 required_pkgs_total += 1
14233 cleanlist = create_cleanlist()
14236 clean_set = set(cleanlist)
14238 # Use a topological sort to create an unmerge order such that
14239 # each package is unmerged before it's dependencies. This is
14240 # necessary to avoid breaking things that may need to run
14241 # during pkg_prerm or pkg_postrm phases.
14243 # Create a new graph to account for dependencies between the
14244 # packages being unmerged.
14248 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
14249 runtime = UnmergeDepPriority(runtime=True)
14250 runtime_post = UnmergeDepPriority(runtime_post=True)
14251 buildtime = UnmergeDepPriority(buildtime=True)
14253 "RDEPEND": runtime,
14254 "PDEPEND": runtime_post,
14255 "DEPEND": buildtime,
14258 for node in clean_set:
14259 graph.add(node, None)
14261 node_use = node.metadata["USE"].split()
14262 for dep_type in dep_keys:
14263 depstr = node.metadata[dep_type]
# Temporarily relax strict dep checking while evaluating deps of
# packages that are being removed anyway; restored at 14272.
14267 portage.dep._dep_check_strict = False
14268 success, atoms = portage.dep_check(depstr, None, settings,
14269 myuse=node_use, trees=resolver._graph_trees,
14272 portage.dep._dep_check_strict = True
14274 # Ignore invalid deps of packages that will
14275 # be uninstalled anyway.
14278 priority = priority_map[dep_type]
14280 if not isinstance(atom, portage.dep.Atom):
14281 # Ignore invalid atoms returned from dep_check().
14285 matches = vardb.match_pkgs(atom)
14288 for child_node in matches:
14289 if child_node in clean_set:
14290 graph.add(child_node, node, priority=priority)
14293 if len(graph.order) == len(graph.root_nodes()):
14294 # If there are no dependencies between packages
14295 # let unmerge() group them by cat/pn.
14297 cleanlist = [pkg.cpv for pkg in graph.order]
14299 # Order nodes from lowest to highest overall reference count for
14300 # optimal root node selection.
14301 node_refcounts = {}
14302 for node in graph.order:
14303 node_refcounts[node] = len(graph.parent_nodes(node))
14304 def cmp_reference_count(node1, node2):
14305 return node_refcounts[node1] - node_refcounts[node2]
14306 graph.order.sort(key=cmp_sort_key(cmp_reference_count))
14308 ignore_priority_range = [None]
14309 ignore_priority_range.extend(
14310 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
# Repeatedly remove root nodes (packages nothing else in the clean set
# depends on), progressively ignoring weaker dep priorities to break
# circular dependencies.
14311 while not graph.empty():
14312 for ignore_priority in ignore_priority_range:
14313 nodes = graph.root_nodes(ignore_priority=ignore_priority)
14317 raise AssertionError("no root nodes")
14318 if ignore_priority is not None:
14319 # Some deps have been dropped due to circular dependencies,
14320 # so only pop one node in order do minimize the number that
14325 cleanlist.append(node.cpv)
14327 unmerge(root_config, myopts, "unmerge", cleanlist,
14328 ldpath_mtimes, ordered=ordered)
14330 if action == "prune":
14333 if not cleanlist and "--quiet" in myopts:
# Summary statistics printed at the end of the run.
14336 print "Packages installed:   "+str(len(vardb.cpv_all()))
14337 print "Packages in world:    " + \
14338 str(len(root_config.sets["world"].getAtoms()))
14339 print "Packages in system:   " + \
14340 str(len(root_config.sets["system"].getAtoms()))
14341 print "Required packages:    "+str(required_pkgs_total)
14342 if "--pretend" in myopts:
14343 print "Number to remove:     "+str(len(cleanlist))
14345 print "Number removed:       "+str(len(cleanlist))
14347 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
# NOTE(review): listing is sampled; the docstring delimiters, the try:
# opener, and the assignment of `skip_masked` are not visible here.
14349 Construct a depgraph for the given resume list. This will raise
14350 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14352 @returns: (success, depgraph, dropped_tasks)
14355 skip_unsatisfied = True
14356 mergelist = mtimedb["resume"]["mergelist"]
14357 dropped_tasks = set()
14359 mydepgraph = depgraph(settings, trees,
14360 myopts, myparams, spinner)
14362 success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14363 skip_masked=skip_masked)
# Python 2 comma-style except: some resume deps are unsatisfied.
# When skip_unsatisfied, drop the offending packages and retry.
14364 except depgraph.UnsatisfiedResumeDep, e:
14365 if not skip_unsatisfied:
14368 graph = mydepgraph.digraph
14369 unsatisfied_parents = dict((dep.parent, dep.parent) \
14370 for dep in e.value)
14371 traversed_nodes = set()
14372 unsatisfied_stack = list(unsatisfied_parents)
# Walk reverse dependencies: dropping a package may break parents
# scheduled for merge, which then must be dropped as well.
14373 while unsatisfied_stack:
14374 pkg = unsatisfied_stack.pop()
14375 if pkg in traversed_nodes:
14377 traversed_nodes.add(pkg)
14379 # If this package was pulled in by a parent
14380 # package scheduled for merge, removing this
14381 # package may cause the the parent package's
14382 # dependency to become unsatisfied.
14383 for parent_node in graph.parent_nodes(pkg):
14384 if not isinstance(parent_node, Package) \
14385 or parent_node.operation not in ("merge", "nomerge"):
14388 graph.child_nodes(parent_node,
14389 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14390 if pkg in unsatisfied:
14391 unsatisfied_parents[parent_node] = parent_node
14392 unsatisfied_stack.append(parent_node)
14394 pruned_mergelist = []
14395 for x in mergelist:
14396 if isinstance(x, list) and \
14397 tuple(x) not in unsatisfied_parents:
14398 pruned_mergelist.append(x)
14400 # If the mergelist doesn't shrink then this loop is infinite.
14401 if len(pruned_mergelist) == len(mergelist):
14402 # This happens if a package can't be dropped because
14403 # it's already installed, but it has unsatisfied PDEPEND.
14405 mergelist[:] = pruned_mergelist
14407 # Exclude installed packages that have been removed from the graph due
14408 # to failure to build/install runtime dependencies after the dependent
14409 # package has already been installed.
14410 dropped_tasks.update(pkg for pkg in \
14411 unsatisfied_parents if pkg.operation != "nomerge")
# Break circular references so the discarded graph can be collected.
14412 mydepgraph.break_refs(unsatisfied_parents)
14414 del e, graph, traversed_nodes, \
14415 unsatisfied_parents, unsatisfied_stack
14419 return (success, mydepgraph, dropped_tasks)
14421 def action_build(settings, trees, mtimedb,
14422 myopts, myaction, myfiles, spinner):
14424 # validate the state of the resume data
14425 # so that we can make assumptions later.
14426 for k in ("resume", "resume_backup"):
14427 if k not in mtimedb:
14429 resume_data = mtimedb[k]
14430 if not isinstance(resume_data, dict):
14433 mergelist = resume_data.get("mergelist")
14434 if not isinstance(mergelist, list):
14437 for x in mergelist:
14438 if not (isinstance(x, list) and len(x) == 4):
14440 pkg_type, pkg_root, pkg_key, pkg_action = x
14441 if pkg_root not in trees:
14442 # Current $ROOT setting differs,
14443 # so the list must be stale.
14449 resume_opts = resume_data.get("myopts")
14450 if not isinstance(resume_opts, (dict, list)):
14453 favorites = resume_data.get("favorites")
14454 if not isinstance(favorites, list):
14459 if "--resume" in myopts and \
14460 ("resume" in mtimedb or
14461 "resume_backup" in mtimedb):
14463 if "resume" not in mtimedb:
14464 mtimedb["resume"] = mtimedb["resume_backup"]
14465 del mtimedb["resume_backup"]
14467 # "myopts" is a list for backward compatibility.
14468 resume_opts = mtimedb["resume"].get("myopts", [])
14469 if isinstance(resume_opts, list):
14470 resume_opts = dict((k,True) for k in resume_opts)
14471 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14472 resume_opts.pop(opt, None)
14473 myopts.update(resume_opts)
14475 if "--debug" in myopts:
14476 writemsg_level("myopts %s\n" % (myopts,))
14478 # Adjust config according to options of the command being resumed.
14479 for myroot in trees:
14480 mysettings = trees[myroot]["vartree"].settings
14481 mysettings.unlock()
14482 adjust_config(myopts, mysettings)
14484 del myroot, mysettings
14486 ldpath_mtimes = mtimedb["ldpath"]
14489 buildpkgonly = "--buildpkgonly" in myopts
14490 pretend = "--pretend" in myopts
14491 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14492 ask = "--ask" in myopts
14493 nodeps = "--nodeps" in myopts
14494 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14495 tree = "--tree" in myopts
14496 if nodeps and tree:
14498 del myopts["--tree"]
14499 portage.writemsg(colorize("WARN", " * ") + \
14500 "--tree is broken with --nodeps. Disabling...\n")
14501 debug = "--debug" in myopts
14502 verbose = "--verbose" in myopts
14503 quiet = "--quiet" in myopts
14504 if pretend or fetchonly:
14505 # make the mtimedb readonly
14506 mtimedb.filename = None
14507 if '--digest' in myopts or 'digest' in settings.features:
14508 if '--digest' in myopts:
14509 msg = "The --digest option"
14511 msg = "The FEATURES=digest setting"
14513 msg += " can prevent corruption from being" + \
14514 " noticed. The `repoman manifest` command is the preferred" + \
14515 " way to generate manifests and it is capable of doing an" + \
14516 " entire repository or category at once."
14517 prefix = bad(" * ")
14518 writemsg(prefix + "\n")
14519 from textwrap import wrap
14520 for line in wrap(msg, 72):
14521 writemsg("%s%s\n" % (prefix, line))
14522 writemsg(prefix + "\n")
14524 if "--quiet" not in myopts and \
14525 ("--pretend" in myopts or "--ask" in myopts or \
14526 "--tree" in myopts or "--verbose" in myopts):
14528 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14530 elif "--buildpkgonly" in myopts:
14534 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14536 print darkgreen("These are the packages that would be %s, in reverse order:") % action
14540 print darkgreen("These are the packages that would be %s, in order:") % action
14543 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14544 if not show_spinner:
14545 spinner.update = spinner.update_quiet
14548 favorites = mtimedb["resume"].get("favorites")
14549 if not isinstance(favorites, list):
14553 print "Calculating dependencies ",
14554 myparams = create_depgraph_params(myopts, myaction)
14556 resume_data = mtimedb["resume"]
14557 mergelist = resume_data["mergelist"]
14558 if mergelist and "--skipfirst" in myopts:
14559 for i, task in enumerate(mergelist):
14560 if isinstance(task, list) and \
14561 task and task[-1] == "merge":
14568 success, mydepgraph, dropped_tasks = resume_depgraph(
14569 settings, trees, mtimedb, myopts, myparams, spinner)
14570 except (portage.exception.PackageNotFound,
14571 depgraph.UnsatisfiedResumeDep), e:
14572 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14573 mydepgraph = e.depgraph
14576 from textwrap import wrap
14577 from portage.output import EOutput
14580 resume_data = mtimedb["resume"]
14581 mergelist = resume_data.get("mergelist")
14582 if not isinstance(mergelist, list):
14584 if mergelist and debug or (verbose and not quiet):
14585 out.eerror("Invalid resume list:")
14588 for task in mergelist:
14589 if isinstance(task, list):
14590 out.eerror(indent + str(tuple(task)))
14593 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14594 out.eerror("One or more packages are either masked or " + \
14595 "have missing dependencies:")
14598 for dep in e.value:
14599 if dep.atom is None:
14600 out.eerror(indent + "Masked package:")
14601 out.eerror(2 * indent + str(dep.parent))
14604 out.eerror(indent + str(dep.atom) + " pulled in by:")
14605 out.eerror(2 * indent + str(dep.parent))
14607 msg = "The resume list contains packages " + \
14608 "that are either masked or have " + \
14609 "unsatisfied dependencies. " + \
14610 "Please restart/continue " + \
14611 "the operation manually, or use --skipfirst " + \
14612 "to skip the first package in the list and " + \
14613 "any other packages that may be " + \
14614 "masked or have missing dependencies."
14615 for line in wrap(msg, 72):
14617 elif isinstance(e, portage.exception.PackageNotFound):
14618 out.eerror("An expected package is " + \
14619 "not available: %s" % str(e))
14621 msg = "The resume list contains one or more " + \
14622 "packages that are no longer " + \
14623 "available. Please restart/continue " + \
14624 "the operation manually."
14625 for line in wrap(msg, 72):
14629 print "\b\b... done!"
14633 portage.writemsg("!!! One or more packages have been " + \
14634 "dropped due to\n" + \
14635 "!!! masking or unsatisfied dependencies:\n\n",
14637 for task in dropped_tasks:
14638 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
14639 portage.writemsg("\n", noiselevel=-1)
14642 if mydepgraph is not None:
14643 mydepgraph.display_problems()
14644 if not (ask or pretend):
14645 # delete the current list and also the backup
14646 # since it's probably stale too.
14647 for k in ("resume", "resume_backup"):
14648 mtimedb.pop(k, None)
14653 if ("--resume" in myopts):
14654 print darkgreen("emerge: It seems we have nothing to resume...")
14657 myparams = create_depgraph_params(myopts, myaction)
14658 if "--quiet" not in myopts and "--nodeps" not in myopts:
14659 print "Calculating dependencies ",
14661 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14663 retval, favorites = mydepgraph.select_files(myfiles)
14664 except portage.exception.PackageNotFound, e:
14665 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14667 except portage.exception.PackageSetNotFound, e:
14668 root_config = trees[settings["ROOT"]]["root_config"]
14669 display_missing_pkg_set(root_config, e.value)
14672 print "\b\b... done!"
14674 mydepgraph.display_problems()
14677 if "--pretend" not in myopts and \
14678 ("--ask" in myopts or "--tree" in myopts or \
14679 "--verbose" in myopts) and \
14680 not ("--quiet" in myopts and "--ask" not in myopts):
14681 if "--resume" in myopts:
14682 mymergelist = mydepgraph.altlist()
14683 if len(mymergelist) == 0:
14684 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14686 favorites = mtimedb["resume"]["favorites"]
14687 retval = mydepgraph.display(
14688 mydepgraph.altlist(reversed=tree),
14689 favorites=favorites)
14690 mydepgraph.display_problems()
14691 if retval != os.EX_OK:
14693 prompt="Would you like to resume merging these packages?"
14695 retval = mydepgraph.display(
14696 mydepgraph.altlist(reversed=("--tree" in myopts)),
14697 favorites=favorites)
14698 mydepgraph.display_problems()
14699 if retval != os.EX_OK:
14702 for x in mydepgraph.altlist():
14703 if isinstance(x, Package) and x.operation == "merge":
14707 sets = trees[settings["ROOT"]]["root_config"].sets
14708 world_candidates = None
14709 if "--noreplace" in myopts and \
14710 not oneshot and favorites:
14711 # Sets that are not world candidates are filtered
14712 # out here since the favorites list needs to be
14713 # complete for depgraph.loadResumeCommand() to
14714 # operate correctly.
14715 world_candidates = [x for x in favorites \
14716 if not (x.startswith(SETPREFIX) and \
14717 not sets[x[1:]].world_candidate)]
14718 if "--noreplace" in myopts and \
14719 not oneshot and world_candidates:
14721 for x in world_candidates:
14722 print " %s %s" % (good("*"), x)
14723 prompt="Would you like to add these packages to your world favorites?"
14724 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14725 prompt="Nothing to merge; would you like to auto-clean packages?"
14728 print "Nothing to merge; quitting."
14731 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14732 prompt="Would you like to fetch the source files for these packages?"
14734 prompt="Would you like to merge these packages?"
14736 if "--ask" in myopts and userquery(prompt) == "No":
14741 # Don't ask again (e.g. when auto-cleaning packages after merge)
14742 myopts.pop("--ask", None)
14744 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14745 if ("--resume" in myopts):
14746 mymergelist = mydepgraph.altlist()
14747 if len(mymergelist) == 0:
14748 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14750 favorites = mtimedb["resume"]["favorites"]
14751 retval = mydepgraph.display(
14752 mydepgraph.altlist(reversed=tree),
14753 favorites=favorites)
14754 mydepgraph.display_problems()
14755 if retval != os.EX_OK:
14758 retval = mydepgraph.display(
14759 mydepgraph.altlist(reversed=("--tree" in myopts)),
14760 favorites=favorites)
14761 mydepgraph.display_problems()
14762 if retval != os.EX_OK:
14764 if "--buildpkgonly" in myopts:
14765 graph_copy = mydepgraph.digraph.clone()
14766 removed_nodes = set()
14767 for node in graph_copy:
14768 if not isinstance(node, Package) or \
14769 node.operation == "nomerge":
14770 removed_nodes.add(node)
14771 graph_copy.difference_update(removed_nodes)
14772 if not graph_copy.hasallzeros(ignore_priority = \
14773 DepPrioritySatisfiedRange.ignore_medium):
14774 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14775 print "!!! You have to merge the dependencies before you can build this package.\n"
14778 if "--buildpkgonly" in myopts:
14779 graph_copy = mydepgraph.digraph.clone()
14780 removed_nodes = set()
14781 for node in graph_copy:
14782 if not isinstance(node, Package) or \
14783 node.operation == "nomerge":
14784 removed_nodes.add(node)
14785 graph_copy.difference_update(removed_nodes)
14786 if not graph_copy.hasallzeros(ignore_priority = \
14787 DepPrioritySatisfiedRange.ignore_medium):
14788 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14789 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
14792 if ("--resume" in myopts):
14793 favorites=mtimedb["resume"]["favorites"]
14794 mymergelist = mydepgraph.altlist()
14795 mydepgraph.break_refs(mymergelist)
14796 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14797 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14798 del mydepgraph, mymergelist
14799 clear_caches(trees)
14801 retval = mergetask.merge()
14802 merge_count = mergetask.curval
14804 if "resume" in mtimedb and \
14805 "mergelist" in mtimedb["resume"] and \
14806 len(mtimedb["resume"]["mergelist"]) > 1:
14807 mtimedb["resume_backup"] = mtimedb["resume"]
14808 del mtimedb["resume"]
14810 mtimedb["resume"]={}
14811 # Stored as a dict starting with portage-2.1.6_rc1, and supported
14812 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14813 # a list type for options.
14814 mtimedb["resume"]["myopts"] = myopts.copy()
14816 # Convert Atom instances to plain str.
14817 mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14819 pkglist = mydepgraph.altlist()
14820 mydepgraph.saveNomergeFavorites()
14821 mydepgraph.break_refs(pkglist)
14822 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14823 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14824 del mydepgraph, pkglist
14825 clear_caches(trees)
14827 retval = mergetask.merge()
14828 merge_count = mergetask.curval
14830 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14831 if "yes" == settings.get("AUTOCLEAN"):
14832 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14833 unmerge(trees[settings["ROOT"]]["root_config"],
14834 myopts, "clean", [],
14835 ldpath_mtimes, autoclean=1)
14837 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14838 + " AUTOCLEAN is disabled. This can cause serious"
14839 + " problems due to overlapping packages.\n")
14840 trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
def multiple_actions(action1, action2):
	"""Report on stderr that two mutually exclusive emerge actions
	(e.g. --sync together with --depclean) were given on one command
	line, naming both offending actions."""
	sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
	sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
def insert_optional_args(args):
	"""
	Parse optional arguments and insert a value if one has
	not been provided. This is done before feeding the args
	to the optparse parser since that parser does not support
	this feature natively.

	Handles --root-deps (optional choice argument) and -j/--jobs
	(count may be fused as in "-j4", bundled with other short
	options, or given as the following token).
	"""
	jobs_opts = ("-j", "--jobs")
	root_deps_opt = '--root-deps'
	root_deps_choices = ('True', 'rdeps')
	# Process the args as a stack (reversed copy) so that a value
	# belonging to an option can be popped off as it is consumed.
	arg_stack = args[:]
	arg_stack.reverse()
	arg = arg_stack.pop()

	if arg == root_deps_opt:
		new_args.append(arg)
		# Use the explicit choice if the next token supplies one.
		if arg_stack and arg_stack[-1] in root_deps_choices:
			new_args.append(arg_stack.pop())
		# insert default argument
		new_args.append('True')

	# A short -j may be bundled with other single-letter options.
	short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
	if not (short_job_opt or arg in jobs_opts):
		new_args.append(arg)

	# Insert an empty placeholder in order to
	# satisfy the requirements of optparse.
	new_args.append("--jobs")
	if short_job_opt and len(arg) > 2:
		if arg[:2] == "-j":
			# Count fused directly onto -j, e.g. "-j4".
			job_count = int(arg[2:])
			saved_opts = arg[2:]
		# Bundled short options: strip the "j", keep the rest.
		saved_opts = arg[1:].replace("j", "")

	if job_count is None and arg_stack:
		# The count may follow as a separate token, e.g. "-j 4".
		job_count = int(arg_stack[-1])
		# Discard the job count from the stack
		# since we're consuming it here.

	if job_count is None:
		# unlimited number of jobs
		new_args.append("True")
	new_args.append(str(job_count))

	if saved_opts is not None:
		# Re-insert any short options that were bundled with -j.
		new_args.append("-" + saved_opts)
def parse_opts(tmpcmdline, silent=False):
	"""Parse the emerge command line into (myaction, myopts, myfiles).

	Builds an optparse parser from the module-level `actions`,
	`options` and `shortmapping` tables plus the argument-taking
	options declared in `argument_options` below, then post-processes
	the special values (--root-deps, --jobs, --load-average) and
	detects conflicting actions via multiple_actions().
	"""
	global actions, options, shortmapping

	longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
	# Options that take an argument, mapped to their optparse kwargs.
	argument_options = {
		"help":"specify the location for portage configuration files",
		"help":"enable or disable color output",
		"choices":("y", "n")
		"help" : "Specifies the number of packages to build " + \
		"--load-average": {
		"help" :"Specifies that no new builds should be started " + \
			"if there are other builds running and the load average " + \
			"is at least LOAD (a floating-point number).",
		"help":"include unnecessary build time dependencies",
		"choices":("y", "n")
		"help":"specify conditions to trigger package reinstallation",
		"choices":["changed-use"]
		"help" : "specify the target root filesystem for merging packages",
		"help" : "modify interpretation of depedencies",
		"choices" :("True", "rdeps")

	from optparse import OptionParser
	parser = OptionParser()
	# Built-in --help is removed; emerge supplies its own help action.
	if parser.has_option("--help"):
		parser.remove_option("--help")

	# Register boolean action flags (--sync, --depclean, ...).
	for action_opt in actions:
		parser.add_option("--" + action_opt, action="store_true",
			dest=action_opt.replace("-", "_"), default=False)
	# Register boolean options, short options and long-option aliases.
	for myopt in options:
		parser.add_option(myopt, action="store_true",
			dest=myopt.lstrip("--").replace("-", "_"), default=False)
	for shortopt, longopt in shortmapping.iteritems():
		parser.add_option("-" + shortopt, action="store_true",
			dest=longopt.lstrip("--").replace("-", "_"), default=False)
	for myalias, myopt in longopt_aliases.iteritems():
		parser.add_option(myalias, action="store_true",
			dest=myopt.lstrip("--").replace("-", "_"), default=False)

	for myopt, kwargs in argument_options.iteritems():
		parser.add_option(myopt,
			dest=myopt.lstrip("--").replace("-", "_"), **kwargs)

	# Fill in defaults for options whose argument is optional.
	tmpcmdline = insert_optional_args(tmpcmdline)

	myoptions, myargs = parser.parse_args(args=tmpcmdline)

	# "True" is the placeholder inserted by insert_optional_args().
	if myoptions.root_deps == "True":
		myoptions.root_deps = True

	if myoptions.jobs == "True":
	jobs = int(myoptions.jobs)
	if jobs is not True and \
		writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
			(myoptions.jobs,), noiselevel=-1)
	myoptions.jobs = jobs

	if myoptions.load_average:
		load_average = float(myoptions.load_average)
		# Non-positive load averages are treated as "no limit".
		if load_average <= 0.0:
			load_average = None
		writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
			(myoptions.load_average,), noiselevel=-1)
		myoptions.load_average = load_average

	# Copy the parsed values back into the myopts/myaction/myfiles
	# shape that the rest of emerge expects.
	for myopt in options:
		v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
		myopts[myopt] = True
	for myopt in argument_options:
		v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)

	if myoptions.searchdesc:
		myoptions.search = True

	for action_opt in actions:
		v = getattr(myoptions, action_opt.replace("-", "_"))
		multiple_actions(myaction, action_opt)
		myaction = action_opt

	return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
	"""Call settings.validate() on the vartree config of every root,
	so configuration problems are reported before any real work."""
	for root_trees in trees.values():
		root_trees["vartree"].settings.validate()
def clear_caches(trees):
	"""Drop the in-memory caches held by each root's db objects
	(porttree/bintree aux caches, vartree linkmap cache), then clear
	portage's global directory cache."""
	for root_trees in trees.itervalues():
		port_db = root_trees["porttree"].dbapi
		port_db.melt()
		port_db._aux_cache.clear()
		bin_db = root_trees["bintree"].dbapi
		bin_db._aux_cache.clear()
		bin_db._clear_cache()
		root_trees["vartree"].dbapi.linkmap._clear_cache()
	portage.dircache.clear()
def load_emerge_config(trees=None):
	"""Create/refresh the (settings, trees, mtimedb) triple used
	throughout emerge.

	Honors PORTAGE_CONFIGROOT and ROOT from the environment, attaches
	a RootConfig (package set configuration) to each root's tree dict,
	and opens the global mtimedb cache file under CACHE_PATH.
	"""
	# Forward non-empty PORTAGE_CONFIGROOT/ROOT env vars to create_trees().
	for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
		v = os.environ.get(envvar, None)
		if v and v.strip():
	trees = portage.create_trees(trees=trees, **kwargs)

	for root, root_trees in trees.iteritems():
		settings = root_trees["vartree"].settings
		setconfig = load_default_config(settings, root_trees)
		root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)

	settings = trees["/"]["vartree"].settings
	for myroot in trees:
		settings = trees[myroot]["vartree"].settings

	mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
	mtimedb = portage.MtimeDB(mtimedbfile)
	return settings, trees, mtimedb
def adjust_config(myopts, settings):
	"""Make emerge specific adjustments to the config."""

	# To enhance usability, make some vars case insensitive by forcing them to
	for myvar in ("AUTOCLEAN", "NOCOLOR"):
		if myvar in settings:
			settings[myvar] = settings[myvar].lower()
			settings.backup_changes(myvar)

	# Kill noauto as it will break merges otherwise.
	if "noauto" in settings.features:
		settings.features.remove('noauto')
		settings['FEATURES'] = ' '.join(sorted(settings.features))
		settings.backup_changes("FEATURES")

	# Validate CLEAN_DELAY, falling back to the prior default and
	# writing the sanitized value back into the config.
	CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
	except ValueError, e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
			settings["CLEAN_DELAY"], noiselevel=-1)
	settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
	settings.backup_changes("CLEAN_DELAY")

	# Same validation for EMERGE_WARNING_DELAY (default 10).
	EMERGE_WARNING_DELAY = 10
	EMERGE_WARNING_DELAY = int(settings.get(
		"EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
	except ValueError, e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
			settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
	settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
	settings.backup_changes("EMERGE_WARNING_DELAY")

	if "--quiet" in myopts:
		settings["PORTAGE_QUIET"]="1"
		settings.backup_changes("PORTAGE_QUIET")

	if "--verbose" in myopts:
		settings["PORTAGE_VERBOSE"] = "1"
		settings.backup_changes("PORTAGE_VERBOSE")

	# Set so that configs will be merged regardless of remembered status
	if ("--noconfmem" in myopts):
		settings["NOCONFMEM"]="1"
		settings.backup_changes("NOCONFMEM")

	# Set various debug markers... They should be merged somehow.
	PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
	if PORTAGE_DEBUG not in (0, 1):
		portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
			PORTAGE_DEBUG, noiselevel=-1)
		portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
	except ValueError, e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
			settings["PORTAGE_DEBUG"], noiselevel=-1)
	if "--debug" in myopts:
	settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
	settings.backup_changes("PORTAGE_DEBUG")

	# Enable color unless NOCOLOR explicitly disables it.
	if settings.get("NOCOLOR") not in ("yes","true"):
		portage.output.havecolor = 1

	"""The explicit --color < y | n > option overrides the NOCOLOR environment
	variable and stdout auto-detection."""
	if "--color" in myopts:
		if "y" == myopts["--color"]:
			portage.output.havecolor = 1
			settings["NOCOLOR"] = "false"
		portage.output.havecolor = 0
		settings["NOCOLOR"] = "true"
		settings.backup_changes("NOCOLOR")
	elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
		# Not a terminal: disable color unless NOCOLOR forces it on.
		portage.output.havecolor = 0
		settings["NOCOLOR"] = "true"
		settings.backup_changes("NOCOLOR")
# NOTE(review): the body of this function is not visible in this
# excerpt. Judging by the nice()/ionice() helpers defined below, which
# read PORTAGE_NICENESS and PORTAGE_IONICE_COMMAND from `settings`,
# it presumably applies those scheduling priorities -- confirm against
# the full source.
def apply_priorities(settings):
def nice(settings):
	"""Renice the current process by PORTAGE_NICENESS (default "0"),
	reporting bad values or OS errors via eerror instead of raising."""
	os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
	except (OSError, ValueError), e:
		out = portage.output.EOutput()
		out.eerror("Failed to change nice value to '%s'" % \
			settings["PORTAGE_NICENESS"])
		out.eerror("%s\n" % str(e))
def ionice(settings):
	"""Run PORTAGE_IONICE_COMMAND (if configured) with ${PID} expanded
	to the current process id, to adjust this process's I/O priority.

	A missing ionice binary is ignored silently; a nonzero exit status
	is reported via eerror.
	"""
	ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
	ionice_cmd = shlex.split(ionice_cmd)
	from portage.util import varexpand
	# Substitute ${PID} in each argument of the configured command.
	variables = {"PID" : str(os.getpid())}
	cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
	rval = portage.process.spawn(cmd, env=os.environ)
	except portage.exception.CommandNotFound:
		# The OS kernel probably doesn't support ionice,
		# so return silently.
	if rval != os.EX_OK:
		out = portage.output.EOutput()
		out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
		out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
def display_missing_pkg_set(root_config, set_name):
	"""Log an error saying that no set satisfies `set_name`, followed
	by a sorted listing of every set root_config actually defines."""
	msg.append(("emerge: There are no sets to satisfy '%s'. " + \
		"The following sets exist:") % \
		colorize("INFORM", set_name))
	for s in sorted(root_config.sets):
		msg.append(" %s" % s)
	writemsg_level("".join("%s\n" % l for l in msg),
		level=logging.ERROR, noiselevel=-1)
def expand_set_arguments(myfiles, myaction, root_config):
	"""Expand @set arguments in myfiles into package atoms.

	Handles inline set options ("@set{opt=val,...}" via ARG_START/
	ARG_END), simple left-to-right set algebra with the *_OPERATOR
	separators, verifies that the required "world"/"system" sets
	exist, and leaves set names unexpanded for actions in
	do_not_expand so the depgraph can expand them itself.
	Returns (newargs, retval).
	"""
	setconfig = root_config.setconfig

	sets = setconfig.getSets()

	# In order to know exactly which atoms/sets should be added to the
	# world file, the depgraph performs set expansion later. It will get
	# confused about where the atoms came from if it's not allowed to
	# expand them itself.
	do_not_expand = (None, )
		if a in ("system", "world"):
			newargs.append(SETPREFIX+a)

	# separators for set arguments
	# WARNING: all operators must be of equal length
	DIFF_OPERATOR = "-@"
	UNION_OPERATOR = "+@"

	# Pull inline "{...}" option blocks out of set arguments and feed
	# them to setconfig before the sets are resolved.
	for i in range(0, len(myfiles)):
		if myfiles[i].startswith(SETPREFIX):
			x = myfiles[i][len(SETPREFIX):]
			start = x.find(ARG_START)
			end = x.find(ARG_END)
			if start > 0 and start < end:
				namepart = x[:start]
				argpart = x[start+1:end]

				# TODO: implement proper quoting
				args = argpart.split(",")
					k, v = a.split("=", 1)
					options[a] = "True"
				setconfig.update(namepart, options)
				newset += (x[:start-len(namepart)]+namepart)
				x = x[end+len(ARG_END):]
			myfiles[i] = SETPREFIX+newset

	sets = setconfig.getSets()

	# display errors that occured while loading the SetConfig instance
	for e in setconfig.errors:
		print colorize("BAD", "Error during set creation: %s" % e)

	# emerge relies on the existance of sets with names "world" and "system"
	required_sets = ("world", "system")

	for s in required_sets:
			missing_sets.append(s)

	# Build a human-readable list of the missing required sets.
	if len(missing_sets) > 2:
		missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
		missing_sets_str += ', and "%s"' % missing_sets[-1]
	elif len(missing_sets) == 2:
		missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
		missing_sets_str = '"%s"' % missing_sets[-1]
	msg = ["emerge: incomplete set configuration, " + \
		"missing set(s): %s" % missing_sets_str]
		msg.append(" sets defined: %s" % ", ".join(sets))
		msg.append(" This usually means that '%s'" % \
			(os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
		msg.append(" is missing or corrupt.")
		writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)

	unmerge_actions = ("unmerge", "prune", "clean", "depclean")

		if a.startswith(SETPREFIX):
			# support simple set operations (intersection, difference and union)
			# on the commandline. Expressions are evaluated strictly left-to-right
			if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
				expression = a[len(SETPREFIX):]
				while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
					# Peel operands off from the right; all operators
					# share one length, so the slicing is uniform.
					is_pos = expression.rfind(IS_OPERATOR)
					diff_pos = expression.rfind(DIFF_OPERATOR)
					union_pos = expression.rfind(UNION_OPERATOR)
					op_pos = max(is_pos, diff_pos, union_pos)
					s1 = expression[:op_pos]
					s2 = expression[op_pos+len(IS_OPERATOR):]
					op = expression[op_pos:op_pos+len(IS_OPERATOR)]
					display_missing_pkg_set(root_config, s2)
					expr_sets.insert(0, s2)
					expr_ops.insert(0, op)
				if not expression in sets:
					display_missing_pkg_set(root_config, expression)
				expr_sets.insert(0, expression)
				# Fold the collected operands left-to-right into one
				# result set of atoms.
				result = set(setconfig.getSetAtoms(expression))
				for i in range(0, len(expr_ops)):
					s2 = setconfig.getSetAtoms(expr_sets[i+1])
					if expr_ops[i] == IS_OPERATOR:
						result.intersection_update(s2)
					elif expr_ops[i] == DIFF_OPERATOR:
						result.difference_update(s2)
					elif expr_ops[i] == UNION_OPERATOR:
						raise NotImplementedError("unknown set operator %s" % expr_ops[i])
				newargs.extend(result)
				s = a[len(SETPREFIX):]
				display_missing_pkg_set(root_config, s)
				setconfig.active.append(s)
				set_atoms = setconfig.getSetAtoms(s)
				except portage.exception.PackageSetNotFound, e:
					writemsg_level(("emerge: the given set '%s' " + \
						"contains a non-existent set named '%s'.\n") % \
						(s, e), level=logging.ERROR, noiselevel=-1)
				if myaction in unmerge_actions and \
					not sets[s].supportsOperation("unmerge"):
					sys.stderr.write("emerge: the given set '%s' does " % s + \
						"not support unmerge operations\n")
				elif not set_atoms:
					print "emerge: '%s' is an empty set" % s
				elif myaction not in do_not_expand:
					newargs.extend(set_atoms)
					newargs.append(SETPREFIX+s)
				for e in sets[s].errors:
	return (newargs, retval)
def repo_name_check(trees):
	"""Warn about repositories that lack a profiles/repo_name entry.

	Returns True when at least one repo_name is missing. An empty
	$PORTDIR (porttree_root without a profiles directory) is excluded
	from the warning.
	"""
	missing_repo_names = set()
	for root, root_trees in trees.iteritems():
		if "porttree" in root_trees:
			portdb = root_trees["porttree"].dbapi
			# Start from every configured tree, then discard those
			# whose repository name resolves.
			missing_repo_names.update(portdb.porttrees)
			repos = portdb.getRepositories()
			missing_repo_names.discard(portdb.getRepositoryPath(r))
			if portdb.porttree_root in missing_repo_names and \
				not os.path.exists(os.path.join(
				portdb.porttree_root, "profiles")):
				# This is normal if $PORTDIR happens to be empty,
				# so don't warn about it.
				missing_repo_names.remove(portdb.porttree_root)

	if missing_repo_names:
		msg.append("WARNING: One or more repositories " + \
			"have missing repo_name entries:")
		for p in missing_repo_names:
			msg.append("\t%s/profiles/repo_name" % (p,))
		msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
			"should be a plain text file containing a unique " + \
			"name for the repository on the first line.", 70))
		writemsg_level("".join("%s\n" % l for l in msg),
			level=logging.WARNING, noiselevel=-1)

	return bool(missing_repo_names)
def config_protect_check(trees):
	"""Warn, per root, when CONFIG_PROTECT is unset/empty, since
	configuration file protection is then effectively disabled."""
	for root, root_trees in trees.iteritems():
		if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
			msg = "!!! CONFIG_PROTECT is empty"
			# The root is named in the message for non-default roots.
			msg += " for '%s'" % root
			writemsg_level(msg, level=logging.WARN, noiselevel=-1)
def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
	"""Tell the user that the short ebuild name `arg` matches several
	packages (`atoms`), listing the fully-qualified category/package
	names; the search machinery is used to show full descriptions in
	the non-quiet case.
	"""
	if "--quiet" in myopts:
		print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
		print "!!! one of the following fully-qualified ebuild names instead:\n"
		for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
			print " " + colorize("INFORM", cp)

	s = search(root_config, spinner, "--searchdesc" in myopts,
		"--quiet" not in myopts, "--usepkg" in myopts,
		"--usepkgonly" in myopts)
	null_cp = portage.dep_getkey(insert_category_into_atom(
	cat, atom_pn = portage.catsplit(null_cp)
	s.searchkey = atom_pn
	for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
	print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
	print "!!! one of the above fully-qualified ebuild names instead.\n"
def profile_check(trees, myaction, myopts):
	"""Verify that every root has a profile configured; actions that
	work without one ("info", "sync", --version, --help) are exempt.
	Logs an explanatory error when a root lacks a valid profile."""
	if myaction in ("info", "sync"):
	elif "--version" in myopts or "--help" in myopts:
	for root, root_trees in trees.iteritems():
		if root_trees["root_config"].settings.profiles:
		# generate some profile related warning messages
		validate_ebuild_environment(trees)
		msg = "If you have just changed your profile configuration, you " + \
			"should revert back to the previous configuration. Due to " + \
			"your current profile being invalid, allowed actions are " + \
			"limited to --help, --info, --sync, and --version."
		writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
			level=logging.ERROR, noiselevel=-1)
15486 global portage # NFC why this is necessary now - genone
15487 portage._disable_legacy_globals()
15488 # Disable color until we're sure that it should be enabled (after
15489 # EMERGE_DEFAULT_OPTS has been parsed).
15490 portage.output.havecolor = 0
15491 # This first pass is just for options that need to be known as early as
15492 # possible, such as --config-root. They will be parsed again later,
15493 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15494 # the value of --config-root).
15495 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15496 if "--debug" in myopts:
15497 os.environ["PORTAGE_DEBUG"] = "1"
15498 if "--config-root" in myopts:
15499 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15500 if "--root" in myopts:
15501 os.environ["ROOT"] = myopts["--root"]
15503 # Portage needs to ensure a sane umask for the files it creates.
15505 settings, trees, mtimedb = load_emerge_config()
15506 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15507 rval = profile_check(trees, myaction, myopts)
15508 if rval != os.EX_OK:
15511 if portage._global_updates(trees, mtimedb["updates"]):
15513 # Reload the whole config from scratch.
15514 settings, trees, mtimedb = load_emerge_config(trees=trees)
15515 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15517 xterm_titles = "notitles" not in settings.features
15520 if "--ignore-default-opts" not in myopts:
15521 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15522 tmpcmdline.extend(sys.argv[1:])
15523 myaction, myopts, myfiles = parse_opts(tmpcmdline)
15525 if "--digest" in myopts:
15526 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15527 # Reload the whole config from scratch so that the portdbapi internal
15528 # config is updated with new FEATURES.
15529 settings, trees, mtimedb = load_emerge_config(trees=trees)
15530 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15532 for myroot in trees:
15533 mysettings = trees[myroot]["vartree"].settings
15534 mysettings.unlock()
15535 adjust_config(myopts, mysettings)
15536 if '--pretend' not in myopts and myaction in \
15537 (None, 'clean', 'depclean', 'prune', 'unmerge'):
15538 mysettings["PORTAGE_COUNTER_HASH"] = \
15539 trees[myroot]["vartree"].dbapi._counter_hash()
15540 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15542 del myroot, mysettings
15544 apply_priorities(settings)
15546 spinner = stdout_spinner()
15547 if "candy" in settings.features:
15548 spinner.update = spinner.update_scroll
15550 if "--quiet" not in myopts:
15551 portage.deprecated_profile_check(settings=settings)
15552 repo_name_check(trees)
15553 config_protect_check(trees)
15555 for mytrees in trees.itervalues():
15556 mydb = mytrees["porttree"].dbapi
15557 # Freeze the portdbapi for performance (memoize all xmatch results).
15561 if "moo" in myfiles:
15564 Larry loves Gentoo (""" + platform.system() + """)
15566 _______________________
15567 < Have you mooed today? >
15568 -----------------------
15578 ext = os.path.splitext(x)[1]
15579 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15580 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15583 root_config = trees[settings["ROOT"]]["root_config"]
15584 if myaction == "list-sets":
15585 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15589 # only expand sets for actions taking package arguments
15590 oldargs = myfiles[:]
15591 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15592 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15593 if retval != os.EX_OK:
15596 # Need to handle empty sets specially, otherwise emerge will react
15597 # with the help message for empty argument lists
15598 if oldargs and not myfiles:
15599 print "emerge: no targets left after set expansion"
15602 if ("--tree" in myopts) and ("--columns" in myopts):
15603 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15606 if ("--quiet" in myopts):
15607 spinner.update = spinner.update_quiet
15608 portage.util.noiselimit = -1
15610 # Always create packages if FEATURES=buildpkg
15611 # Imply --buildpkg if --buildpkgonly
15612 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15613 if "--buildpkg" not in myopts:
15614 myopts["--buildpkg"] = True
15616 # Always try and fetch binary packages if FEATURES=getbinpkg
15617 if ("getbinpkg" in settings.features):
15618 myopts["--getbinpkg"] = True
15620 if "--buildpkgonly" in myopts:
15621 # --buildpkgonly will not merge anything, so
15622 # it cancels all binary package options.
15623 for opt in ("--getbinpkg", "--getbinpkgonly",
15624 "--usepkg", "--usepkgonly"):
15625 myopts.pop(opt, None)
15627 if "--fetch-all-uri" in myopts:
15628 myopts["--fetchonly"] = True
15630 if "--skipfirst" in myopts and "--resume" not in myopts:
15631 myopts["--resume"] = True
15633 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15634 myopts["--usepkgonly"] = True
15636 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15637 myopts["--getbinpkg"] = True
15639 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15640 myopts["--usepkg"] = True
15642 # Also allow -K to apply --usepkg/-k
15643 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15644 myopts["--usepkg"] = True
15646 # Allow -p to remove --ask
15647 if ("--pretend" in myopts) and ("--ask" in myopts):
15648 print ">>> --pretend disables --ask... removing --ask from options."
15649 del myopts["--ask"]
15651 # forbid --ask when not in a terminal
15652 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15653 if ("--ask" in myopts) and (not sys.stdin.isatty()):
15654 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15658 if settings.get("PORTAGE_DEBUG", "") == "1":
15659 spinner.update = spinner.update_quiet
15661 if "python-trace" in settings.features:
15662 import portage.debug
15663 portage.debug.set_trace(True)
15665 if not ("--quiet" in myopts):
15666 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15667 spinner.update = spinner.update_basic
15669 if myaction == 'version':
15670 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15671 settings.profile_path, settings["CHOST"],
15672 trees[settings["ROOT"]]["vartree"].dbapi)
15674 elif "--help" in myopts:
15675 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15678 if "--debug" in myopts:
15679 print "myaction", myaction
15680 print "myopts", myopts
15682 if not myaction and not myfiles and "--resume" not in myopts:
15683 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15686 pretend = "--pretend" in myopts
15687 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15688 buildpkgonly = "--buildpkgonly" in myopts
15690 # check if root user is the current user for the actions where emerge needs this
15691 if portage.secpass < 2:
15692 # We've already allowed "--version" and "--help" above.
15693 if "--pretend" not in myopts and myaction not in ("search","info"):
15694 need_superuser = not \
15696 (buildpkgonly and secpass >= 1) or \
15697 myaction in ("metadata", "regen") or \
15698 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15699 if portage.secpass < 1 or \
15702 access_desc = "superuser"
15704 access_desc = "portage group"
15705 # Always show portage_group_warning() when only portage group
15706 # access is required but the user is not in the portage group.
15707 from portage.data import portage_group_warning
15708 if "--ask" in myopts:
15709 myopts["--pretend"] = True
15710 del myopts["--ask"]
15711 print ("%s access is required... " + \
15712 "adding --pretend to options.\n") % access_desc
15713 if portage.secpass < 1 and not need_superuser:
15714 portage_group_warning()
15716 sys.stderr.write(("emerge: %s access is " + \
15717 "required.\n\n") % access_desc)
15718 if portage.secpass < 1 and not need_superuser:
15719 portage_group_warning()
15722 disable_emergelog = False
15723 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15725 disable_emergelog = True
15727 if myaction in ("search", "info"):
15728 disable_emergelog = True
15729 if disable_emergelog:
15730 """ Disable emergelog for everything except build or unmerge
15731 operations. This helps minimize parallel emerge.log entries that can
15732 confuse log parsers. We especially want it disabled during
15733 parallel-fetch, which uses --resume --fetchonly."""
15735 def emergelog(*pargs, **kargs):
15738 if not "--pretend" in myopts:
15739 emergelog(xterm_titles, "Started emerge on: "+\
15740 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15743 myelogstr=" ".join(myopts)
15745 myelogstr+=" "+myaction
15747 myelogstr += " " + " ".join(oldargs)
15748 emergelog(xterm_titles, " *** emerge " + myelogstr)
# Signal handler registered for SIGINT and SIGTERM: first mask further
# delivery of both signals (so a second Ctrl-C during teardown cannot
# re-enter), then report which signal triggered the exit via
# portage.util.writemsg, and terminate with status 100+signum so callers
# can distinguish signal-driven exits from ordinary failure codes.
# NOTE(review): writemsg presumably targets stderr (portage default) —
# confirm against portage.util.
15751 def emergeexitsig(signum, frame):
15752 signal.signal(signal.SIGINT, signal.SIG_IGN)
15753 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15754 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15755 sys.exit(100+signum)
15756 signal.signal(signal.SIGINT, emergeexitsig)
15757 signal.signal(signal.SIGTERM, emergeexitsig)
15760 """This gets out final log message in before we quit."""
15761 if "--pretend" not in myopts:
15762 emergelog(xterm_titles, " *** terminating.")
15763 if "notitles" not in settings.features:
15765 portage.atexit_register(emergeexit)
15767 if myaction in ("config", "metadata", "regen", "sync"):
15768 if "--pretend" in myopts:
15769 sys.stderr.write(("emerge: The '%s' action does " + \
15770 "not support '--pretend'.\n") % myaction)
15773 if "sync" == myaction:
15774 return action_sync(settings, trees, mtimedb, myopts, myaction)
15775 elif "metadata" == myaction:
15776 action_metadata(settings, portdb, myopts)
15777 elif myaction=="regen":
15778 validate_ebuild_environment(trees)
15779 return action_regen(settings, portdb, myopts.get("--jobs"),
15780 myopts.get("--load-average"))
15782 elif "config"==myaction:
15783 validate_ebuild_environment(trees)
15784 action_config(settings, trees, myopts, myfiles)
15787 elif "search"==myaction:
15788 validate_ebuild_environment(trees)
15789 action_search(trees[settings["ROOT"]]["root_config"],
15790 myopts, myfiles, spinner)
15791 elif myaction in ("clean", "unmerge") or \
15792 (myaction == "prune" and "--nodeps" in myopts):
15793 validate_ebuild_environment(trees)
15795 # Ensure atoms are valid before calling unmerge().
15796 # For backward compat, leading '=' is not required.
15798 if is_valid_package_atom(x) or \
15799 is_valid_package_atom("=" + x):
15802 msg.append("'%s' is not a valid package atom." % (x,))
15803 msg.append("Please check ebuild(5) for full details.")
15804 writemsg_level("".join("!!! %s\n" % line for line in msg),
15805 level=logging.ERROR, noiselevel=-1)
15808 # When given a list of atoms, unmerge
15809 # them in the order given.
15810 ordered = myaction == "unmerge"
15811 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15812 mtimedb["ldpath"], ordered=ordered):
15813 if not (buildpkgonly or fetchonly or pretend):
15814 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15816 elif myaction in ("depclean", "info", "prune"):
15818 # Ensure atoms are valid before calling unmerge().
15819 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15822 if is_valid_package_atom(x):
15824 valid_atoms.append(
15825 portage.dep_expand(x, mydb=vardb, settings=settings))
15826 except portage.exception.AmbiguousPackageName, e:
15827 msg = "The short ebuild name \"" + x + \
15828 "\" is ambiguous. Please specify " + \
15829 "one of the following " + \
15830 "fully-qualified ebuild names instead:"
15831 for line in textwrap.wrap(msg, 70):
15832 writemsg_level("!!! %s\n" % (line,),
15833 level=logging.ERROR, noiselevel=-1)
15835 writemsg_level(" %s\n" % colorize("INFORM", i),
15836 level=logging.ERROR, noiselevel=-1)
15837 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15841 msg.append("'%s' is not a valid package atom." % (x,))
15842 msg.append("Please check ebuild(5) for full details.")
15843 writemsg_level("".join("!!! %s\n" % line for line in msg),
15844 level=logging.ERROR, noiselevel=-1)
15847 if myaction == "info":
15848 return action_info(settings, trees, myopts, valid_atoms)
15850 validate_ebuild_environment(trees)
15851 action_depclean(settings, trees, mtimedb["ldpath"],
15852 myopts, myaction, valid_atoms, spinner)
15853 if not (buildpkgonly or fetchonly or pretend):
15854 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15855 # "update", "system", or just process files:
15857 validate_ebuild_environment(trees)
15860 if x.startswith(SETPREFIX) or \
15861 is_valid_package_atom(x):
15863 if x[:1] == os.sep:
15871 msg.append("'%s' is not a valid package atom." % (x,))
15872 msg.append("Please check ebuild(5) for full details.")
15873 writemsg_level("".join("!!! %s\n" % line for line in msg),
15874 level=logging.ERROR, noiselevel=-1)
15877 if "--pretend" not in myopts:
15878 display_news_notification(root_config, myopts)
15879 retval = action_build(settings, trees, mtimedb,
15880 myopts, myaction, myfiles, spinner)
15881 root_config = trees[settings["ROOT"]]["root_config"]
15882 post_emerge(root_config, myopts, mtimedb, retval)