2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
8 from collections import deque
28 from os import path as osp
29 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
32 from portage import digraph
33 from portage.const import NEWS_LIB_PATH
36 import portage.xpak, commands, errno, re, socket, time
37 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
38 nc_len, red, teal, turquoise, xtermTitle, \
39 xtermTitleReset, yellow
40 from portage.output import create_color_func
41 good = create_color_func("GOOD")
42 bad = create_color_func("BAD")
43 # white looks bad on terminals with white background
44 from portage.output import bold as white
48 portage.dep._dep_check_strict = True
51 import portage.exception
52 from portage.data import secpass
53 from portage.elog.messages import eerror
54 from portage.util import normalize_path as normpath
55 from portage.util import cmp_sort_key, writemsg, writemsg_level
56 from portage.sets import load_default_config, SETPREFIX
57 from portage.sets.base import InternalPackageSet
59 from itertools import chain, izip
62 import cPickle as pickle
67 from cStringIO import StringIO
69 from StringIO import StringIO
class stdout_spinner(object):
	# Progress indicator written to stdout while long operations (e.g. cache
	# regeneration) run. Supports several styles: basic dots, a scrolling
	# message, a twirling cursor, or quiet (no output).
	# NOTE(review): sampled excerpt — several lines (including the
	# "scroll_msgs = [" opener, __init__ header, and some return statements)
	# are not visible here; visible tokens are preserved unchanged.
		"Gentoo Rocks ("+platform.system()+")",
		"Thank you for using Gentoo. :)",
		"Are you actually trying to read this?",
		"How many times have you stared at this?",
		"We are generating the cache right now",
		"You are paying too much attention.",
		"A theory is better than its explanation.",
		"Phasers locked on target, Captain.",
		"Thrashing is just virtual crashing.",
		"To be is to program.",
		"Real Users hate Real Programmers.",
		"When all else fails, read the instructions.",
		"Functionality breeds Contempt.",
		"The future lies ahead.",
		"3.1415926535897932384626433832795028841971694",
		"Sometimes insanity is the only alternative.",
		"Inaccuracy saves a world of explanation.",

	# Characters cycled through by the "twirl" spinner style.
	twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"

		# Default spinner style; a scroll message is chosen pseudo-randomly
		# from the current time.
		self.update = self.update_twirl
		self.scroll_sequence = self.scroll_msgs[
			int(time.time() * 100) % len(self.scroll_msgs)]
		# Minimum number of seconds between terminal updates (throttle).
		self.min_display_latency = 0.05

	def _return_early(self):
		"""
		Flushing output to the tty too frequently wastes cpu time. Therefore,
		each update* method should return without doing any output when this
		method returns True.
		"""
		cur_time = time.time()
		if cur_time - self.last_update < self.min_display_latency:
		self.last_update = cur_time

	def update_basic(self):
		# Print a dot every 100th call; spinpos wraps at 500.
		self.spinpos = (self.spinpos + 1) % 500
		if self._return_early():
		if (self.spinpos % 100) == 0:
			if self.spinpos == 0:
				sys.stdout.write(". ")
			sys.stdout.write(".")

	def update_scroll(self):
		if self._return_early():
		# First half of the cycle scrolls the message forward (green),
		# second half scrolls it back (dark green).
		if(self.spinpos >= len(self.scroll_sequence)):
			sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
				len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
			sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
		self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))

	def update_twirl(self):
		self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
		if self._return_early():
		sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])

	def update_quiet(self):
		# Intentionally produces no output.
def userquery(prompt, responses=None, colours=None):
	"""Displays a prompt and a set of responses, then waits for a response
	which is checked against the responses and the first to match is
	returned. An empty response will match the first value in responses. The
	input buffer is *not* cleared prior to the prompt!

	responses: a List of Strings.
	colours: a List of Functions taking and returning a String, used to
	process the responses for display. Typically these will be functions
	like red() but could be e.g. lambda x: "DisplayString".
	If responses is omitted, defaults to ["Yes", "No"], [green, red].
	If only colours is omitted, defaults to [bold, ...].

	Returns a member of the List responses. (If called without optional
	arguments, returns "Yes" or "No".)
	KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
	printed."""
	# NOTE(review): sampled excerpt — the colours-list opener, the prompt
	# print, the retry loop, and the exit path are not visible here.
	if responses is None:
		responses = ["Yes", "No"]
		# Default colour functions for the default Yes/No responses.
			create_color_func("PROMPT_CHOICE_DEFAULT"),
			create_color_func("PROMPT_CHOICE_OTHER")
	elif colours is None:
	# Repeat the colour list so that every response has a colour function.
	colours=(colours*len(responses))[:len(responses)]
		response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
		for key in responses:
			# An empty response will match the first value in responses.
			if response.upper()==key[:len(response)].upper():
		print "Sorry, response '%s' not understood." % response,
	except (EOFError, KeyboardInterrupt):
# The set of valid emerge <action> arguments.
# NOTE(review): sampled excerpt — some entries and the "options = [" /
# "shortmapping = {" openers for the two collections below are not visible.
actions = frozenset([
	"clean", "config", "depclean",
	"info", "list-sets", "metadata",
	"prune", "regen", "search",
	"sync", "unmerge", "version",
# Long-form command line options recognized by emerge.
	"--ask", "--alphabetical",
	"--buildpkg", "--buildpkgonly",
	"--changelog", "--columns",
	"--fetchonly", "--fetch-all-uri",
	"--getbinpkg", "--getbinpkgonly",
	"--help", "--ignore-default-opts",
	"--newuse", "--nocolor",
	"--nodeps", "--noreplace",
	"--nospinner", "--oneshot",
	"--onlydeps", "--pretend",
	"--quiet", "--resume",
	"--searchdesc", "--selective",
	"--usepkg", "--usepkgonly",
# Mapping of single-letter short options to their long-form equivalents.
	"b":"--buildpkg", "B":"--buildpkgonly",
	"c":"--clean", "C":"--unmerge",
	"d":"--debug", "D":"--deep",
	"f":"--fetchonly", "F":"--fetch-all-uri",
	"g":"--getbinpkg", "G":"--getbinpkgonly",
	"k":"--usepkg", "K":"--usepkgonly",
	"n":"--noreplace", "N":"--newuse",
	"o":"--onlydeps", "O":"--nodeps",
	"p":"--pretend", "P":"--prune",
	"s":"--search", "S":"--searchdesc",
	"v":"--verbose", "V":"--version"
def emergelog(xterm_titles, mystr, short_msg=None):
	"""Append a timestamped entry to /var/log/emerge.log, optionally echoing
	short_msg to the xterm title bar (prefixed with the hostname when
	HOSTNAME is set). Errors are reported to stderr, not raised.
	NOTE(review): sampled excerpt — the surrounding try/finally and the
	seek call referenced by the comment below are not visible here."""
	if xterm_titles and short_msg:
		if "HOSTNAME" in os.environ:
			short_msg = os.environ["HOSTNAME"]+": "+short_msg
		xtermTitle(short_msg)
	file_path = "/var/log/emerge.log"
	mylogfile = open(file_path, "a")
	# Keep the log owned/readable by the portage user and group.
	portage.util.apply_secpass_permissions(file_path,
		uid=portage.portage_uid, gid=portage.portage_gid,
	# Serialize writers via a portage lock on the open file.
	mylock = portage.locks.lockfile(mylogfile)
	# seek because we may have gotten held up by the lock.
	# if so, we may not be positioned at the end of the file.
	mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
	portage.locks.unlockfile(mylock)
	except (IOError,OSError,portage.exception.PortageException), e:
		print >> sys.stderr, "emergelog():",e
def countdown(secs=5, doing="Starting"):
	# Give the user a window to Control-C out of a pending action.
	# NOTE(review): sampled excerpt — the guard on secs and the per-second
	# loop around the write below are not visible here.
	print ">>> Waiting",secs,"seconds before starting..."
	print ">>> (Control-C to abort)...\n"+doing+" in: ",
	sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
# formats a size given in bytes nicely
def format_size(mysize):
	# NOTE(review): sampled excerpt — the early-return for strings, the
	# thousands-separator loop, and the final return are not visible here.
	if isinstance(mysize, basestring):
	if 0 != mysize % 1024:
		# Always round up to the next kB so that it doesn't show 0 kB when
		# some small file still needs to be fetched.
		mysize += 1024 - mysize % 1024
	mystr=str(mysize/1024)
	# Insert a comma as thousands separator at position mycount.
	mystr=mystr[:mycount]+","+mystr[mycount:]
def getgccversion(chost):
	"""
	return: the current in-use gcc version
	"""
	gcc_ver_command = 'gcc -dumpversion'
	gcc_ver_prefix = 'gcc-'

	# NOTE(review): sampled excerpt — the closing paren of this red() call
	# is not visible here.
	gcc_not_found_error = red(
	"!!! No gcc found. You probably need to 'source /etc/profile'\n" +
	"!!! to update the environment of this terminal and possibly\n" +
	"!!! other terminals also.\n"

	# Preferred: gcc-config reports the active profile as ${CHOST}-<version>.
	mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
	if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
		return myoutput.replace(chost + "-", gcc_ver_prefix, 1)

	# Fallback: ask the ${CHOST}-prefixed compiler directly.
	mystatus, myoutput = commands.getstatusoutput(
		chost + "-" + gcc_ver_command)
	if mystatus == os.EX_OK:
		return gcc_ver_prefix + myoutput

	# Last resort: plain "gcc" from PATH.
	mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
	if mystatus == os.EX_OK:
		return gcc_ver_prefix + myoutput

	portage.writemsg(gcc_not_found_error, noiselevel=-1)
	return "[unavailable]"
def getportageversion(portdir, target_root, profile, chost, vardb):
	"""Build the "Portage <version> (<profile>, <gcc>, <libc>, <uname>)"
	banner string shown by emerge --version / --info.
	NOTE(review): sampled excerpt — try/except wrappers and the loop over
	libclist are not visible here."""
	profilever = "unavailable"
		realpath = os.path.realpath(profile)
		basepath = os.path.realpath(os.path.join(portdir, "profiles"))
		if realpath.startswith(basepath):
			# Profile inside $PORTDIR/profiles: show the relative path.
			profilever = realpath[1 + len(basepath):]
			# Otherwise show the raw symlink target, flagged with "!".
			profilever = "!" + os.readlink(profile)
		del realpath, basepath

	# Determine the installed libc version from the virtuals.
	libclist = vardb.match("virtual/libc")
	libclist += vardb.match("virtual/glibc")
	libclist = portage.util.unique_array(libclist)
		xs=portage.catpkgsplit(x)
		# Append subsequent libc versions comma-separated.
		libcver+=","+"-".join(xs[1:])
		libcver="-".join(xs[1:])
		libcver="unavailable"

	gccver = getgccversion(chost)
	unameout=platform.release()+" "+platform.machine()

	return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
def create_depgraph_params(myopts, myaction):
	#configure emerge engine parameters
	#
	# self: include _this_ package regardless of if it is merged.
	# selective: exclude the package if it is merged
	# recurse: go into the dependencies
	# deep: go into the dependencies of already merged packages
	# empty: pretend nothing is merged
	# complete: completely account for all known dependencies
	# remove: build graph for use in removing packages
	#
	# NOTE(review): sampled excerpt — the early return for "remove", the
	# "deep" add under --deep, and the final return are not visible here.
	myparams = set(["recurse"])

	if myaction == "remove":
		myparams.add("remove")
		myparams.add("complete")

	if "--update" in myopts or \
		"--newuse" in myopts or \
		"--reinstall" in myopts or \
		"--noreplace" in myopts:
		myparams.add("selective")
	if "--emptytree" in myopts:
		myparams.add("empty")
		# --emptytree overrides selective behavior.
		myparams.discard("selective")
	if "--nodeps" in myopts:
		myparams.discard("recurse")
	if "--deep" in myopts:
	if "--complete-graph" in myopts:
		myparams.add("complete")
# search functionality
class search(object):
	# Implements `emerge --search` / `--searchdesc` over the combined set of
	# databases (ebuild repo, binary packages, installed packages).
	# NOTE(review): sampled excerpt — class constants (e.g. VERSION_RELEASE),
	# several method headers, loop headers, else branches, and return
	# statements are not visible here; visible tokens are preserved.

	def __init__(self, root_config, spinner, searchdesc,
		verbose, usepkg, usepkgonly):
		"""Searches the available and installed packages for the supplied search key.
		The list of available and installed packages is created at object instantiation.
		This makes successive searches faster."""
		self.settings = root_config.settings
		self.vartree = root_config.trees["vartree"]
		self.spinner = spinner
		self.verbose = verbose
		self.searchdesc = searchdesc
		self.root_config = root_config
		self.setconfig = root_config.setconfig
		self.matches = {"pkg" : []}

		# fake_portdb presents a unified dbapi-like facade whose methods
		# dispatch to this object's _-prefixed implementations below.
		self.portdb = fake_portdb
		for attrib in ("aux_get", "cp_all",
			"xmatch", "findname", "getFetchMap"):
			setattr(fake_portdb, attrib, getattr(self, "_"+attrib))

		portdb = root_config.trees["porttree"].dbapi
		bindb = root_config.trees["bintree"].dbapi
		vardb = root_config.trees["vartree"].dbapi
		# Build the ordered list of databases to search.
		if not usepkgonly and portdb._have_root_eclass_dir:
			self._dbs.append(portdb)
		if (usepkg or usepkgonly) and bindb.cp_all():
			self._dbs.append(bindb)
		self._dbs.append(vardb)
		self._portdb = portdb

	# Fragment of _cp_all (method header and loop over self._dbs not visible):
	# unions cp_all() results from every db and returns them sorted.
			cp_all.update(db.cp_all())
		return list(sorted(cp_all))

	def _aux_get(self, *args, **kwargs):
		# Delegates to each db in turn (enclosing loop not visible here).
			return db.aux_get(*args, **kwargs)

	def _findname(self, *args, **kwargs):
		# Enclosing loop over self._dbs not visible here.
		if db is not self._portdb:
			# We don't want findname to return anything
			# unless it's an ebuild in a portage tree.
			# Otherwise, it's already built and we don't
			# need an ebuild.
			func = getattr(db, "findname", None)
			value = func(*args, **kwargs)

	def _getFetchMap(self, *args, **kwargs):
		# Enclosing loop over self._dbs not visible here.
			func = getattr(db, "getFetchMap", None)
			value = func(*args, **kwargs)

	def _visible(self, db, cpv, metadata):
		# A package is "built" unless it comes from the ebuild repo.
		installed = db is self.vartree.dbapi
		built = installed or db is not self._portdb
			pkg_type = "installed"
		return visible(self.settings,
			Package(type_name=pkg_type, root_config=self.root_config,
			cpv=cpv, built=built, installed=installed, metadata=metadata))

	def _xmatch(self, level, atom):
		"""
		This method does not expand old-style virtuals because it
		is restricted to returning matches for a single ${CATEGORY}/${PN}
		and old-style virtual matches are unreliable for that when querying
		multiple package databases. If necessary, old-style virtuals
		can be performed on atoms prior to calling this method.
		"""
		cp = portage.dep_getkey(atom)
		if level == "match-all":
			# Union of matches from every db, restricted to this cp.
			if hasattr(db, "xmatch"):
				matches.update(db.xmatch(level, atom))
				matches.update(db.match(atom))
			result = list(x for x in matches if portage.cpv_getkey(x) == cp)
			db._cpv_sort_ascending(result)
		elif level == "match-visible":
			# Like match-all, but filtered through _visible() for dbs
			# lacking a native xmatch.
			if hasattr(db, "xmatch"):
				matches.update(db.xmatch(level, atom))
				db_keys = list(db._aux_cache_keys)
				for cpv in db.match(atom):
					metadata = izip(db_keys,
						db.aux_get(cpv, db_keys))
					if not self._visible(db, cpv, metadata):
			result = list(x for x in matches if portage.cpv_getkey(x) == cp)
			db._cpv_sort_ascending(result)
		elif level == "bestmatch-visible":
			# Best single visible match across all dbs via portage.best().
			if hasattr(db, "xmatch"):
				cpv = db.xmatch("bestmatch-visible", atom)
				if not cpv or portage.cpv_getkey(cpv) != cp:
				if not result or cpv == portage.best([cpv, result]):
				db_keys = Package.metadata_keys
				# break out of this loop with highest visible
				# match, checked in descending order
				for cpv in reversed(db.match(atom)):
					if portage.cpv_getkey(cpv) != cp:
					metadata = izip(db_keys,
						db.aux_get(cpv, db_keys))
					if not self._visible(db, cpv, metadata):
					if not result or cpv == portage.best([cpv, result]):
			raise NotImplementedError(level)

	def execute(self,searchkey):
		"""Performs the search for the supplied search key"""
		self.searchkey=searchkey
		self.packagematches = []
		# With --searchdesc a "desc" bucket is tracked as well
		# (the two assignments below were alternatives of an if/else
		# whose condition is not visible in this excerpt).
		self.matches = {"pkg":[], "desc":[], "set":[]}
		self.matches = {"pkg":[], "set":[]}
		print "Searching... ",

		# '%' prefix: treat the key as a regular expression
		# (flag assignment not visible here — confirm against full source).
		if self.searchkey.startswith('%'):
			self.searchkey = self.searchkey[1:]
		# '@' prefix: presumably toggles category-aware matching — the flag
		# assignment is not visible here; verify against the full source.
		if self.searchkey.startswith('@'):
			self.searchkey = self.searchkey[1:]
		# Regex mode compiles the key as-is; otherwise it is escaped.
		self.searchre=re.compile(self.searchkey,re.I)
		self.searchre=re.compile(re.escape(self.searchkey), re.I)
		for package in self.portdb.cp_all():
			self.spinner.update()

			# Match against the full cat/pkg or just the package name
			# (these were if/else alternatives; condition not visible).
			match_string = package[:]
			match_string = package.split("/")[-1]

			if self.searchre.search(match_string):
				if not self.portdb.xmatch("match-visible", package):
				self.matches["pkg"].append([package,masked])
			elif self.searchdesc: # DESCRIPTION searching
				full_package = self.portdb.xmatch("bestmatch-visible", package)
				#no match found; we don't want to query description
				full_package = portage.best(
					self.portdb.xmatch("match-all", package))
				full_desc = self.portdb.aux_get(
					full_package, ["DESCRIPTION"])[0]
				print "emerge: search: aux_get() failed, skipping"
				if self.searchre.search(full_desc):
					self.matches["desc"].append([full_package,masked])

		# Also search package sets by name and (optionally) description.
		self.sdict = self.setconfig.getSets()
		for setname in self.sdict:
			self.spinner.update()
			match_string = setname
			match_string = setname.split("/")[-1]

			if self.searchre.search(match_string):
				self.matches["set"].append([setname, False])
			elif self.searchdesc:
				if self.searchre.search(
					self.sdict[setname].getMetadata("DESCRIPTION")):
					self.matches["set"].append([setname, False])

		# Sort each bucket and total the match count.
		for mtype in self.matches:
			self.matches[mtype].sort()
			self.mlen += len(self.matches[mtype])

	# Fragment of addCP (method header not visible): records an explicit
	# cp match, flagging it masked when no visible match exists.
		if not self.portdb.xmatch("match-all", cp):
		if not self.portdb.xmatch("bestmatch-visible", cp):
		self.matches["pkg"].append([cp, masked])

	# output() — method header not visible in this excerpt.
		"""Outputs the results of the search."""
		print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
		print "[ Applications found : "+white(str(self.mlen))+" ]"
		vardb = self.vartree.dbapi
		for mtype in self.matches:
			for match,masked in self.matches[mtype]:
				full_package = self.portdb.xmatch(
					"bestmatch-visible", match)
				#no match found; we don't want to query description
				full_package = portage.best(
					self.portdb.xmatch("match-all",match))
				elif mtype == "desc":
					match = portage.cpv_getkey(match)
				# "set" results only print name and description.
				print green("*")+" "+white(match)
				print "      ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION")
				desc, homepage, license = self.portdb.aux_get(
					full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
				print "emerge: search: aux_get() failed, skipping"
				print green("*")+" "+white(match)+" "+red("[ Masked ]")
				print green("*")+" "+white(match)
				myversion = self.getVersion(full_package, search.VERSION_RELEASE)

				mycat = match.split("/")[0]
				mypkg = match.split("/")[1]
				mycpv = match + "-" + myversion
				myebuild = self.portdb.findname(mycpv)
				pkgdir = os.path.dirname(myebuild)
				from portage import manifest
				mf = manifest.Manifest(
					pkgdir, self.settings["DISTDIR"])
				uri_map = self.portdb.getFetchMap(mycpv)
				except portage.exception.InvalidDependString, e:
					file_size_str = "Unknown (%s)" % (e,)
				# Total distfile size from the Manifest digests.
				mysum[0] = mf.getDistfilesSize(uri_map)
				file_size_str = "Unknown (missing " + \
					"digest for %s)" % (e,)
				# For binary packages, fall back to the binpkg file size.
				if db is not vardb and \
					db.cpv_exists(mycpv):
					if not myebuild and hasattr(db, "bintree"):
						myebuild = db.bintree.getname(mycpv)
						mysum[0] = os.stat(myebuild).st_size
				if myebuild and file_size_str is None:
					# Render kB with a thousands separator.
					mystr = str(mysum[0] / 1024)
					mystr = mystr[:mycount] + "," + mystr[mycount:]
					file_size_str = mystr + " kB"

				print "      ", darkgreen("Latest version available:"),myversion
				print "      ", self.getInstallationStatus(mycat+'/'+mypkg)
					(darkgreen("Size of files:"), file_size_str)
				print "      ", darkgreen("Homepage:")+" ",homepage
				print "      ", darkgreen("Description:")+" ",desc
				print "      ", darkgreen("License:")+" ",license

	def getInstallationStatus(self,package):
		# Report the best installed version, or "[ Not Installed ]"
		# (if/else structure and final return not visible here).
		installed_package = self.vartree.dep_bestmatch(package)
		version = self.getVersion(installed_package,search.VERSION_RELEASE)
		result = darkgreen("Latest version installed:")+" "+version
		result = darkgreen("Latest version installed:")+" [ Not Installed ]"

	def getVersion(self,full_package,detail):
		# Extract version (and revision unless it is r0) from a cpv.
		if len(full_package) > 1:
			package_parts = portage.catpkgsplit(full_package)
			if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
				result = package_parts[2]+ "-" + package_parts[3]
				result = package_parts[2]
class RootConfig(object):
	"""This is used internally by depgraph to track information about a
	particular $ROOT instance."""

	# Maps Package type_name -> tree name in the trees dict
	# (the "pkg_tree_map = {" opener is not visible in this excerpt).
		"ebuild" : "porttree",
		"binary" : "bintree",
		"installed" : "vartree"

	# Builds the reverse (tree -> pkg type) mapping; loop body not visible.
	for k, v in pkg_tree_map.iteritems():

	def __init__(self, settings, trees, setconfig):
		# NOTE(review): `trees` is not referenced in the visible lines —
		# presumably stored by a line missing from this excerpt.
		self.settings = settings
		self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
		self.root = self.settings["ROOT"]
		self.setconfig = setconfig
		self.sets = self.setconfig.getSets()
		self.visible_pkgs = PackageVirtualDbapi(self.settings)
def create_world_atom(pkg, args_set, root_config):
	"""Create a new atom for the world file if one does not exist. If the
	argument atom is precise enough to identify a specific slot then a slot
	atom will be returned. Atoms that are in the system set may also be stored
	in world since system atoms can only match one slot while world atoms can
	be greedy with respect to slots. Unslotted system packages will not be
	stored in world."""
	# NOTE(review): sampled excerpt — several guard/else/return lines are
	# not visible here; visible tokens preserved.
	arg_atom = args_set.findAtomForPackage(pkg)
	cp = portage.dep_getkey(arg_atom)
	sets = root_config.sets
	portdb = root_config.trees["porttree"].dbapi
	vardb = root_config.trees["vartree"].dbapi
	# Slotted = more than one SLOT, or a single SLOT other than "0".
	available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
		for cpv in portdb.match(cp))
	slotted = len(available_slots) > 1 or \
		(len(available_slots) == 1 and "0" not in available_slots)
	# check the vdb in case this is multislot
	available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
		for cpv in vardb.match(cp))
	slotted = len(available_slots) > 1 or \
		(len(available_slots) == 1 and "0" not in available_slots)
	if slotted and arg_atom != cp:
		# If the user gave a specific atom, store it as a
		# slot atom in the world file.
		slot_atom = pkg.slot_atom

		# For USE=multislot, there are a couple of cases to
		# consider:
		#
		# 1) SLOT="0", but the real SLOT spontaneously changed to some
		# unknown value, so just record an unslotted atom.
		#
		# 2) SLOT comes from an installed package and there is no
		# matching SLOT in the portage tree.
		#
		# Make sure that the slot atom is available in either the
		# portdb or the vardb, since otherwise the user certainly
		# doesn't want the SLOT atom recorded in the world file
		# (case 1 above). If it's only available in the vardb,
		# the user may be trying to prevent a USE=multislot
		# package from being removed by --depclean (case 2 above).
		if not portdb.match(slot_atom):
			# SLOT seems to come from an installed multislot package
			# If there is no installed package matching the SLOT atom,
			# it probably changed SLOT spontaneously due to USE=multislot,
			# so just record an unslotted atom.
			if vardb.match(slot_atom):
				# Now verify that the argument is precise
				# enough to identify a specific slot.
				matches = mydb.match(arg_atom)
				matched_slots = set()
					matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
				if len(matched_slots) == 1:
					new_world_atom = slot_atom
					if new_world_atom == sets["world"].findAtomForPackage(pkg):
						# Both atoms would be identical, so there's nothing to add.

	# Unlike world atoms, system atoms are not greedy for slots, so they
	# can't be safely excluded from world if they are slotted.
	system_atom = sets["system"].findAtomForPackage(pkg)
	if not portage.dep_getkey(system_atom).startswith("virtual/"):
		# System virtuals aren't safe to exclude from world since they can
		# match multiple old-style virtuals but only one of them will be
		# pulled in by update or depclean.
		providers = portdb.mysettings.getvirtuals().get(
			portage.dep_getkey(system_atom))
		if providers and len(providers) == 1 and providers[0] == cp:
	return new_world_atom
def filter_iuse_defaults(iuse):
	# Strips IUSE default markers ("+"/"-") from each flag; the enclosing
	# loop and yield statements are not visible in this sampled excerpt.
		if flag.startswith("+") or flag.startswith("-"):
class SlotObject(object):
	# Lightweight base class: __init__ accepts keyword args for every name
	# declared in __slots__ anywhere in the MRO (walked via __bases__).
	# NOTE(review): sampled excerpt — loop headers and the copy() method
	# header are not visible here.
	__slots__ = ("__weakref__",)

	def __init__(self, **kwargs):
		classes = [self.__class__]
			classes.extend(c.__bases__)
			slots = getattr(c, "__slots__", None)
				# Unspecified attributes default to None.
				myvalue = kwargs.get(myattr, None)
				setattr(self, myattr, myvalue)

	# copy() — method header not visible in this excerpt.
		"""
		Create a new instance and copy all attributes
		defined from __slots__ (including those from
		inherited classes).
		"""
		obj = self.__class__()
		classes = [self.__class__]
			classes.extend(c.__bases__)
			slots = getattr(c, "__slots__", None)
				setattr(obj, myattr, getattr(self, myattr))
class AbstractDepPriority(SlotObject):
	# Base for dependency priorities. Subclasses supply __int__; all rich
	# comparisons delegate to that integer value.
	__slots__ = ("buildtime", "runtime", "runtime_post")

	def __lt__(self, other):
		return self.__int__() < other

	def __le__(self, other):
		return self.__int__() <= other

	def __eq__(self, other):
		return self.__int__() == other

	def __ne__(self, other):
		return self.__int__() != other

	def __gt__(self, other):
		return self.__int__() > other

	def __ge__(self, other):
		return self.__int__() >= other

	# copy() — method header not visible in this sampled excerpt.
		return copy.copy(self)
class DepPriority(AbstractDepPriority):
	# NOTE(review): sampled excerpt — __int__ and most of __str__ are not
	# visible here.

	__slots__ = ("satisfied", "optional", "rebuild")

	# Fragment of __str__: reports the dominant property name.
		if self.runtime_post:
			return "runtime_post"
class BlockerDepPriority(DepPriority):
	# NOTE(review): sampled excerpt — the class body (__int__/__str__
	# overrides) is not visible here.

# Shared singleton used wherever a blocker priority is needed.
BlockerDepPriority.instance = BlockerDepPriority()
class UnmergeDepPriority(AbstractDepPriority):
	__slots__ = ("optional", "satisfied",)
	"""
	Combination of properties           Priority  Category
	(none of the above)                   -2       SOFT
	"""

	# Fragments of __int__ / __str__ (surrounding lines not visible in
	# this sampled excerpt).
		if self.runtime_post:
		myvalue = self.__int__()
		if myvalue > self.SOFT:
class DepPriorityNormalRange(object):
	"""
	DepPriority properties              Index  Category

	runtime_post                           2   MEDIUM_SOFT
	(none of the above)                    0   NONE
	"""
	# NOTE(review): sampled excerpt — the @classmethod decorators, the
	# early "return False" guards, and some index constants are not
	# visible here.

	def _ignore_optional(cls, priority):
		if priority.__class__ is not DepPriority:
		return bool(priority.optional)

	def _ignore_runtime_post(cls, priority):
		if priority.__class__ is not DepPriority:
		return bool(priority.optional or priority.runtime_post)

	def _ignore_runtime(cls, priority):
		if priority.__class__ is not DepPriority:
		return not priority.buildtime

	ignore_medium = _ignore_runtime
	ignore_medium_soft = _ignore_runtime_post
	ignore_soft = _ignore_optional

# Indexed by severity; entry 0 (None, "ignore nothing") and the closing
# paren are not visible in this sampled excerpt.
DepPriorityNormalRange.ignore_priority = (
	DepPriorityNormalRange._ignore_optional,
	DepPriorityNormalRange._ignore_runtime_post,
	DepPriorityNormalRange._ignore_runtime
class DepPrioritySatisfiedRange(object):
	"""
	DepPriority                         Index  Category

	not satisfied and buildtime                HARD
	not satisfied and runtime              7   MEDIUM
	not satisfied and runtime_post         6   MEDIUM_SOFT
	satisfied and buildtime and rebuild    5   SOFT
	satisfied and buildtime                4   SOFT
	satisfied and runtime                  3   SOFT
	satisfied and runtime_post             2   SOFT
	(none of the above)                    0   NONE
	"""
	# NOTE(review): sampled excerpt — the @classmethod decorators and the
	# early "return False" guards are not visible here.

	def _ignore_optional(cls, priority):
		if priority.__class__ is not DepPriority:
		return bool(priority.optional)

	def _ignore_satisfied_runtime_post(cls, priority):
		if priority.__class__ is not DepPriority:
		if priority.optional:
		if not priority.satisfied:
		return bool(priority.runtime_post)

	def _ignore_satisfied_runtime(cls, priority):
		if priority.__class__ is not DepPriority:
		if priority.optional:
		if not priority.satisfied:
		return not priority.buildtime

	def _ignore_satisfied_buildtime(cls, priority):
		if priority.__class__ is not DepPriority:
		if priority.optional:
		if not priority.satisfied:
		if priority.buildtime:
			return not priority.rebuild

	def _ignore_satisfied_buildtime_rebuild(cls, priority):
		if priority.__class__ is not DepPriority:
		if priority.optional:
		return bool(priority.satisfied)

	def _ignore_runtime_post(cls, priority):
		if priority.__class__ is not DepPriority:
		return bool(priority.optional or \
			priority.satisfied or \
			priority.runtime_post)

	def _ignore_runtime(cls, priority):
		if priority.__class__ is not DepPriority:
		return bool(priority.satisfied or \
			not priority.buildtime)

	ignore_medium = _ignore_runtime
	ignore_medium_soft = _ignore_runtime_post
	ignore_soft = _ignore_satisfied_buildtime_rebuild

# Indexed by severity; entry 0 and the closing paren are not visible in
# this sampled excerpt.
DepPrioritySatisfiedRange.ignore_priority = (
	DepPrioritySatisfiedRange._ignore_optional,
	DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
	DepPrioritySatisfiedRange._ignore_satisfied_runtime,
	DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
	DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
	DepPrioritySatisfiedRange._ignore_runtime_post,
	DepPrioritySatisfiedRange._ignore_runtime
def _find_deep_system_runtime_deps(graph):
	# Collects the set of Package nodes reachable from system-set packages
	# through runtime (and runtime_post) dependency edges only.
	# NOTE(review): sampled excerpt — the node_stack initialization, the
	# seeding loop header, the while loop header, and several continue /
	# return statements are not visible here.
	deep_system_deps = set()
		if not isinstance(node, Package) or \
			node.operation == 'uninstall':
		# Seed the traversal with system-set packages.
		if node.root_config.sets['system'].findAtomForPackage(node):
			node_stack.append(node)

	def ignore_priority(priority):
		"""
		Ignore non-runtime priorities.
		"""
		if isinstance(priority, DepPriority) and \
			(priority.runtime or priority.runtime_post):

		node = node_stack.pop()
		if node in deep_system_deps:
		deep_system_deps.add(node)
		# Walk runtime children, skipping uninstalls and non-Package nodes.
		for child in graph.child_nodes(node, ignore_priority=ignore_priority):
			if not isinstance(child, Package) or \
				child.operation == 'uninstall':
			node_stack.append(child)

	return deep_system_deps
class FakeVartree(portage.vartree):
	"""This implements an in-memory copy of a vartree instance that provides
	all the interfaces required for use by the depgraph. The vardb is locked
	during the constructor call just long enough to read a copy of the
	installed package information. This allows the depgraph to do its
	dependency calculations without holding a lock on the vardb. It also
	allows things like vardb global updates to be done in memory so that the
	user doesn't necessarily need write access to the vardb in cases where
	global updates are necessary (updates are performed when necessary if there
	is not a matching ebuild in the tree)."""

	# NOTE(review): sampled excerpt — try/finally wrappers, else branches,
	# and several continue/return statements are not visible throughout
	# this class; visible tokens are preserved unchanged.

	def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
		self._root_config = root_config
		if pkg_cache is None:
		real_vartree = root_config.trees["vartree"]
		portdb = root_config.trees["porttree"].dbapi
		self.root = real_vartree.root
		self.settings = real_vartree.settings
		# Always track _mtime_ so stale entries can be detected in sync().
		mykeys = list(real_vartree.dbapi._aux_cache_keys)
		if "_mtime_" not in mykeys:
			mykeys.append("_mtime_")
		self._db_keys = mykeys
		self._pkg_cache = pkg_cache
		self.dbapi = PackageVirtualDbapi(real_vartree.settings)
		vdb_path = os.path.join(self.root, portage.VDB_PATH)
		# At least the parent needs to exist for the lock file.
		portage.util.ensure_dirs(vdb_path)
		except portage.exception.PortageException:
		# Only lock the vdb when we can actually write the lock file.
		if acquire_lock and os.access(vdb_path, os.W_OK):
			vdb_lock = portage.locks.lockdir(vdb_path)
		real_dbapi = real_vartree.dbapi
		for cpv in real_dbapi.cpv_all():
			cache_key = ("installed", self.root, cpv, "nomerge")
			pkg = self._pkg_cache.get(cache_key)
				# Cache hit: reuse the existing Package metadata.
				metadata = pkg.metadata
				metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
			myslot = metadata["SLOT"]
			mycp = portage.dep_getkey(cpv)
			myslot_atom = "%s:%s" % (mycp, myslot)
				mycounter = long(metadata["COUNTER"])
				metadata["COUNTER"] = str(mycounter)
			# Keep only the highest COUNTER per slot atom.
			other_counter = slot_counters.get(myslot_atom, None)
			if other_counter is not None:
				if other_counter > mycounter:
			slot_counters[myslot_atom] = mycounter
			pkg = Package(built=True, cpv=cpv,
				installed=True, metadata=metadata,
				root_config=root_config, type_name="installed")
			self._pkg_cache[pkg] = pkg
			self.dbapi.cpv_inject(pkg)
		real_dbapi.flush_cache()
			portage.locks.unlockdir(vdb_lock)
		# Populate the old-style virtuals using the cached values.
		if not self.settings.treeVirtuals:
			self.settings.treeVirtuals = portage.util.map_dictlist_vals(
				portage.getCPFromCPV, self.get_all_provides())

		# Initialize variables needed for lazy cache pulls of the live ebuild
		# metadata. This ensures that the vardb lock is released ASAP, without
		# being delayed in case cache generation is triggered.
		self._aux_get = self.dbapi.aux_get
		self.dbapi.aux_get = self._aux_get_wrapper
		self._match = self.dbapi.match
		self.dbapi.match = self._match_wrapper
		self._aux_get_history = set()
		self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
		self._portdb = portdb
		self._global_updates = None

	def _match_wrapper(self, cpv, use_cache=1):
		"""
		Make sure the metadata in Package instances gets updated for any
		cpv that is returned from a match() call, since the metadata can
		be accessed directly from the Package instance instead of via
		aux_get().
		"""
		matches = self._match(cpv, use_cache=use_cache)
			if cpv in self._aux_get_history:
			self._aux_get_wrapper(cpv, [])

	def _aux_get_wrapper(self, pkg, wants):
		# Only refresh each package's metadata once per instance.
		if pkg in self._aux_get_history:
			return self._aux_get(pkg, wants)
		self._aux_get_history.add(pkg)
		# Use the live ebuild metadata if possible.
		live_metadata = dict(izip(self._portdb_keys,
			self._portdb.aux_get(pkg, self._portdb_keys)))
		if not portage.eapi_is_supported(live_metadata["EAPI"]):
		self.dbapi.aux_update(pkg, live_metadata)
		except (KeyError, portage.exception.PortageException):
			# No usable ebuild: apply repository move/slotmove updates
			# to the cached vdb metadata instead.
			if self._global_updates is None:
				self._global_updates = \
					grab_global_updates(self._portdb.porttree_root)
			perform_global_updates(
				pkg, self.dbapi, self._global_updates)
		return self._aux_get(pkg, wants)

	def sync(self, acquire_lock=1):
		"""
		Call this method to synchronize state with the real vardb
		after one or more packages may have been installed or
		uninstalled.
		"""
		vdb_path = os.path.join(self.root, portage.VDB_PATH)
		# At least the parent needs to exist for the lock file.
		portage.util.ensure_dirs(vdb_path)
		except portage.exception.PortageException:
		if acquire_lock and os.access(vdb_path, os.W_OK):
			vdb_lock = portage.locks.lockdir(vdb_path)
			portage.locks.unlockdir(vdb_lock)

	# _sync body (method header not visible in this excerpt): reconciles
	# the in-memory dbapi against the real vdb contents.
		real_vardb = self._root_config.trees["vartree"].dbapi
		current_cpv_set = frozenset(real_vardb.cpv_all())
		pkg_vardb = self.dbapi
		aux_get_history = self._aux_get_history

		# Remove any packages that have been uninstalled.
		for pkg in list(pkg_vardb):
			if pkg.cpv not in current_cpv_set:
				pkg_vardb.cpv_remove(pkg)
				aux_get_history.discard(pkg.cpv)

		# Validate counters and timestamps.
		validation_keys = ["COUNTER", "_mtime_"]
		for cpv in current_cpv_set:
			pkg_hash_key = ("installed", root, cpv, "nomerge")
			pkg = pkg_vardb.get(pkg_hash_key)
				counter, mtime = real_vardb.aux_get(cpv, validation_keys)
					counter = long(counter)
				# Drop stale entries so they get re-read below.
				if counter != pkg.counter or \
					pkg_vardb.cpv_remove(pkg)
					aux_get_history.discard(pkg.cpv)
			pkg = self._pkg(cpv)
			# Keep only the highest COUNTER per slot atom.
			other_counter = slot_counters.get(pkg.slot_atom)
			if other_counter is not None:
				if other_counter > pkg.counter:
			slot_counters[pkg.slot_atom] = pkg.counter
			pkg_vardb.cpv_inject(pkg)
		real_vardb.flush_cache()

	def _pkg(self, cpv):
		# Build a Package instance for an installed cpv from the real vdb.
		root_config = self._root_config
		real_vardb = root_config.trees["vartree"].dbapi
		pkg = Package(cpv=cpv, installed=True,
			metadata=izip(self._db_keys,
			real_vardb.aux_get(cpv, self._db_keys)),
			root_config=root_config,
			type_name="installed")

		# Normalize COUNTER to a canonical integer string.
			mycounter = long(pkg.metadata["COUNTER"])
			pkg.metadata["COUNTER"] = str(mycounter)
1360 def grab_global_updates(portdir):
1361 from portage.update import grab_updates, parse_updates
1362 updpath = os.path.join(portdir, "profiles", "updates")
1364 rawupdates = grab_updates(updpath)
1365 except portage.exception.DirectoryNotFound:
1368 for mykey, mystat, mycontent in rawupdates:
1369 commands, errors = parse_updates(mycontent)
1370 upd_commands.extend(commands)
1373 def perform_global_updates(mycpv, mydb, mycommands):
1374 from portage.update import update_dbentries
1375 aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1376 aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1377 updates = update_dbentries(mycommands, aux_dict)
1379 mydb.aux_update(mycpv, updates)
1381 def visible(pkgsettings, pkg):
1383 Check if a package is visible. This can raise an InvalidDependString
1384 exception if LICENSE is invalid.
1385 TODO: optionally generate a list of masking reasons
1387 @returns: True if the package is visible, False otherwise.
1389 if not pkg.metadata["SLOT"]:
1391 if not pkg.installed:
1392 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1394 eapi = pkg.metadata["EAPI"]
1395 if not portage.eapi_is_supported(eapi):
1397 if not pkg.installed:
1398 if portage._eapi_is_deprecated(eapi):
1400 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1402 if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1404 if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1407 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1409 except portage.exception.InvalidDependString:
1413 def get_masking_status(pkg, pkgsettings, root_config):
1415 mreasons = portage.getmaskingstatus(
1416 pkg, settings=pkgsettings,
1417 portdb=root_config.trees["porttree"].dbapi)
1419 if not pkg.installed:
1420 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1421 mreasons.append("CHOST: %s" % \
1422 pkg.metadata["CHOST"])
1424 if not pkg.metadata["SLOT"]:
1425 mreasons.append("invalid: SLOT is undefined")
1429 def get_mask_info(root_config, cpv, pkgsettings,
1430 db, pkg_type, built, installed, db_keys):
1433 metadata = dict(izip(db_keys,
1434 db.aux_get(cpv, db_keys)))
1437 if metadata and not built:
1438 pkgsettings.setcpv(cpv, mydb=metadata)
1439 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1440 metadata['CHOST'] = pkgsettings.get('CHOST', '')
1441 if metadata is None:
1442 mreasons = ["corruption"]
1444 eapi = metadata['EAPI']
1447 if not portage.eapi_is_supported(eapi):
1448 mreasons = ['EAPI %s' % eapi]
1450 pkg = Package(type_name=pkg_type, root_config=root_config,
1451 cpv=cpv, built=built, installed=installed, metadata=metadata)
1452 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1453 return metadata, mreasons
1455 def show_masked_packages(masked_packages):
1456 shown_licenses = set()
1457 shown_comments = set()
1458 # Maybe there is both an ebuild and a binary. Only
1459 # show one of them to avoid redundant appearance.
1461 have_eapi_mask = False
1462 for (root_config, pkgsettings, cpv,
1463 metadata, mreasons) in masked_packages:
1464 if cpv in shown_cpvs:
1467 comment, filename = None, None
1468 if "package.mask" in mreasons:
1469 comment, filename = \
1470 portage.getmaskingreason(
1471 cpv, metadata=metadata,
1472 settings=pkgsettings,
1473 portdb=root_config.trees["porttree"].dbapi,
1474 return_location=True)
1475 missing_licenses = []
1477 if not portage.eapi_is_supported(metadata["EAPI"]):
1478 have_eapi_mask = True
1480 missing_licenses = \
1481 pkgsettings._getMissingLicenses(
1483 except portage.exception.InvalidDependString:
1484 # This will have already been reported
1485 # above via mreasons.
1488 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1489 if comment and comment not in shown_comments:
1492 shown_comments.add(comment)
1493 portdb = root_config.trees["porttree"].dbapi
1494 for l in missing_licenses:
1495 l_path = portdb.findLicensePath(l)
1496 if l in shown_licenses:
1498 msg = ("A copy of the '%s' license" + \
1499 " is located at '%s'.") % (l, l_path)
1502 shown_licenses.add(l)
1503 return have_eapi_mask
1505 class Task(SlotObject):
1506 __slots__ = ("_hash_key", "_hash_value")
1508 def _get_hash_key(self):
1509 hash_key = getattr(self, "_hash_key", None)
1510 if hash_key is None:
1511 raise NotImplementedError(self)
1514 def __eq__(self, other):
1515 return self._get_hash_key() == other
1517 def __ne__(self, other):
1518 return self._get_hash_key() != other
1521 hash_value = getattr(self, "_hash_value", None)
1522 if hash_value is None:
1523 self._hash_value = hash(self._get_hash_key())
1524 return self._hash_value
1527 return len(self._get_hash_key())
1529 def __getitem__(self, key):
1530 return self._get_hash_key()[key]
1533 return iter(self._get_hash_key())
1535 def __contains__(self, key):
1536 return key in self._get_hash_key()
1539 return str(self._get_hash_key())
1541 class Blocker(Task):
1543 __hash__ = Task.__hash__
1544 __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1546 def __init__(self, **kwargs):
1547 Task.__init__(self, **kwargs)
1548 self.cp = portage.dep_getkey(self.atom)
1550 def _get_hash_key(self):
1551 hash_key = getattr(self, "_hash_key", None)
1552 if hash_key is None:
1554 ("blocks", self.root, self.atom, self.eapi)
1555 return self._hash_key
1557 class Package(Task):
1559 __hash__ = Task.__hash__
1560 __slots__ = ("built", "cpv", "depth",
1561 "installed", "metadata", "onlydeps", "operation",
1562 "root_config", "type_name",
1563 "category", "counter", "cp", "cpv_split",
1564 "inherited", "iuse", "mtime",
1565 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1568 "CHOST", "COUNTER", "DEPEND", "EAPI",
1569 "INHERITED", "IUSE", "KEYWORDS",
1570 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1571 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1573 def __init__(self, **kwargs):
1574 Task.__init__(self, **kwargs)
1575 self.root = self.root_config.root
1576 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1577 self.cp = portage.cpv_getkey(self.cpv)
1580 # Avoid an InvalidAtom exception when creating slot_atom.
1581 # This package instance will be masked due to empty SLOT.
1583 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
1584 self.category, self.pf = portage.catsplit(self.cpv)
1585 self.cpv_split = portage.catpkgsplit(self.cpv)
1586 self.pv_split = self.cpv_split[1:]
1590 __slots__ = ("__weakref__", "enabled")
1592 def __init__(self, use):
1593 self.enabled = frozenset(use)
1595 class _iuse(object):
1597 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1599 def __init__(self, tokens, iuse_implicit):
1600 self.tokens = tuple(tokens)
1601 self.iuse_implicit = iuse_implicit
1608 enabled.append(x[1:])
1610 disabled.append(x[1:])
1613 self.enabled = frozenset(enabled)
1614 self.disabled = frozenset(disabled)
1615 self.all = frozenset(chain(enabled, disabled, other))
1617 def __getattribute__(self, name):
1620 return object.__getattribute__(self, "regex")
1621 except AttributeError:
1622 all = object.__getattribute__(self, "all")
1623 iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1624 # Escape anything except ".*" which is supposed
1625 # to pass through from _get_implicit_iuse()
1626 regex = (re.escape(x) for x in chain(all, iuse_implicit))
1627 regex = "^(%s)$" % "|".join(regex)
1628 regex = regex.replace("\\.\\*", ".*")
1629 self.regex = re.compile(regex)
1630 return object.__getattribute__(self, name)
1632 def _get_hash_key(self):
1633 hash_key = getattr(self, "_hash_key", None)
1634 if hash_key is None:
1635 if self.operation is None:
1636 self.operation = "merge"
1637 if self.onlydeps or self.installed:
1638 self.operation = "nomerge"
1640 (self.type_name, self.root, self.cpv, self.operation)
1641 return self._hash_key
1643 def __lt__(self, other):
1644 if other.cp != self.cp:
1646 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1650 def __le__(self, other):
1651 if other.cp != self.cp:
1653 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1657 def __gt__(self, other):
1658 if other.cp != self.cp:
1660 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1664 def __ge__(self, other):
1665 if other.cp != self.cp:
1667 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1671 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1672 if not x.startswith("UNUSED_"))
1673 _all_metadata_keys.discard("CDEPEND")
1674 _all_metadata_keys.update(Package.metadata_keys)
1676 from portage.cache.mappings import slot_dict_class
1677 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1679 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1681 Detect metadata updates and synchronize Package attributes.
1684 __slots__ = ("_pkg",)
1685 _wrapped_keys = frozenset(
1686 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1688 def __init__(self, pkg, metadata):
1689 _PackageMetadataWrapperBase.__init__(self)
1691 self.update(metadata)
1693 def __setitem__(self, k, v):
1694 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1695 if k in self._wrapped_keys:
1696 getattr(self, "_set_" + k.lower())(k, v)
1698 def _set_inherited(self, k, v):
1699 if isinstance(v, basestring):
1700 v = frozenset(v.split())
1701 self._pkg.inherited = v
1703 def _set_iuse(self, k, v):
1704 self._pkg.iuse = self._pkg._iuse(
1705 v.split(), self._pkg.root_config.iuse_implicit)
1707 def _set_slot(self, k, v):
1710 def _set_use(self, k, v):
1711 self._pkg.use = self._pkg._use(v.split())
1713 def _set_counter(self, k, v):
1714 if isinstance(v, basestring):
1719 self._pkg.counter = v
1721 def _set__mtime_(self, k, v):
1722 if isinstance(v, basestring):
1729 class EbuildFetchonly(SlotObject):
1731 __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1734 settings = self.settings
1736 portdb = pkg.root_config.trees["porttree"].dbapi
1737 ebuild_path = portdb.findname(pkg.cpv)
1738 settings.setcpv(pkg)
1739 debug = settings.get("PORTAGE_DEBUG") == "1"
1740 use_cache = 1 # always true
1741 portage.doebuild_environment(ebuild_path, "fetch",
1742 settings["ROOT"], settings, debug, use_cache, portdb)
1743 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
1746 rval = self._execute_with_builddir()
1748 rval = portage.doebuild(ebuild_path, "fetch",
1749 settings["ROOT"], settings, debug=debug,
1750 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1751 mydbapi=portdb, tree="porttree")
1753 if rval != os.EX_OK:
1754 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1755 eerror(msg, phase="unpack", key=pkg.cpv)
1759 def _execute_with_builddir(self):
1760 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1761 # ensuring sane $PWD (bug #239560) and storing elog
1762 # messages. Use a private temp directory, in order
1763 # to avoid locking the main one.
1764 settings = self.settings
1765 global_tmpdir = settings["PORTAGE_TMPDIR"]
1766 from tempfile import mkdtemp
1768 private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1770 if e.errno != portage.exception.PermissionDenied.errno:
1772 raise portage.exception.PermissionDenied(global_tmpdir)
1773 settings["PORTAGE_TMPDIR"] = private_tmpdir
1774 settings.backup_changes("PORTAGE_TMPDIR")
1776 retval = self._execute()
1778 settings["PORTAGE_TMPDIR"] = global_tmpdir
1779 settings.backup_changes("PORTAGE_TMPDIR")
1780 shutil.rmtree(private_tmpdir)
1784 settings = self.settings
1786 root_config = pkg.root_config
1787 portdb = root_config.trees["porttree"].dbapi
1788 ebuild_path = portdb.findname(pkg.cpv)
1789 debug = settings.get("PORTAGE_DEBUG") == "1"
1790 retval = portage.doebuild(ebuild_path, "fetch",
1791 self.settings["ROOT"], self.settings, debug=debug,
1792 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1793 mydbapi=portdb, tree="porttree")
1795 if retval != os.EX_OK:
1796 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1797 eerror(msg, phase="unpack", key=pkg.cpv)
1799 portage.elog.elog_process(self.pkg.cpv, self.settings)
1802 class PollConstants(object):
1805 Provides POLL* constants that are equivalent to those from the
1806 select module, for use by PollSelectAdapter.
1809 names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1812 locals()[k] = getattr(select, k, v)
1816 class AsynchronousTask(SlotObject):
1818 Subclasses override _wait() and _poll() so that calls
1819 to public methods can be wrapped for implementing
1820 hooks such as exit listener notification.
1822 Sublasses should call self.wait() to notify exit listeners after
1823 the task is complete and self.returncode has been set.
1826 __slots__ = ("background", "cancelled", "returncode") + \
1827 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1831 Start an asynchronous task and then return as soon as possible.
1837 raise NotImplementedError(self)
1840 return self.returncode is None
1847 return self.returncode
1850 if self.returncode is None:
1853 return self.returncode
1856 return self.returncode
1859 self.cancelled = True
1862 def addStartListener(self, f):
1864 The function will be called with one argument, a reference to self.
1866 if self._start_listeners is None:
1867 self._start_listeners = []
1868 self._start_listeners.append(f)
1870 def removeStartListener(self, f):
1871 if self._start_listeners is None:
1873 self._start_listeners.remove(f)
1875 def _start_hook(self):
1876 if self._start_listeners is not None:
1877 start_listeners = self._start_listeners
1878 self._start_listeners = None
1880 for f in start_listeners:
1883 def addExitListener(self, f):
1885 The function will be called with one argument, a reference to self.
1887 if self._exit_listeners is None:
1888 self._exit_listeners = []
1889 self._exit_listeners.append(f)
1891 def removeExitListener(self, f):
1892 if self._exit_listeners is None:
1893 if self._exit_listener_stack is not None:
1894 self._exit_listener_stack.remove(f)
1896 self._exit_listeners.remove(f)
1898 def _wait_hook(self):
1900 Call this method after the task completes, just before returning
1901 the returncode from wait() or poll(). This hook is
1902 used to trigger exit listeners when the returncode first
1905 if self.returncode is not None and \
1906 self._exit_listeners is not None:
1908 # This prevents recursion, in case one of the
1909 # exit handlers triggers this method again by
1910 # calling wait(). Use a stack that gives
1911 # removeExitListener() an opportunity to consume
1912 # listeners from the stack, before they can get
1913 # called below. This is necessary because a call
1914 # to one exit listener may result in a call to
1915 # removeExitListener() for another listener on
1916 # the stack. That listener needs to be removed
1917 # from the stack since it would be inconsistent
1918 # to call it after it has been been passed into
1919 # removeExitListener().
1920 self._exit_listener_stack = self._exit_listeners
1921 self._exit_listeners = None
1923 self._exit_listener_stack.reverse()
1924 while self._exit_listener_stack:
1925 self._exit_listener_stack.pop()(self)
1927 class AbstractPollTask(AsynchronousTask):
1929 __slots__ = ("scheduler",) + \
1933 _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1934 _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1937 def _unregister(self):
1938 raise NotImplementedError(self)
1940 def _unregister_if_appropriate(self, event):
1941 if self._registered:
1942 if event & self._exceptional_events:
1945 elif event & PollConstants.POLLHUP:
1949 class PipeReader(AbstractPollTask):
1952 Reads output from one or more files and saves it in memory,
1953 for retrieval via the getvalue() method. This is driven by
1954 the scheduler's poll() loop, so it runs entirely within the
1958 __slots__ = ("input_files",) + \
1959 ("_read_data", "_reg_ids")
1962 self._reg_ids = set()
1963 self._read_data = []
1964 for k, f in self.input_files.iteritems():
1965 fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1966 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1967 self._reg_ids.add(self.scheduler.register(f.fileno(),
1968 self._registered_events, self._output_handler))
1969 self._registered = True
1972 return self._registered
1975 if self.returncode is None:
1977 self.cancelled = True
1981 if self.returncode is not None:
1982 return self.returncode
1984 if self._registered:
1985 self.scheduler.schedule(self._reg_ids)
1988 self.returncode = os.EX_OK
1989 return self.returncode
1992 """Retrieve the entire contents"""
1993 if sys.hexversion >= 0x3000000:
1994 return bytes().join(self._read_data)
1995 return "".join(self._read_data)
1998 """Free the memory buffer."""
1999 self._read_data = None
2001 def _output_handler(self, fd, event):
2003 if event & PollConstants.POLLIN:
2005 for f in self.input_files.itervalues():
2006 if fd == f.fileno():
2009 buf = array.array('B')
2011 buf.fromfile(f, self._bufsize)
2016 self._read_data.append(buf.tostring())
2021 self._unregister_if_appropriate(event)
2022 return self._registered
2024 def _unregister(self):
2026 Unregister from the scheduler and close open files.
2029 self._registered = False
2031 if self._reg_ids is not None:
2032 for reg_id in self._reg_ids:
2033 self.scheduler.unregister(reg_id)
2034 self._reg_ids = None
2036 if self.input_files is not None:
2037 for f in self.input_files.itervalues():
2039 self.input_files = None
2041 class CompositeTask(AsynchronousTask):
2043 __slots__ = ("scheduler",) + ("_current_task",)
2046 return self._current_task is not None
2049 self.cancelled = True
2050 if self._current_task is not None:
2051 self._current_task.cancel()
2055 This does a loop calling self._current_task.poll()
2056 repeatedly as long as the value of self._current_task
2057 keeps changing. It calls poll() a maximum of one time
2058 for a given self._current_task instance. This is useful
2059 since calling poll() on a task can trigger advance to
2060 the next task could eventually lead to the returncode
2061 being set in cases when polling only a single task would
2062 not have the same effect.
2067 task = self._current_task
2068 if task is None or task is prev:
2069 # don't poll the same task more than once
2074 return self.returncode
2080 task = self._current_task
2082 # don't wait for the same task more than once
2085 # Before the task.wait() method returned, an exit
2086 # listener should have set self._current_task to either
2087 # a different task or None. Something is wrong.
2088 raise AssertionError("self._current_task has not " + \
2089 "changed since calling wait", self, task)
2093 return self.returncode
2095 def _assert_current(self, task):
2097 Raises an AssertionError if the given task is not the
2098 same one as self._current_task. This can be useful
2101 if task is not self._current_task:
2102 raise AssertionError("Unrecognized task: %s" % (task,))
2104 def _default_exit(self, task):
2106 Calls _assert_current() on the given task and then sets the
2107 composite returncode attribute if task.returncode != os.EX_OK.
2108 If the task failed then self._current_task will be set to None.
2109 Subclasses can use this as a generic task exit callback.
2112 @returns: The task.returncode attribute.
2114 self._assert_current(task)
2115 if task.returncode != os.EX_OK:
2116 self.returncode = task.returncode
2117 self._current_task = None
2118 return task.returncode
2120 def _final_exit(self, task):
2122 Assumes that task is the final task of this composite task.
2123 Calls _default_exit() and sets self.returncode to the task's
2124 returncode and sets self._current_task to None.
2126 self._default_exit(task)
2127 self._current_task = None
2128 self.returncode = task.returncode
2129 return self.returncode
2131 def _default_final_exit(self, task):
2133 This calls _final_exit() and then wait().
2135 Subclasses can use this as a generic final task exit callback.
2138 self._final_exit(task)
2141 def _start_task(self, task, exit_handler):
2143 Register exit handler for the given task, set it
2144 as self._current_task, and call task.start().
2146 Subclasses can use this as a generic way to start
2150 task.addExitListener(exit_handler)
2151 self._current_task = task
2154 class TaskSequence(CompositeTask):
2156 A collection of tasks that executes sequentially. Each task
2157 must have a addExitListener() method that can be used as
2158 a means to trigger movement from one task to the next.
2161 __slots__ = ("_task_queue",)
2163 def __init__(self, **kwargs):
2164 AsynchronousTask.__init__(self, **kwargs)
2165 self._task_queue = deque()
2167 def add(self, task):
2168 self._task_queue.append(task)
2171 self._start_next_task()
2174 self._task_queue.clear()
2175 CompositeTask.cancel(self)
2177 def _start_next_task(self):
2178 self._start_task(self._task_queue.popleft(),
2179 self._task_exit_handler)
2181 def _task_exit_handler(self, task):
2182 if self._default_exit(task) != os.EX_OK:
2184 elif self._task_queue:
2185 self._start_next_task()
2187 self._final_exit(task)
2190 class SubProcess(AbstractPollTask):
2192 __slots__ = ("pid",) + \
2193 ("_files", "_reg_id")
2195 # A file descriptor is required for the scheduler to monitor changes from
2196 # inside a poll() loop. When logging is not enabled, create a pipe just to
2197 # serve this purpose alone.
2201 if self.returncode is not None:
2202 return self.returncode
2203 if self.pid is None:
2204 return self.returncode
2205 if self._registered:
2206 return self.returncode
2209 retval = os.waitpid(self.pid, os.WNOHANG)
2211 if e.errno != errno.ECHILD:
2214 retval = (self.pid, 1)
2216 if retval == (0, 0):
2218 self._set_returncode(retval)
2219 return self.returncode
2224 os.kill(self.pid, signal.SIGTERM)
2226 if e.errno != errno.ESRCH:
2230 self.cancelled = True
2231 if self.pid is not None:
2233 return self.returncode
2236 return self.pid is not None and \
2237 self.returncode is None
2241 if self.returncode is not None:
2242 return self.returncode
2244 if self._registered:
2245 self.scheduler.schedule(self._reg_id)
2247 if self.returncode is not None:
2248 return self.returncode
2251 wait_retval = os.waitpid(self.pid, 0)
2253 if e.errno != errno.ECHILD:
2256 self._set_returncode((self.pid, 1))
2258 self._set_returncode(wait_retval)
2260 return self.returncode
2262 def _unregister(self):
2264 Unregister from the scheduler and close open files.
2267 self._registered = False
2269 if self._reg_id is not None:
2270 self.scheduler.unregister(self._reg_id)
2273 if self._files is not None:
2274 for f in self._files.itervalues():
2278 def _set_returncode(self, wait_retval):
2280 retval = wait_retval[1]
2282 if retval != os.EX_OK:
2284 retval = (retval & 0xff) << 8
2286 retval = retval >> 8
2288 self.returncode = retval
2290 class SpawnProcess(SubProcess):
2293 Constructor keyword args are passed into portage.process.spawn().
2294 The required "args" keyword argument will be passed as the first
2298 _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2299 "uid", "gid", "groups", "umask", "logfile",
2300 "path_lookup", "pre_exec")
2302 __slots__ = ("args",) + \
2305 _file_names = ("log", "process", "stdout")
2306 _files_dict = slot_dict_class(_file_names, prefix="")
2313 if self.fd_pipes is None:
2315 fd_pipes = self.fd_pipes
2316 fd_pipes.setdefault(0, sys.stdin.fileno())
2317 fd_pipes.setdefault(1, sys.stdout.fileno())
2318 fd_pipes.setdefault(2, sys.stderr.fileno())
2320 # flush any pending output
2321 for fd in fd_pipes.itervalues():
2322 if fd == sys.stdout.fileno():
2324 if fd == sys.stderr.fileno():
2327 logfile = self.logfile
2328 self._files = self._files_dict()
2331 master_fd, slave_fd = self._pipe(fd_pipes)
2332 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2333 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2336 fd_pipes_orig = fd_pipes.copy()
2338 # TODO: Use job control functions like tcsetpgrp() to control
2339 # access to stdin. Until then, use /dev/null so that any
2340 # attempts to read from stdin will immediately return EOF
2341 # instead of blocking indefinitely.
2342 null_input = open('/dev/null', 'rb')
2343 fd_pipes[0] = null_input.fileno()
2345 fd_pipes[0] = fd_pipes_orig[0]
2347 files.process = os.fdopen(master_fd, 'rb')
2348 if logfile is not None:
2350 fd_pipes[1] = slave_fd
2351 fd_pipes[2] = slave_fd
2353 files.log = open(logfile, mode='ab')
2354 portage.util.apply_secpass_permissions(logfile,
2355 uid=portage.portage_uid, gid=portage.portage_gid,
2358 if not self.background:
2359 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2361 output_handler = self._output_handler
2365 # Create a dummy pipe so the scheduler can monitor
2366 # the process from inside a poll() loop.
2367 fd_pipes[self._dummy_pipe_fd] = slave_fd
2369 fd_pipes[1] = slave_fd
2370 fd_pipes[2] = slave_fd
2371 output_handler = self._dummy_handler
2374 for k in self._spawn_kwarg_names:
2375 v = getattr(self, k)
2379 kwargs["fd_pipes"] = fd_pipes
2380 kwargs["returnpid"] = True
2381 kwargs.pop("logfile", None)
2383 self._reg_id = self.scheduler.register(files.process.fileno(),
2384 self._registered_events, output_handler)
2385 self._registered = True
2387 retval = self._spawn(self.args, **kwargs)
2390 if null_input is not None:
2393 if isinstance(retval, int):
2396 self.returncode = retval
2400 self.pid = retval[0]
2401 portage.process.spawned_pids.remove(self.pid)
2403 def _pipe(self, fd_pipes):
2405 @type fd_pipes: dict
2406 @param fd_pipes: pipes from which to copy terminal size if desired.
2410 def _spawn(self, args, **kwargs):
2411 return portage.process.spawn(args, **kwargs)
2413 def _output_handler(self, fd, event):
2415 if event & PollConstants.POLLIN:
2418 buf = array.array('B')
2420 buf.fromfile(files.process, self._bufsize)
2425 if not self.background:
2426 buf.tofile(files.stdout)
2427 files.stdout.flush()
2428 buf.tofile(files.log)
2434 self._unregister_if_appropriate(event)
2435 return self._registered
2437 def _dummy_handler(self, fd, event):
2439 This method is mainly interested in detecting EOF, since
2440 the only purpose of the pipe is to allow the scheduler to
2441 monitor the process from inside a poll() loop.
2444 if event & PollConstants.POLLIN:
2446 buf = array.array('B')
2448 buf.fromfile(self._files.process, self._bufsize)
2458 self._unregister_if_appropriate(event)
2459 return self._registered
2461 class MiscFunctionsProcess(SpawnProcess):
2463 Spawns misc-functions.sh with an existing ebuild environment.
2466 __slots__ = ("commands", "phase", "pkg", "settings")
2469 settings = self.settings
2470 settings.pop("EBUILD_PHASE", None)
2471 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2472 misc_sh_binary = os.path.join(portage_bin_path,
2473 os.path.basename(portage.const.MISC_SH_BINARY))
2475 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2476 self.logfile = settings.get("PORTAGE_LOG_FILE")
2478 portage._doebuild_exit_status_unlink(
2479 settings.get("EBUILD_EXIT_STATUS_FILE"))
2481 SpawnProcess._start(self)
2483 def _spawn(self, args, **kwargs):
2484 settings = self.settings
2485 debug = settings.get("PORTAGE_DEBUG") == "1"
2486 return portage.spawn(" ".join(args), settings,
2487 debug=debug, **kwargs)
2489 def _set_returncode(self, wait_retval):
2490 SpawnProcess._set_returncode(self, wait_retval)
2491 self.returncode = portage._doebuild_exit_status_check_and_log(
2492 self.settings, self.phase, self.returncode)
2494 class EbuildFetcher(SpawnProcess):
2496 __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2501 root_config = self.pkg.root_config
2502 portdb = root_config.trees["porttree"].dbapi
2503 ebuild_path = portdb.findname(self.pkg.cpv)
2504 settings = self.config_pool.allocate()
2505 settings.setcpv(self.pkg)
2507 # In prefetch mode, logging goes to emerge-fetch.log and the builddir
2508 # should not be touched since otherwise it could interfere with
2509 # another instance of the same cpv concurrently being built for a
2510 # different $ROOT (currently, builds only cooperate with prefetchers
2511 # that are spawned for the same $ROOT).
2512 if not self.prefetch:
2513 self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2514 self._build_dir.lock()
2515 self._build_dir.clean_log()
2516 portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2517 if self.logfile is None:
2518 self.logfile = settings.get("PORTAGE_LOG_FILE")
2524 # If any incremental variables have been overridden
2525 # via the environment, those values need to be passed
2526 # along here so that they are correctly considered by
2527 # the config instance in the subproccess.
2528 fetch_env = os.environ.copy()
2530 nocolor = settings.get("NOCOLOR")
2531 if nocolor is not None:
2532 fetch_env["NOCOLOR"] = nocolor
2534 fetch_env["PORTAGE_NICENESS"] = "0"
2536 fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2538 ebuild_binary = os.path.join(
2539 settings["PORTAGE_BIN_PATH"], "ebuild")
2541 fetch_args = [ebuild_binary, ebuild_path, phase]
2542 debug = settings.get("PORTAGE_DEBUG") == "1"
2544 fetch_args.append("--debug")
2546 self.args = fetch_args
2547 self.env = fetch_env
2548 SpawnProcess._start(self)
2550 def _pipe(self, fd_pipes):
2551 """When appropriate, use a pty so that fetcher progress bars,
2552 like wget has, will work properly."""
2553 if self.background or not sys.stdout.isatty():
2554 # When the output only goes to a log file,
2555 # there's no point in creating a pty.
2557 stdout_pipe = fd_pipes.get(1)
2558 got_pty, master_fd, slave_fd = \
2559 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2560 return (master_fd, slave_fd)
# Post-exit hook: records the fetch result, emits elog error messages on
# failure (skipped for prefetch to avoid duplicates), then releases the
# build dir lock and returns its config instance to the pool.
# NOTE(review): listing gaps — original lines 2569/2571/2581 etc. are
# missing (likely try/finally around the elog file handling); confirm
# against upstream.
2562 def _set_returncode(self, wait_retval):
2563 SpawnProcess._set_returncode(self, wait_retval)
2564 # Collect elog messages that might have been
2565 # created by the pkg_nofetch phase.
2566 if self._build_dir is not None:
2567 # Skip elog messages for prefetch, in order to avoid duplicates.
2568 if not self.prefetch and self.returncode != os.EX_OK:
2570 if self.logfile is not None:
2572 elog_out = open(self.logfile, 'a')
2573 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2574 if self.logfile is not None:
2575 msg += ", Log file:"
2576 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2577 if self.logfile is not None:
2578 eerror(" '%s'" % (self.logfile,),
2579 phase="unpack", key=self.pkg.cpv, out=elog_out)
2580 if elog_out is not None:
2582 if not self.prefetch:
2583 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2584 features = self._build_dir.settings.features
# On success the fetch log is discarded so a stale log does not linger.
2585 if self.returncode == os.EX_OK:
2586 self._build_dir.clean_log()
# Always release the lock and return the settings instance to the pool,
# then drop the reference so this handler is idempotent.
2587 self._build_dir.unlock()
2588 self.config_pool.deallocate(self._build_dir.settings)
2589 self._build_dir = None
# Manages exclusive locking of a package's PORTAGE_BUILDDIR. The category
# directory is locked briefly while it is created, so that concurrent
# emerge processes do not race on directory creation/removal.
# NOTE(review): this is a numbered listing with elided lines (e.g. the
# lock()/unlock() def lines and several try/except lines are missing);
# the visible code is an incomplete transcription — confirm against
# upstream before editing behavior.
2591 class EbuildBuildDir(SlotObject):
2593 __slots__ = ("dir_path", "pkg", "settings",
2594 "locked", "_catdir", "_lock_obj")
2596 def __init__(self, **kwargs):
2597 SlotObject.__init__(self, **kwargs)
# --- lock() body (def line elided in listing) ---
2602 This raises an AlreadyLocked exception if lock() is called
2603 while a lock is already held. In order to avoid this, call
2604 unlock() or check whether the "locked" attribute is True
2605 or False before calling lock().
2607 if self._lock_obj is not None:
2608 raise self.AlreadyLocked((self._lock_obj,))
# When no dir_path was given, derive PORTAGE_BUILDDIR by running the
# "setup" environment for this package's ebuild.
2610 dir_path = self.dir_path
2611 if dir_path is None:
2612 root_config = self.pkg.root_config
2613 portdb = root_config.trees["porttree"].dbapi
2614 ebuild_path = portdb.findname(self.pkg.cpv)
2615 settings = self.settings
2616 settings.setcpv(self.pkg)
2617 debug = settings.get("PORTAGE_DEBUG") == "1"
2618 use_cache = 1 # always true
2619 portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2620 self.settings, debug, use_cache, portdb)
2621 dir_path = self.settings["PORTAGE_BUILDDIR"]
2623 catdir = os.path.dirname(dir_path)
2624 self._catdir = catdir
2626 portage.util.ensure_dirs(os.path.dirname(catdir),
2627 gid=portage.portage_gid,
# Hold the category-dir lock only long enough to create the category
# directory and acquire the builddir lock, then release it below.
2631 catdir_lock = portage.locks.lockdir(catdir)
2632 portage.util.ensure_dirs(catdir,
2633 gid=portage.portage_gid,
2635 self._lock_obj = portage.locks.lockdir(dir_path)
2637 self.locked = self._lock_obj is not None
2638 if catdir_lock is not None:
2639 portage.locks.unlockdir(catdir_lock)
2641 def clean_log(self):
2642 """Discard existing log."""
2643 settings = self.settings
2645 for x in ('.logid', 'temp/build.log'):
2647 os.unlink(os.path.join(settings["PORTAGE_BUILDDIR"], x))
# --- unlock() body (def line elided in listing): release the builddir
# lock, then attempt to remove the now-possibly-empty category dir while
# holding its lock, tolerating the benign rmdir errors listed below.
2652 if self._lock_obj is None:
2655 portage.locks.unlockdir(self._lock_obj)
2656 self._lock_obj = None
2659 catdir = self._catdir
2662 catdir_lock = portage.locks.lockdir(catdir)
2668 if e.errno not in (errno.ENOENT,
2669 errno.ENOTEMPTY, errno.EEXIST):
2672 portage.locks.unlockdir(catdir_lock)
# Raised by lock() when a lock is already held by this instance.
2674 class AlreadyLocked(portage.exception.PortageException):
# Composite task driving a full from-source build: (optional) prefetch
# wait -> fetch -> builddir lock -> EbuildExecuter (build phases) ->
# optional binary packaging -> merge. Each _*_exit method is the
# continuation for the previous sub-task.
# NOTE(review): numbered listing with elided lines (several def lines,
# if/else lines and try blocks are missing between entries); treat the
# visible code as an incomplete transcription of upstream.
2677 class EbuildBuild(CompositeTask):
2679 __slots__ = ("args_set", "config_pool", "find_blockers",
2680 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2681 "prefetcher", "settings", "world_atom") + \
2682 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
# --- _start() body (def line elided) ---
2686 logger = self.logger
2689 settings = self.settings
2690 world_atom = self.world_atom
2691 root_config = pkg.root_config
2694 portdb = root_config.trees[tree].dbapi
2695 settings.setcpv(pkg)
2696 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2697 ebuild_path = portdb.findname(self.pkg.cpv)
2698 self._ebuild_path = ebuild_path
# If a background prefetcher is still running, wait on it (it holds the
# in-process lock on the files being fetched); otherwise proceed directly.
2700 prefetcher = self.prefetcher
2701 if prefetcher is None:
2703 elif not prefetcher.isAlive():
2705 elif prefetcher.poll() is None:
2707 waiting_msg = "Fetching files " + \
2708 "in the background. " + \
2709 "To view fetch progress, run `tail -f " + \
2710 "/var/log/emerge-fetch.log` in another " + \
2712 msg_prefix = colorize("GOOD", " * ")
2713 from textwrap import wrap
2714 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2715 for line in wrap(waiting_msg, 65))
2716 if not self.background:
2717 writemsg(waiting_msg, noiselevel=-1)
2719 self._current_task = prefetcher
2720 prefetcher.addExitListener(self._prefetch_exit)
2723 self._prefetch_exit(prefetcher)
2725 def _prefetch_exit(self, prefetcher):
2729 settings = self.settings
# Pretend/fetchonly path uses the synchronous EbuildFetchonly; the
# normal path starts an asynchronous EbuildFetcher sub-task.
2732 fetcher = EbuildFetchonly(
2733 fetch_all=opts.fetch_all_uri,
2734 pkg=pkg, pretend=opts.pretend,
2736 retval = fetcher.execute()
2737 self.returncode = retval
2741 fetcher = EbuildFetcher(config_pool=self.config_pool,
2742 fetchall=opts.fetch_all_uri,
2743 fetchonly=opts.fetchonly,
2744 background=self.background,
2745 pkg=pkg, scheduler=self.scheduler)
2747 self._start_task(fetcher, self._fetch_exit)
2749 def _fetch_exit(self, fetcher):
2753 fetch_failed = False
2755 fetch_failed = self._final_exit(fetcher) != os.EX_OK
2757 fetch_failed = self._default_exit(fetcher) != os.EX_OK
# Preserve the fetch log path for failure reporting; discard the log on
# success so it is not mistaken for a build log.
2759 if fetch_failed and fetcher.logfile is not None and \
2760 os.path.exists(fetcher.logfile):
2761 self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2763 if not fetch_failed and fetcher.logfile is not None:
2764 # Fetch was successful, so remove the fetch log.
2766 os.unlink(fetcher.logfile)
2770 if fetch_failed or opts.fetchonly:
2774 logger = self.logger
2776 pkg_count = self.pkg_count
2777 scheduler = self.scheduler
2778 settings = self.settings
2779 features = settings.features
2780 ebuild_path = self._ebuild_path
2781 system_set = pkg.root_config.sets["system"]
# Hold the builddir lock from here until merge completion or failure.
2783 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2784 self._build_dir.lock()
2786 # Cleaning is triggered before the setup
2787 # phase, in portage.doebuild().
2788 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2789 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2790 short_msg = "emerge: (%s of %s) %s Clean" % \
2791 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2792 logger.log(msg, short_msg=short_msg)
2794 #buildsyspkg: Check if we need to _force_ binary package creation
2795 self._issyspkg = "buildsyspkg" in features and \
2796 system_set.findAtomForPackage(pkg) and \
2799 if opts.buildpkg or self._issyspkg:
2801 self._buildpkg = True
2803 msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2804 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2805 short_msg = "emerge: (%s of %s) %s Compile" % \
2806 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2807 logger.log(msg, short_msg=short_msg)
2810 msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2811 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2812 short_msg = "emerge: (%s of %s) %s Compile" % \
2813 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2814 logger.log(msg, short_msg=short_msg)
2816 build = EbuildExecuter(background=self.background, pkg=pkg,
2817 scheduler=scheduler, settings=settings)
2818 self._start_task(build, self._build_exit)
# Flush pending elog messages and release the builddir lock.
2820 def _unlock_builddir(self):
2821 portage.elog.elog_process(self.pkg.cpv, self.settings)
2822 self._build_dir.unlock()
2824 def _build_exit(self, build):
2825 if self._default_exit(build) != os.EX_OK:
2826 self._unlock_builddir()
2831 buildpkg = self._buildpkg
2834 self._final_exit(build)
2839 msg = ">>> This is a system package, " + \
2840 "let's pack a rescue tarball.\n"
2842 log_path = self.settings.get("PORTAGE_LOG_FILE")
2843 if log_path is not None:
2844 log_file = open(log_path, 'a')
2850 if not self.background:
2851 portage.writemsg_stdout(msg, noiselevel=-1)
2853 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2854 scheduler=self.scheduler, settings=self.settings)
2856 self._start_task(packager, self._buildpkg_exit)
2858 def _buildpkg_exit(self, packager):
2860 Released build dir lock when there is a failure or
2861 when in buildpkgonly mode. Otherwise, the lock will
2862 be released when merge() is called.
2865 if self._default_exit(packager) != os.EX_OK:
2866 self._unlock_builddir()
2870 if self.opts.buildpkgonly:
2871 # Need to call "clean" phase for buildpkgonly mode
2872 portage.elog.elog_process(self.pkg.cpv, self.settings)
2874 clean_phase = EbuildPhase(background=self.background,
2875 pkg=self.pkg, phase=phase,
2876 scheduler=self.scheduler, settings=self.settings,
2878 self._start_task(clean_phase, self._clean_exit)
2881 # Continue holding the builddir lock until
2882 # after the package has been installed.
2883 self._current_task = None
2884 self.returncode = packager.returncode
2887 def _clean_exit(self, clean_phase):
2888 if self._final_exit(clean_phase) != os.EX_OK or \
2889 self.opts.buildpkgonly:
2890 self._unlock_builddir()
# --- install() body (def line elided) ---
2895 Install the package and then clean up and release locks.
2896 Only call this after the build has completed successfully
2897 and neither fetchonly nor buildpkgonly mode are enabled.
2900 find_blockers = self.find_blockers
2901 ldpath_mtimes = self.ldpath_mtimes
2902 logger = self.logger
2904 pkg_count = self.pkg_count
2905 settings = self.settings
2906 world_atom = self.world_atom
2907 ebuild_path = self._ebuild_path
2910 merge = EbuildMerge(find_blockers=self.find_blockers,
2911 ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2912 pkg_count=pkg_count, pkg_path=ebuild_path,
2913 scheduler=self.scheduler,
2914 settings=settings, tree=tree, world_atom=world_atom)
2916 msg = " === (%s of %s) Merging (%s::%s)" % \
2917 (pkg_count.curval, pkg_count.maxval,
2918 pkg.cpv, ebuild_path)
2919 short_msg = "emerge: (%s of %s) %s Merge" % \
2920 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2921 logger.log(msg, short_msg=short_msg)
# Merge synchronously; the builddir lock is released afterwards
# (presumably in a try/finally — the surrounding lines are elided).
2924 rval = merge.execute()
2926 self._unlock_builddir()
# Runs the source-build phase sequence for one package:
# clean -> setup -> unpack -> (prepare, configure, compile, test, install),
# with prepare/configure skipped for EAPI 0/1. Setup and unpack go through
# the scheduler's scheduleSetup/scheduleUnpack so they can be serialized
# globally (unpack only for live ebuilds).
# NOTE(review): numbered listing with elided lines (def lines for _start
# and the _live_eclasses contents are missing); incomplete transcription.
2930 class EbuildExecuter(CompositeTask):
2932 __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2934 _phases = ("prepare", "configure", "compile", "test", "install")
# Eclasses whose $DISTDIR access must be serialized (contents elided).
2936 _live_eclasses = frozenset([
# --- _start() body (def line elided) ---
2946 self._tree = "porttree"
2949 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2950 scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2951 self._start_task(clean_phase, self._clean_phase_exit)
2953 def _clean_phase_exit(self, clean_phase):
2955 if self._default_exit(clean_phase) != os.EX_OK:
2960 scheduler = self.scheduler
2961 settings = self.settings
2964 # This initializes PORTAGE_LOG_FILE.
2965 portage.prepare_build_dirs(pkg.root, settings, cleanup)
2967 setup_phase = EbuildPhase(background=self.background,
2968 pkg=pkg, phase="setup", scheduler=scheduler,
2969 settings=settings, tree=self._tree)
# Setup is dispatched through the scheduler rather than _start_task so
# that concurrent setup phases can be limited globally.
2971 setup_phase.addExitListener(self._setup_exit)
2972 self._current_task = setup_phase
2973 self.scheduler.scheduleSetup(setup_phase)
2975 def _setup_exit(self, setup_phase):
2977 if self._default_exit(setup_phase) != os.EX_OK:
2981 unpack_phase = EbuildPhase(background=self.background,
2982 pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2983 settings=self.settings, tree=self._tree)
2985 if self._live_eclasses.intersection(self.pkg.inherited):
2986 # Serialize $DISTDIR access for live ebuilds since
2987 # otherwise they can interfere with eachother.
2989 unpack_phase.addExitListener(self._unpack_exit)
2990 self._current_task = unpack_phase
2991 self.scheduler.scheduleUnpack(unpack_phase)
2994 self._start_task(unpack_phase, self._unpack_exit)
2996 def _unpack_exit(self, unpack_phase):
2998 if self._default_exit(unpack_phase) != os.EX_OK:
# Queue the remaining phases in order as a single TaskSequence.
3002 ebuild_phases = TaskSequence(scheduler=self.scheduler)
3005 phases = self._phases
3006 eapi = pkg.metadata["EAPI"]
3007 if eapi in ("0", "1"):
3008 # skip src_prepare and src_configure
3011 for phase in phases:
3012 ebuild_phases.add(EbuildPhase(background=self.background,
3013 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3014 settings=self.settings, tree=self._tree))
3016 self._start_task(ebuild_phases, self._default_final_exit)
# NOTE(review): numbered listing with elided lines (the _start def line,
# several try/if lines and the metadata-callback tail are missing);
# incomplete transcription of upstream.
3018 class EbuildMetadataPhase(SubProcess):
3021 Asynchronous interface for the ebuild "depend" phase which is
3022 used to extract metadata from the ebuild.
3025 __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3026 "ebuild_mtime", "metadata", "portdb", "repo_path", "settings") + \
3029 _file_names = ("ebuild",)
3030 _files_dict = slot_dict_class(_file_names, prefix="")
# --- _start() body (def line elided) ---
3034 settings = self.settings
3035 settings.setcpv(self.cpv)
3036 ebuild_path = self.ebuild_path
# Optionally pre-parse EAPI from the file name (GLEP 55) or the ebuild
# header, so unsupported EAPIs can be short-circuited without spawning.
3039 if 'parse-eapi-glep-55' in settings.features:
3040 pf, eapi = portage._split_ebuild_name_glep55(
3041 os.path.basename(ebuild_path))
3042 if eapi is None and \
3043 'parse-eapi-ebuild-head' in settings.features:
3044 eapi = portage._parse_eapi_ebuild_head(codecs.open(ebuild_path,
3045 mode='r', encoding='utf_8', errors='replace'))
3047 if eapi is not None:
3048 if not portage.eapi_is_supported(eapi):
# Unsupported EAPI: report metadata containing only EAPI and finish
# successfully without running the depend phase.
3049 self.metadata_callback(self.cpv, self.ebuild_path,
3050 self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
3051 self.returncode = os.EX_OK
3055 settings.configdict['pkg']['EAPI'] = eapi
3057 debug = settings.get("PORTAGE_DEBUG") == "1"
3061 if self.fd_pipes is not None:
3062 fd_pipes = self.fd_pipes.copy()
3066 fd_pipes.setdefault(0, sys.stdin.fileno())
3067 fd_pipes.setdefault(1, sys.stdout.fileno())
3068 fd_pipes.setdefault(2, sys.stderr.fileno())
3070 # flush any pending output
3071 for fd in fd_pipes.itervalues():
3072 if fd == sys.stdout.fileno():
3074 if fd == sys.stderr.fileno():
3077 fd_pipes_orig = fd_pipes.copy()
3078 self._files = self._files_dict()
# Non-blocking pipe over which the child writes the raw metadata; the
# read end is registered with the scheduler's poll loop.
3081 master_fd, slave_fd = os.pipe()
3082 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3083 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3085 fd_pipes[self._metadata_fd] = slave_fd
3087 self._raw_metadata = []
3088 files.ebuild = os.fdopen(master_fd, 'r')
3089 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3090 self._registered_events, self._output_handler)
3091 self._registered = True
3093 retval = portage.doebuild(ebuild_path, "depend",
3094 settings["ROOT"], settings, debug,
3095 mydbapi=self.portdb, tree="porttree",
3096 fd_pipes=fd_pipes, returnpid=True)
3100 if isinstance(retval, int):
3101 # doebuild failed before spawning
3103 self.returncode = retval
# SubProcess manages waiting itself, so remove the pid from portage's
# global spawned_pids bookkeeping.
3107 self.pid = retval[0]
3108 portage.process.spawned_pids.remove(self.pid)
# Poll callback: accumulate metadata output; an empty read means EOF.
3110 def _output_handler(self, fd, event):
3112 if event & PollConstants.POLLIN:
3113 self._raw_metadata.append(self._files.ebuild.read())
3114 if not self._raw_metadata[-1]:
3118 self._unregister_if_appropriate(event)
3119 return self._registered
3121 def _set_returncode(self, wait_retval):
3122 SubProcess._set_returncode(self, wait_retval)
3123 if self.returncode == os.EX_OK:
3124 metadata_lines = "".join(self._raw_metadata).splitlines()
3125 if len(portage.auxdbkeys) != len(metadata_lines):
3126 # Don't trust bash's returncode if the
3127 # number of lines is incorrect.
3130 metadata = izip(portage.auxdbkeys, metadata_lines)
3131 self.metadata = self.metadata_callback(self.cpv,
3132 self.ebuild_path, self.repo_path, metadata,
# SpawnProcess subclass that runs a single ebuild phase via
# portage.doebuild().
# NOTE(review): numbered listing with elided lines (the _start def line
# and the _spawn return line are missing); incomplete transcription.
3135 class EbuildProcess(SpawnProcess):
3137 __slots__ = ("phase", "pkg", "settings", "tree")
# --- _start() body (def line elided) ---
3140 # Don't open the log file during the clean phase since the
3141 # open file can result in an nfs lock on $T/build.log which
3142 # prevents the clean phase from removing $T.
3143 if self.phase not in ("clean", "cleanrm"):
3144 self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3145 SpawnProcess._start(self)
# Prefer a pty (sized to the terminal) for phase output.
3147 def _pipe(self, fd_pipes):
3148 stdout_pipe = fd_pipes.get(1)
3149 got_pty, master_fd, slave_fd = \
3150 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3151 return (master_fd, slave_fd)
3153 def _spawn(self, args, **kwargs):
3155 root_config = self.pkg.root_config
3157 mydbapi = root_config.trees[tree].dbapi
3158 settings = self.settings
3159 ebuild_path = settings["EBUILD"]
3160 debug = settings.get("PORTAGE_DEBUG") == "1"
3162 rval = portage.doebuild(ebuild_path, self.phase,
3163 root_config.root, settings, debug,
3164 mydbapi=mydbapi, tree=tree, **kwargs)
3168 def _set_returncode(self, wait_retval):
3169 SpawnProcess._set_returncode(self, wait_retval)
# Reconcile the exit status with the phase's exit-status file (skipped
# for clean phases, which remove that file's directory).
3171 if self.phase not in ("clean", "cleanrm"):
3172 self.returncode = portage._doebuild_exit_status_check_and_log(
3173 self.settings, self.phase, self.returncode)
# FEATURES=test-fail-continue turns a test failure into success.
3175 if self.phase == "test" and self.returncode != os.EX_OK and \
3176 "test-fail-continue" in self.settings.features:
3177 self.returncode = os.EX_OK
3179 portage._post_phase_userpriv_perms(self.settings)
# Composite task wrapping one EbuildProcess plus any registered
# post-phase misc commands (e.g. install hooks) for that phase.
# NOTE(review): numbered listing with elided lines (the _start def line
# and several wait()/close lines are missing); incomplete transcription.
3181 class EbuildPhase(CompositeTask):
3183 __slots__ = ("background", "pkg", "phase",
3184 "scheduler", "settings", "tree")
# Phase name -> misc-functions command list, shared with portage core.
3186 _post_phase_cmds = portage._post_phase_cmds
# --- _start() body (def line elided) ---
3190 ebuild_process = EbuildProcess(background=self.background,
3191 pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3192 settings=self.settings, tree=self.tree)
3194 self._start_task(ebuild_process, self._ebuild_exit)
3196 def _ebuild_exit(self, ebuild_process):
# After install, scan the build log for QA problems; output goes to the
# log file when running in the background.
3198 if self.phase == "install":
3200 log_path = self.settings.get("PORTAGE_LOG_FILE")
3202 if self.background and log_path is not None:
3203 log_file = open(log_path, 'a')
3206 portage._check_build_log(self.settings, out=out)
3208 if log_file is not None:
3211 if self._default_exit(ebuild_process) != os.EX_OK:
3215 settings = self.settings
3217 if self.phase == "install":
3218 portage._post_src_install_chost_fix(settings)
3219 portage._post_src_install_uid_fix(settings)
3221 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3222 if post_phase_cmds is not None:
3223 post_phase = MiscFunctionsProcess(background=self.background,
3224 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3225 scheduler=self.scheduler, settings=settings)
3226 self._start_task(post_phase, self._post_phase_exit)
# No post-phase commands: finish with the ebuild process's status.
3229 self.returncode = ebuild_process.returncode
3230 self._current_task = None
3233 def _post_phase_exit(self, post_phase):
3234 if self._final_exit(post_phase) != os.EX_OK:
3235 writemsg("!!! post %s failed; exiting.\n" % self.phase,
3237 self._current_task = None
# Runs the "package" phase to produce a .tbz2, writing to a pid-suffixed
# temp file and injecting it into the binary tree on success.
# NOTE(review): numbered listing with elided lines (__init__/_start def
# lines and the try/finally around _start are missing); incomplete
# transcription.
3241 class EbuildBinpkg(EbuildProcess):
3243 This assumes that src_install() has successfully completed.
3245 __slots__ = ("_binpkg_tmpfile",)
# --- __init__/_start bodies (def lines elided) ---
3248 self.phase = "package"
3249 self.tree = "porttree"
3251 root_config = pkg.root_config
3252 portdb = root_config.trees["porttree"].dbapi
3253 bintree = root_config.trees["bintree"]
3254 ebuild_path = portdb.findname(self.pkg.cpv)
3255 settings = self.settings
3256 debug = settings.get("PORTAGE_DEBUG") == "1"
3258 bintree.prevent_collision(pkg.cpv)
# Pid-suffixed temp name avoids clobbering a concurrent writer's file.
3259 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3260 pkg.cpv + ".tbz2." + str(os.getpid()))
3261 self._binpkg_tmpfile = binpkg_tmpfile
3262 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3263 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3266 EbuildProcess._start(self)
# Remove the temp-file setting once the phase has been started.
3268 settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3270 def _set_returncode(self, wait_retval):
3271 EbuildProcess._set_returncode(self, wait_retval)
3274 bintree = pkg.root_config.trees["bintree"]
3275 binpkg_tmpfile = self._binpkg_tmpfile
3276 if self.returncode == os.EX_OK:
3277 bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
# Synchronous wrapper around portage.merge() that registers the package
# in the world file (via world_atom) and logs completion on success.
# NOTE(review): numbered listing with elided lines (execute() def line
# and the return are missing); incomplete transcription.
3279 class EbuildMerge(SlotObject):
3281 __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3282 "pkg", "pkg_count", "pkg_path", "pretend",
3283 "scheduler", "settings", "tree", "world_atom")
# --- execute() body (def line elided) ---
3286 root_config = self.pkg.root_config
3287 settings = self.settings
3288 retval = portage.merge(settings["CATEGORY"],
3289 settings["PF"], settings["D"],
3290 os.path.join(settings["PORTAGE_BUILDDIR"],
3291 "build-info"), root_config.root, settings,
3292 myebuild=settings["EBUILD"],
3293 mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3294 vartree=root_config.trees["vartree"],
3295 prev_mtimes=self.ldpath_mtimes,
3296 scheduler=self.scheduler,
3297 blockers=self.find_blockers)
3299 if retval == os.EX_OK:
3300 self.world_atom(self.pkg)
3305 def _log_success(self):
3307 pkg_count = self.pkg_count
3308 pkg_path = self.pkg_path
3309 logger = self.logger
# Post-build cleaning is implied unless FEATURES=noclean is set.
3310 if "noclean" not in self.settings.features:
3311 short_msg = "emerge: (%s of %s) %s Clean Post" % \
3312 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3313 logger.log((" === (%s of %s) " + \
3314 "Post-Build Cleaning (%s::%s)") % \
3315 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3316 short_msg=short_msg)
3317 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3318 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# Asynchronous-task wrapper around the unmerge() helper; routes its
# messages to the log file and/or terminal depending on background mode.
# NOTE(review): numbered listing with elided lines (_start def line, the
# try line before unmerge(), and the file write/close tail are missing);
# incomplete transcription.
3320 class PackageUninstall(AsynchronousTask):
3322 __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
# --- _start() body (def/try lines elided) ---
3326 unmerge(self.pkg.root_config, self.opts, "unmerge",
3327 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3328 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3329 writemsg_level=self._writemsg_level)
3330 except UninstallFailure, e:
3331 self.returncode = e.status
3333 self.returncode = os.EX_OK
# Message sink for unmerge(): suppress low-priority output in background
# mode, echo to the terminal otherwise, and append to the log if present.
3336 def _writemsg_level(self, msg, level=0, noiselevel=0):
3338 log_path = self.settings.get("PORTAGE_LOG_FILE")
3339 background = self.background
3341 if log_path is None:
3342 if not (background and level < logging.WARNING):
3343 portage.util.writemsg_level(msg,
3344 level=level, noiselevel=noiselevel)
3347 portage.util.writemsg_level(msg,
3348 level=level, noiselevel=noiselevel)
3350 f = open(log_path, 'a')
# Composite task that installs a binary package: wait for prefetcher ->
# fetch (if remote) -> verify digest -> clean -> unpack info/image ->
# setup phase -> extract -> merge. Mirrors EbuildBuild's state-machine
# structure with _*_exit continuations.
# NOTE(review): numbered listing with elided lines throughout (several
# def lines, try/finally blocks and else branches are missing);
# incomplete transcription of upstream.
3356 class Binpkg(CompositeTask):
3358 __slots__ = ("find_blockers",
3359 "ldpath_mtimes", "logger", "opts",
3360 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3361 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3362 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
# Echo to the terminal (foreground) and append to the log if configured.
3364 def _writemsg_level(self, msg, level=0, noiselevel=0):
3366 if not self.background:
3367 portage.util.writemsg_level(msg,
3368 level=level, noiselevel=noiselevel)
3370 log_path = self.settings.get("PORTAGE_LOG_FILE")
3371 if log_path is not None:
3372 f = open(log_path, 'a')
# --- _start() body (def line elided) ---
3381 settings = self.settings
3382 settings.setcpv(pkg)
3383 self._tree = "bintree"
3384 self._bintree = self.pkg.root_config.trees[self._tree]
3385 self._verify = not self.opts.pretend
3387 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3388 "portage", pkg.category, pkg.pf)
3389 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3390 pkg=pkg, settings=settings)
3391 self._image_dir = os.path.join(dir_path, "image")
3392 self._infloc = os.path.join(dir_path, "build-info")
3393 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3394 settings["EBUILD"] = self._ebuild_path
3395 debug = settings.get("PORTAGE_DEBUG") == "1"
3396 portage.doebuild_environment(self._ebuild_path, "setup",
3397 settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3398 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3400 # The prefetcher has already completed or it
3401 # could be running now. If it's running now,
3402 # wait for it to complete since it holds
3403 # a lock on the file being fetched. The
3404 # portage.locks functions are only designed
3405 # to work between separate processes. Since
3406 # the lock is held by the current process,
3407 # use the scheduler and fetcher methods to
3408 # synchronize with the fetcher.
3409 prefetcher = self.prefetcher
3410 if prefetcher is None:
3412 elif not prefetcher.isAlive():
3414 elif prefetcher.poll() is None:
3416 waiting_msg = ("Fetching '%s' " + \
3417 "in the background. " + \
3418 "To view fetch progress, run `tail -f " + \
3419 "/var/log/emerge-fetch.log` in another " + \
3420 "terminal.") % prefetcher.pkg_path
3421 msg_prefix = colorize("GOOD", " * ")
3422 from textwrap import wrap
3423 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3424 for line in wrap(waiting_msg, 65))
3425 if not self.background:
3426 writemsg(waiting_msg, noiselevel=-1)
3428 self._current_task = prefetcher
3429 prefetcher.addExitListener(self._prefetch_exit)
3432 self._prefetch_exit(prefetcher)
3434 def _prefetch_exit(self, prefetcher):
3437 pkg_count = self.pkg_count
# Lock the build dir and reset the log unless in pretend/fetchonly mode.
3438 if not (self.opts.pretend or self.opts.fetchonly):
3439 self._build_dir.lock()
3440 # If necessary, discard old log so that we don't
3442 self._build_dir.clean_log()
3443 # Initialze PORTAGE_LOG_FILE.
3444 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3445 fetcher = BinpkgFetcher(background=self.background,
3446 logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3447 pretend=self.opts.pretend, scheduler=self.scheduler)
3448 pkg_path = fetcher.pkg_path
3449 self._pkg_path = pkg_path
# Only fetch when --getbinpkg is active and the package is remote.
3451 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3453 msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3454 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3455 short_msg = "emerge: (%s of %s) %s Fetch" % \
3456 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3457 self.logger.log(msg, short_msg=short_msg)
3458 self._start_task(fetcher, self._fetcher_exit)
3461 self._fetcher_exit(fetcher)
3463 def _fetcher_exit(self, fetcher):
3465 # The fetcher only has a returncode when
3466 # --getbinpkg is enabled.
3467 if fetcher.returncode is not None:
3468 self._fetched_pkg = True
3469 if self._default_exit(fetcher) != os.EX_OK:
3470 self._unlock_builddir()
3474 if self.opts.pretend:
3475 self._current_task = None
3476 self.returncode = os.EX_OK
3484 logfile = self.settings.get("PORTAGE_LOG_FILE")
3485 verifier = BinpkgVerifier(background=self.background,
3486 logfile=logfile, pkg=self.pkg)
3487 self._start_task(verifier, self._verifier_exit)
3490 self._verifier_exit(verifier)
3492 def _verifier_exit(self, verifier):
3493 if verifier is not None and \
3494 self._default_exit(verifier) != os.EX_OK:
3495 self._unlock_builddir()
3499 logger = self.logger
3501 pkg_count = self.pkg_count
3502 pkg_path = self._pkg_path
# A freshly fetched, verified package is injected into the local bintree.
3504 if self._fetched_pkg:
3505 self._bintree.inject(pkg.cpv, filename=pkg_path)
3507 if self.opts.fetchonly:
3508 self._current_task = None
3509 self.returncode = os.EX_OK
3513 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3514 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3515 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3516 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3517 logger.log(msg, short_msg=short_msg)
3520 settings = self.settings
3521 ebuild_phase = EbuildPhase(background=self.background,
3522 pkg=pkg, phase=phase, scheduler=self.scheduler,
3523 settings=settings, tree=self._tree)
3525 self._start_task(ebuild_phase, self._clean_exit)
3527 def _clean_exit(self, clean_phase):
3528 if self._default_exit(clean_phase) != os.EX_OK:
3529 self._unlock_builddir()
# --- post-clean continuation (def line elided in listing) ---
3533 dir_path = self._build_dir.dir_path
3535 infloc = self._infloc
3537 pkg_path = self._pkg_path
3540 for mydir in (dir_path, self._image_dir, infloc):
3541 portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3542 gid=portage.data.portage_gid, mode=dir_mode)
3544 # This initializes PORTAGE_LOG_FILE.
3545 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3546 self._writemsg_level(">>> Extracting info\n")
# Unpack the xpak metadata segment into build-info, synthesizing any
# critical keys that are missing from the archive.
3548 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3549 check_missing_metadata = ("CATEGORY", "PF")
3550 missing_metadata = set()
3551 for k in check_missing_metadata:
3552 v = pkg_xpak.getfile(k)
3554 missing_metadata.add(k)
3556 pkg_xpak.unpackinfo(infloc)
3557 for k in missing_metadata:
3565 f = open(os.path.join(infloc, k), 'wb')
3571 # Store the md5sum in the vdb.
3572 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3574 f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3578 # This gives bashrc users an opportunity to do various things
3579 # such as remove binary packages after they're installed.
3580 settings = self.settings
3581 settings.setcpv(self.pkg)
3582 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3583 settings.backup_changes("PORTAGE_BINPKG_FILE")
# Setup phase goes through the scheduler's global setup queue.
3586 setup_phase = EbuildPhase(background=self.background,
3587 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3588 settings=settings, tree=self._tree)
3590 setup_phase.addExitListener(self._setup_exit)
3591 self._current_task = setup_phase
3592 self.scheduler.scheduleSetup(setup_phase)
3594 def _setup_exit(self, setup_phase):
3595 if self._default_exit(setup_phase) != os.EX_OK:
3596 self._unlock_builddir()
3600 extractor = BinpkgExtractorAsync(background=self.background,
3601 image_dir=self._image_dir,
3602 pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3603 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3604 self._start_task(extractor, self._extractor_exit)
3606 def _extractor_exit(self, extractor):
3607 if self._final_exit(extractor) != os.EX_OK:
3608 self._unlock_builddir()
3609 writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
# No lock is held in pretend/fetchonly mode, so unlocking is a no-op then.
3613 def _unlock_builddir(self):
3614 if self.opts.pretend or self.opts.fetchonly:
3616 portage.elog.elog_process(self.pkg.cpv, self.settings)
3617 self._build_dir.unlock()
# --- install() body (def line elided) ---
3621 # This gives bashrc users an opportunity to do various things
3622 # such as remove binary packages after they're installed.
3623 settings = self.settings
3624 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3625 settings.backup_changes("PORTAGE_BINPKG_FILE")
3627 merge = EbuildMerge(find_blockers=self.find_blockers,
3628 ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3629 pkg=self.pkg, pkg_count=self.pkg_count,
3630 pkg_path=self._pkg_path, scheduler=self.scheduler,
3631 settings=settings, tree=self._tree, world_atom=self.world_atom)
# Merge, then always drop PORTAGE_BINPKG_FILE and release the lock
# (presumably inside a try/finally — surrounding lines are elided).
3634 retval = merge.execute()
3636 settings.pop("PORTAGE_BINPKG_FILE", None)
3637 self._unlock_builddir()
# Fetches a remote binary package with the configured FETCHCOMMAND /
# RESUMECOMMAND, with optional distlocks-based file locking and an mtime
# sync against the remote index after a successful download.
# NOTE(review): numbered listing with elided lines (_start/lock/unlock
# def lines and several try/except blocks are missing); incomplete
# transcription of upstream.
3640 class BinpkgFetcher(SpawnProcess):
3642 __slots__ = ("pkg", "pretend",
3643 "locked", "pkg_path", "_lock_obj")
3645 def __init__(self, **kwargs):
3646 SpawnProcess.__init__(self, **kwargs)
3648 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
# --- _start() body (def line elided) ---
3656 pretend = self.pretend
3657 bintree = pkg.root_config.trees["bintree"]
3658 settings = bintree.settings
3659 use_locks = "distlocks" in settings.features
3660 pkg_path = self.pkg_path
3663 portage.util.ensure_dirs(os.path.dirname(pkg_path))
# Resume only when a partial file is already known-invalid in the tree.
3666 exists = os.path.exists(pkg_path)
3667 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3668 if not (pretend or resume):
3669 # Remove existing file or broken symlink.
# Build the download URI from the remote index PATH when available,
# falling back to cpv/pf-derived names under PORTAGE_BINHOST.
3675 # urljoin doesn't work correctly with
3676 # unrecognized protocols like sftp
3677 if bintree._remote_has_index:
3678 rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3680 rel_uri = pkg.cpv + ".tbz2"
3681 uri = bintree._remote_base_uri.rstrip("/") + \
3682 "/" + rel_uri.lstrip("/")
3684 uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3685 "/" + pkg.pf + ".tbz2"
# Pretend mode just prints the URI and succeeds.
3688 portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3689 self.returncode = os.EX_OK
# Select a protocol-specific fetch/resume command when configured.
3693 protocol = urlparse.urlparse(uri)[0]
3694 fcmd_prefix = "FETCHCOMMAND"
3696 fcmd_prefix = "RESUMECOMMAND"
3697 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3699 fcmd = settings.get(fcmd_prefix)
3702 "DISTDIR" : os.path.dirname(pkg_path),
3704 "FILE" : os.path.basename(pkg_path)
3707 fetch_env = dict(settings.iteritems())
3708 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3709 for x in shlex.split(fcmd)]
3711 if self.fd_pipes is None:
3713 fd_pipes = self.fd_pipes
3715 # Redirect all output to stdout since some fetchers like
3716 # wget pollute stderr (if portage detects a problem then it
3717 # can send it's own message to stderr).
3718 fd_pipes.setdefault(0, sys.stdin.fileno())
3719 fd_pipes.setdefault(1, sys.stdout.fileno())
3720 fd_pipes.setdefault(2, sys.stdout.fileno())
3722 self.args = fetch_args
3723 self.env = fetch_env
3724 SpawnProcess._start(self)
3726 def _set_returncode(self, wait_retval):
3727 SpawnProcess._set_returncode(self, wait_retval)
3728 if self.returncode == os.EX_OK:
3729 # If possible, update the mtime to match the remote package if
3730 # the fetcher didn't already do it automatically.
3731 bintree = self.pkg.root_config.trees["bintree"]
3732 if bintree._remote_has_index:
3733 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3734 if remote_mtime is not None:
3736 remote_mtime = long(remote_mtime)
3741 local_mtime = long(os.stat(self.pkg_path).st_mtime)
3745 if remote_mtime != local_mtime:
3747 os.utime(self.pkg_path,
3748 (remote_mtime, remote_mtime))
# --- lock() body (def line elided) ---
3757 This raises an AlreadyLocked exception if lock() is called
3758 while a lock is already held. In order to avoid this, call
3759 unlock() or check whether the "locked" attribute is True
3760 or False before calling lock().
3762 if self._lock_obj is not None:
3763 raise self.AlreadyLocked((self._lock_obj,))
3765 self._lock_obj = portage.locks.lockfile(
3766 self.pkg_path, wantnewlockfile=1)
# Raised by lock() when a lock is already held by this instance.
3769 class AlreadyLocked(portage.exception.PortageException):
# --- unlock() body (def line elided) ---
3773 if self._lock_obj is None:
3775 portage.locks.unlockfile(self._lock_obj)
3776 self._lock_obj = None
# Synchronously verifies a fetched binary package's digests via
# bintree.digestCheck(); on failure the file is renamed aside.
3779 class BinpkgVerifier(AsynchronousTask):
3780 __slots__ = ("logfile", "pkg",)
3784 Note: Unlike a normal AsynchronousTask.start() method,
3785 this one does all work is synchronously. The returncode
3786 attribute will be set before it returns.
3790 root_config = pkg.root_config
3791 bintree = root_config.trees["bintree"]
3793 stdout_orig = sys.stdout
3794 stderr_orig = sys.stderr
# When running in the background with a logfile, redirect both stdout and
# stderr into the log for the duration of the digest check.
3796 if self.background and self.logfile is not None:
3797 log_file = open(self.logfile, 'a')
3799 if log_file is not None:
3800 sys.stdout = log_file
3801 sys.stderr = log_file
3803 bintree.digestCheck(pkg)
3804 except portage.exception.FileNotFound:
3805 writemsg("!!! Fetching Binary failed " + \
3806 "for '%s'\n" % pkg.cpv, noiselevel=-1)
# e.value holds (filename, reason, got, expected) for digest mismatches.
3808 except portage.exception.DigestException, e:
3809 writemsg("\n!!! Digest verification failed:\n",
3811 writemsg("!!! %s\n" % e.value[0],
3813 writemsg("!!! Reason: %s\n" % e.value[1],
3815 writemsg("!!! Got: %s\n" % e.value[2],
3817 writemsg("!!! Expected: %s\n" % e.value[3],
# On any failure, move the corrupt file out of the way so a re-fetch
# can proceed cleanly.
3820 if rval != os.EX_OK:
3821 pkg_path = bintree.getname(pkg.cpv)
3822 head, tail = os.path.split(pkg_path)
3823 temp_filename = portage._checksum_failure_temp_file(head, tail)
3824 writemsg("File renamed to '%s'\n" % (temp_filename,),
# Always restore the original stdout/stderr before returning.
3827 sys.stdout = stdout_orig
3828 sys.stderr = stderr_orig
3829 if log_file is not None:
3832 self.returncode = rval
# Composite task chain: fetch a binary package, verify its digests, then
# inject it into the binary tree. Each stage aborts the chain on failure.
3835 class BinpkgPrefetcher(CompositeTask):
3837 __slots__ = ("pkg",) + \
3838 ("pkg_path", "_bintree",)
3841 self._bintree = self.pkg.root_config.trees["bintree"]
3842 fetcher = BinpkgFetcher(background=self.background,
3843 logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3844 scheduler=self.scheduler)
3845 self.pkg_path = fetcher.pkg_path
3846 self._start_task(fetcher, self._fetcher_exit)
# Stage 2: on successful fetch, verify the downloaded file.
3848 def _fetcher_exit(self, fetcher):
3850 if self._default_exit(fetcher) != os.EX_OK:
3854 verifier = BinpkgVerifier(background=self.background,
3855 logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3856 self._start_task(verifier, self._verifier_exit)
# Stage 3: on successful verification, register the file with the bintree.
3858 def _verifier_exit(self, verifier):
3859 if self._default_exit(verifier) != os.EX_OK:
3863 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3865 self._current_task = None
3866 self.returncode = os.EX_OK
# Spawns a shell pipeline (bzip2 -dqc | tar -xp) to unpack a binary
# package archive into image_dir. Paths are shell-quoted before
# interpolation to keep the command line safe.
3869 class BinpkgExtractorAsync(SpawnProcess):
3871 __slots__ = ("image_dir", "pkg", "pkg_path")
3873 _shell_binary = portage.const.BASH_BINARY
3876 self.args = [self._shell_binary, "-c",
3877 "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3878 (portage._shell_quote(self.pkg_path),
3879 portage._shell_quote(self.image_dir))]
# The ebuild environment is passed through to the extraction process.
3881 self.env = self.pkg.root_config.settings.environ()
3882 SpawnProcess._start(self)
# One entry of the merge list: dispatches to EbuildBuild or Binpkg
# depending on pkg.type_name, and handles the uninstall path in merge().
3884 class MergeListItem(CompositeTask):
3887 TODO: For parallel scheduling, everything here needs asynchronous
3888 execution support (start, poll, and wait methods).
3891 __slots__ = ("args_set",
3892 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3893 "find_blockers", "logger", "mtimedb", "pkg",
3894 "pkg_count", "pkg_to_replace", "prefetcher",
3895 "settings", "statusMessage", "world_atom") + \
3901 build_opts = self.build_opts
3904 # uninstall, executed by self.merge()
3905 self.returncode = os.EX_OK
3909 args_set = self.args_set
3910 find_blockers = self.find_blockers
3911 logger = self.logger
3912 mtimedb = self.mtimedb
3913 pkg_count = self.pkg_count
3914 scheduler = self.scheduler
3915 settings = self.settings
3916 world_atom = self.world_atom
3917 ldpath_mtimes = mtimedb["ldpath"]
# Build the colorized "(N of M) cpv" status line.
3919 action_desc = "Emerging"
3921 if pkg.type_name == "binary":
3922 action_desc += " binary"
3924 if build_opts.fetchonly:
3925 action_desc = "Fetching"
3927 msg = "%s (%s of %s) %s" % \
3929 colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3930 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3931 colorize("GOOD", pkg.cpv))
# Append "from <repo>" when the package does not come from PORTDIR's repo.
3933 portdb = pkg.root_config.trees["porttree"].dbapi
3934 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3935 if portdir_repo_name:
3936 pkg_repo_name = pkg.metadata.get("repository")
3937 if pkg_repo_name != portdir_repo_name:
3938 if not pkg_repo_name:
3939 pkg_repo_name = "unknown repo"
3940 msg += " from %s" % pkg_repo_name
3943 msg += " %s %s" % (preposition, pkg.root)
3945 if not build_opts.pretend:
3946 self.statusMessage(msg)
3947 logger.log(" >>> emerge (%s of %s) %s to %s" % \
3948 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# Dispatch: source build for ebuilds ...
3950 if pkg.type_name == "ebuild":
3952 build = EbuildBuild(args_set=args_set,
3953 background=self.background,
3954 config_pool=self.config_pool,
3955 find_blockers=find_blockers,
3956 ldpath_mtimes=ldpath_mtimes, logger=logger,
3957 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3958 prefetcher=self.prefetcher, scheduler=scheduler,
3959 settings=settings, world_atom=world_atom)
3961 self._install_task = build
3962 self._start_task(build, self._default_final_exit)
# ... or binary package install.
3965 elif pkg.type_name == "binary":
3967 binpkg = Binpkg(background=self.background,
3968 find_blockers=find_blockers,
3969 ldpath_mtimes=ldpath_mtimes, logger=logger,
3970 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3971 prefetcher=self.prefetcher, settings=settings,
3972 scheduler=scheduler, world_atom=world_atom)
3974 self._install_task = binpkg
3975 self._start_task(binpkg, self._default_final_exit)
# poll()/wait() delegate to the active install task and report returncode.
3979 self._install_task.poll()
3980 return self.returncode
3983 self._install_task.wait()
3984 return self.returncode
# merge(): the uninstall branch runs PackageUninstall directly; otherwise
# installation is delegated to the previously-started install task.
3989 build_opts = self.build_opts
3990 find_blockers = self.find_blockers
3991 logger = self.logger
3992 mtimedb = self.mtimedb
3993 pkg_count = self.pkg_count
3994 prefetcher = self.prefetcher
3995 scheduler = self.scheduler
3996 settings = self.settings
3997 world_atom = self.world_atom
3998 ldpath_mtimes = mtimedb["ldpath"]
4001 if not (build_opts.buildpkgonly or \
4002 build_opts.fetchonly or build_opts.pretend):
4004 uninstall = PackageUninstall(background=self.background,
4005 ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
4006 pkg=pkg, scheduler=scheduler, settings=settings)
4009 retval = uninstall.wait()
4010 if retval != os.EX_OK:
4014 if build_opts.fetchonly or \
4015 build_opts.buildpkgonly:
4016 return self.returncode
4018 retval = self._install_task.install()
# Wraps a MergeListItem's merge() call, printing an "(N of M)" status line
# ("Installing"/"Uninstalling") unless a non-merge mode is active.
4021 class PackageMerge(AsynchronousTask):
4023 TODO: Implement asynchronous merge so that the scheduler can
4024 run while a merge is executing.
4027 __slots__ = ("merge",)
4031 pkg = self.merge.pkg
4032 pkg_count = self.merge.pkg_count
4035 action_desc = "Uninstalling"
4036 preposition = "from"
4039 action_desc = "Installing"
4041 counter_str = "(%s of %s) " % \
4042 (colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
4043 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)))
4048 colorize("GOOD", pkg.cpv))
4051 msg += " %s %s" % (preposition, pkg.root)
# Status output is suppressed in fetchonly/pretend/buildpkgonly modes.
4053 if not self.merge.build_opts.fetchonly and \
4054 not self.merge.build_opts.pretend and \
4055 not self.merge.build_opts.buildpkgonly:
4056 self.merge.statusMessage(msg)
# The actual work is delegated; its return value becomes our returncode.
4058 self.returncode = self.merge.merge()
# Base class for command-line dependency arguments (atoms, packages, sets).
# Stores the raw argument string and its RootConfig; str() yields the arg.
4061 class DependencyArg(object):
4062 def __init__(self, arg=None, root_config=None):
4064 self.root_config = root_config
4067 return str(self.arg)
# Dependency argument wrapping a single dependency atom; coerces plain
# strings into portage.dep.Atom and exposes a one-atom tuple as "set".
4069 class AtomArg(DependencyArg):
4070 def __init__(self, atom=None, **kwargs):
4071 DependencyArg.__init__(self, **kwargs)
4073 if not isinstance(self.atom, portage.dep.Atom):
4074 self.atom = portage.dep.Atom(self.atom)
4075 self.set = (self.atom, )
class PackageArg(DependencyArg):
	"""Dependency argument that wraps one specific Package instance.

	Mirrors the other *Arg classes: ``atom`` is an exact "=cpv" atom
	and ``set`` is a single-element tuple containing that atom.
	"""
	def __init__(self, package=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		# An exact-version atom ("=" + cpv) ensures only this precise
		# package version can satisfy the argument.
		exact_atom = portage.dep.Atom("=" + package.cpv)
		self.package = package
		self.atom = exact_atom
		self.set = (exact_atom, )
# Dependency argument for a package set ("@name" style); strips the
# SETPREFIX from the raw argument to obtain the set name.
4084 class SetArg(DependencyArg):
4085 def __init__(self, set=None, **kwargs):
4086 DependencyArg.__init__(self, **kwargs)
4088 self.name = self.arg[len(SETPREFIX):]
# Lightweight record of one dependency edge in the graph; priority and
# depth default when not supplied by the caller.
4090 class Dependency(SlotObject):
4091 __slots__ = ("atom", "blocker", "depth",
4092 "parent", "onlydeps", "priority", "root")
4093 def __init__(self, **kwargs):
4094 SlotObject.__init__(self, **kwargs)
4095 if self.priority is None:
4096 self.priority = DepPriority()
4097 if self.depth is None:
4100 class BlockerCache(portage.cache.mappings.MutableMapping):
4101 """This caches blockers of installed packages so that dep_check does not
4102 have to be done for every single installed package on every invocation of
4103 emerge. The cache is invalidated whenever it is detected that something
4104 has changed that might alter the results of dep_check() calls:
4105 1) the set of installed packages (including COUNTER) has changed
4106 2) the old-style virtuals have changed
4109 # Number of uncached packages to trigger cache update, since
4110 # it's wasteful to update it for every vdb change.
4111 _cache_threshold = 5
# Value object for one cache entry: the package's vdb COUNTER plus its
# blocker atoms.
4113 class BlockerData(object):
4115 __slots__ = ("__weakref__", "atoms", "counter")
4117 def __init__(self, counter, atoms):
4118 self.counter = counter
# __init__: load and validate the pickled on-disk cache; any corruption
# or version mismatch resets to an empty cache.
4121 def __init__(self, myroot, vardb):
4123 self._virtuals = vardb.settings.getvirtuals()
4124 self._cache_filename = os.path.join(myroot,
4125 portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4126 self._cache_version = "1"
4127 self._cache_data = None
4128 self._modified = set()
4133 f = open(self._cache_filename, mode='rb')
4134 mypickle = pickle.Unpickler(f)
# find_global = None disables resolution of global names during unpickling,
# restricting the pickle to plain data (defense against untrusted input).
4136 mypickle.find_global = None
4137 except AttributeError:
4138 # TODO: If py3k, override Unpickler.find_class().
4140 self._cache_data = mypickle.load()
4143 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4144 if isinstance(e, pickle.UnpicklingError):
4145 writemsg("!!! Error loading '%s': %s\n" % \
4146 (self._cache_filename, str(e)), noiselevel=-1)
4149 cache_valid = self._cache_data and \
4150 isinstance(self._cache_data, dict) and \
4151 self._cache_data.get("version") == self._cache_version and \
4152 isinstance(self._cache_data.get("blockers"), dict)
4154 # Validate all the atoms and counters so that
4155 # corruption is detected as soon as possible.
4156 invalid_items = set()
4157 for k, v in self._cache_data["blockers"].iteritems():
4158 if not isinstance(k, basestring):
4159 invalid_items.add(k)
4162 if portage.catpkgsplit(k) is None:
4163 invalid_items.add(k)
4165 except portage.exception.InvalidData:
4166 invalid_items.add(k)
4168 if not isinstance(v, tuple) or \
4170 invalid_items.add(k)
4173 if not isinstance(counter, (int, long)):
4174 invalid_items.add(k)
4176 if not isinstance(atoms, (list, tuple)):
4177 invalid_items.add(k)
4179 invalid_atom = False
4181 if not isinstance(atom, basestring):
4184 if atom[:1] != "!" or \
4185 not portage.isvalidatom(
4186 atom, allow_blockers=True):
4190 invalid_items.add(k)
# Purge every entry that failed validation.
4193 for k in invalid_items:
4194 del self._cache_data["blockers"][k]
4195 if not self._cache_data["blockers"]:
# Cache invalid or empty: start over with a fresh in-memory structure.
4199 self._cache_data = {"version":self._cache_version}
4200 self._cache_data["blockers"] = {}
4201 self._cache_data["virtuals"] = self._virtuals
4202 self._modified.clear()
# flush(): persist the cache when enough entries changed.
4205 """If the current user has permission and the internal blocker cache
4206 been updated, save it to disk and mark it unmodified. This is called
4207 by emerge after it has proccessed blockers for all installed packages.
4208 Currently, the cache is only written if the user has superuser
4209 privileges (since that's required to obtain a lock), but all users
4210 have read access and benefit from faster blocker lookups (as long as
4211 the entire cache is still valid). The cache is stored as a pickled
4212 dict object with the following format:
4216 "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4217 "virtuals" : vardb.settings.getvirtuals()
4220 if len(self._modified) >= self._cache_threshold and \
4223 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4224 pickle.dump(self._cache_data, f, protocol=2)
# World-readable, group portage, so non-root users benefit from the cache.
4226 portage.util.apply_secpass_permissions(
4227 self._cache_filename, gid=portage.portage_gid, mode=0644)
4228 except (IOError, OSError), e:
4230 self._modified.clear()
4232 def __setitem__(self, cpv, blocker_data):
4234 Update the cache and mark it as modified for a future call to
4237 @param cpv: Package for which to cache blockers.
4239 @param blocker_data: An object with counter and atoms attributes.
4240 @type blocker_data: BlockerData
# Atoms are stored as plain strings so the entry is safely picklable.
4242 self._cache_data["blockers"][cpv] = \
4243 (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4244 self._modified.add(cpv)
# __iter__ body: iterate over cached cpv keys; guards against a None
# _cache_data (observed under python-trace).
4247 if self._cache_data is None:
4248 # triggered by python-trace
4250 return iter(self._cache_data["blockers"])
4252 def __delitem__(self, cpv):
4253 del self._cache_data["blockers"][cpv]
4255 def __getitem__(self, cpv):
4258 @returns: An object with counter and atoms attributes.
# Rehydrate the stored (counter, atoms) tuple into a BlockerData instance.
4260 return self.BlockerData(*self._cache_data["blockers"][cpv])
# Computes, with caching, which installed packages block (or are blocked
# by) a package that is about to be merged.
4262 class BlockerDB(object):
4264 def __init__(self, root_config):
4265 self._root_config = root_config
4266 self._vartree = root_config.trees["vartree"]
4267 self._portdb = root_config.trees["porttree"].dbapi
4269 self._dep_check_trees = None
4270 self._fake_vartree = None
# Lazily build (then sync) a FakeVartree plus the tree mapping that
# dep_check() expects.
4272 def _get_fake_vartree(self, acquire_lock=0):
4273 fake_vartree = self._fake_vartree
4274 if fake_vartree is None:
4275 fake_vartree = FakeVartree(self._root_config,
4276 acquire_lock=acquire_lock)
4277 self._fake_vartree = fake_vartree
4278 self._dep_check_trees = { self._vartree.root : {
4279 "porttree" : fake_vartree,
4280 "vartree" : fake_vartree,
4283 fake_vartree.sync(acquire_lock=acquire_lock)
# Return the set of installed packages that conflict with new_pkg,
# checking blockers in both directions.
4286 def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4287 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4288 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4289 settings = self._vartree.settings
4290 stale_cache = set(blocker_cache)
4291 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4292 dep_check_trees = self._dep_check_trees
4293 vardb = fake_vartree.dbapi
4294 installed_pkgs = list(vardb)
# Pass 1: ensure every installed package has an up-to-date cache entry
# (COUNTER mismatch invalidates a stale entry).
4296 for inst_pkg in installed_pkgs:
4297 stale_cache.discard(inst_pkg.cpv)
4298 cached_blockers = blocker_cache.get(inst_pkg.cpv)
4299 if cached_blockers is not None and \
4300 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4301 cached_blockers = None
4302 if cached_blockers is not None:
4303 blocker_atoms = cached_blockers.atoms
4305 # Use aux_get() to trigger FakeVartree global
4306 # updates on *DEPEND when appropriate.
4307 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
# _dep_check_strict is toggled off around dep_check() for installed
# packages, whose recorded deps may be invalid by current standards.
4309 portage.dep._dep_check_strict = False
4310 success, atoms = portage.dep_check(depstr,
4311 vardb, settings, myuse=inst_pkg.use.enabled,
4312 trees=dep_check_trees, myroot=inst_pkg.root)
4314 portage.dep._dep_check_strict = True
4316 pkg_location = os.path.join(inst_pkg.root,
4317 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4318 portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4319 (pkg_location, atoms), noiselevel=-1)
# Only blocker atoms ("!...") are cached, sorted for stable storage.
4322 blocker_atoms = [atom for atom in atoms \
4323 if atom.startswith("!")]
4324 blocker_atoms.sort()
4325 counter = long(inst_pkg.metadata["COUNTER"])
4326 blocker_cache[inst_pkg.cpv] = \
4327 blocker_cache.BlockerData(counter, blocker_atoms)
# Drop cache entries for packages no longer installed, then persist.
4328 for cpv in stale_cache:
4329 del blocker_cache[cpv]
4330 blocker_cache.flush()
# Direction 1: installed packages whose blockers match new_pkg.
4332 blocker_parents = digraph()
4334 for pkg in installed_pkgs:
4335 for blocker_atom in blocker_cache[pkg.cpv].atoms:
4336 blocker_atom = blocker_atom.lstrip("!")
4337 blocker_atoms.append(blocker_atom)
4338 blocker_parents.add(blocker_atom, pkg)
4340 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4341 blocking_pkgs = set()
4342 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4343 blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4345 # Check for blockers in the other direction.
4346 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4348 portage.dep._dep_check_strict = False
4349 success, atoms = portage.dep_check(depstr,
4350 vardb, settings, myuse=new_pkg.use.enabled,
4351 trees=dep_check_trees, myroot=new_pkg.root)
4353 portage.dep._dep_check_strict = True
4355 # We should never get this far with invalid deps.
4356 show_invalid_depstring_notice(new_pkg, depstr, atoms)
# Direction 2: new_pkg's own blockers matched against installed packages.
4359 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4362 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4363 for inst_pkg in installed_pkgs:
# next() is used purely as an "any match?" probe; StopIteration means
# no blocker atom matched this installed package.
4365 blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4366 except (portage.exception.InvalidDependString, StopIteration):
4368 blocking_pkgs.add(inst_pkg)
4370 return blocking_pkgs
# Print a detailed error (via writemsg_level) explaining a corrupt or
# invalid dependency string, with advice tailored to installed packages
# ("nomerge") versus packages being merged.
4372 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4374 msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4375 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4376 p_type, p_root, p_key, p_status = parent_node
4378 if p_status == "nomerge":
4379 category, pf = portage.catsplit(p_key)
4380 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4381 msg.append("Portage is unable to process the dependencies of the ")
4382 msg.append("'%s' package. " % p_key)
4383 msg.append("In order to correct this problem, the package ")
4384 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4385 msg.append("As a temporary workaround, the --nodeps option can ")
4386 msg.append("be used to ignore all dependencies. For reference, ")
4387 msg.append("the problematic dependencies can be found in the ")
4388 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4390 msg.append("This package can not be installed. ")
4391 msg.append("Please notify the '%s' package maintainer " % p_key)
4392 msg.append("about this problem.")
# Advice text is wrapped to 72 columns before being logged as an error.
4394 msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4395 writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4397 class PackageVirtualDbapi(portage.dbapi):
4399 A dbapi-like interface class that represents the state of the installed
4400 package database as new packages are installed, replacing any packages
4401 that previously existed in the same slot. The main difference between
4402 this class and fakedbapi is that this one uses Package instances
4403 internally (passed in via cpv_inject() and cpv_remove() calls).
4405 def __init__(self, settings):
4406 portage.dbapi.__init__(self)
4407 self.settings = settings
4408 self._match_cache = {}
# clear(): remove all packages from both internal maps.
4414 Remove all packages.
4418 self._cp_map.clear()
4419 self._cpv_map.clear()
# copy(): deep-enough copy -- the per-cp lists are duplicated so mutations
# of the copy do not leak back into the original.
4422 obj = PackageVirtualDbapi(self.settings)
4423 obj._match_cache = self._match_cache.copy()
4424 obj._cp_map = self._cp_map.copy()
4425 for k, v in obj._cp_map.iteritems():
4426 obj._cp_map[k] = v[:]
4427 obj._cpv_map = self._cpv_map.copy()
# __iter__: yields the stored Package instances.
4431 return self._cpv_map.itervalues()
# Containment/lookup compare the stored entry against the requested item;
# NOTE(review): the comparison expression continues on elided lines --
# confirm identity semantics against the full source.
4433 def __contains__(self, item):
4434 existing = self._cpv_map.get(item.cpv)
4435 if existing is not None and \
4440 def get(self, item, default=None):
4441 cpv = getattr(item, "cpv", None)
4445 type_name, root, cpv, operation = item
4447 existing = self._cpv_map.get(cpv)
4448 if existing is not None and \
4453 def match_pkgs(self, atom):
4454 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4456 def _clear_cache(self):
4457 if self._categories is not None:
4458 self._categories = None
4459 if self._match_cache:
4460 self._match_cache = {}
# match(): memoized wrapper around portage.dbapi.match(); results are
# cached per origdep. NOTE(review): the return statements appear elided
# in this excerpt -- confirm whether a copy of the cached list is returned.
4462 def match(self, origdep, use_cache=1):
4463 result = self._match_cache.get(origdep)
4464 if result is not None:
4466 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4467 self._match_cache[origdep] = result
4470 def cpv_exists(self, cpv):
4471 return cpv in self._cpv_map
# cp_list(): cpvs for a category/package name, served from _match_cache
# when possible; results are sorted ascending before caching. Empty
# results for "virtual/" names are deliberately not cached.
4473 def cp_list(self, mycp, use_cache=1):
4474 cachelist = self._match_cache.get(mycp)
4475 # cp_list() doesn't expand old-style virtuals
4476 if cachelist and cachelist[0].startswith(mycp):
4478 cpv_list = self._cp_map.get(mycp)
4479 if cpv_list is None:
4482 cpv_list = [pkg.cpv for pkg in cpv_list]
4483 self._cpv_sort_ascending(cpv_list)
4484 if not (not cpv_list and mycp.startswith("virtual/")):
4485 self._match_cache[mycp] = cpv_list
# cp_all() / cpv_all(): snapshots of the known cp and cpv keys.
4489 return list(self._cp_map)
4492 return list(self._cpv_map)
# cpv_inject(): add a Package, evicting any existing package in the same
# cpv or the same slot first (models slot-replacement on install).
4494 def cpv_inject(self, pkg):
4495 cp_list = self._cp_map.get(pkg.cp)
4498 self._cp_map[pkg.cp] = cp_list
4499 e_pkg = self._cpv_map.get(pkg.cpv)
4500 if e_pkg is not None:
4503 self.cpv_remove(e_pkg)
4504 for e_pkg in cp_list:
4505 if e_pkg.slot_atom == pkg.slot_atom:
4508 self.cpv_remove(e_pkg)
4511 self._cpv_map[pkg.cpv] = pkg
# cpv_remove(): delete a Package from both maps.
4514 def cpv_remove(self, pkg):
4515 old_pkg = self._cpv_map.get(pkg.cpv)
4518 self._cp_map[pkg.cp].remove(pkg)
4519 del self._cpv_map[pkg.cpv]
4522 def aux_get(self, cpv, wants):
4523 metadata = self._cpv_map[cpv].metadata
4524 return [metadata.get(x, "") for x in wants]
4526 def aux_update(self, cpv, values):
4527 self._cpv_map[cpv].metadata.update(values)
# The dependency resolver. __init__ mirrors each configured root with
# FakeVartree/PackageVirtualDbapi instances so resolution can model the
# post-install vdb state without touching the real one.
4530 class depgraph(object):
4532 pkg_tree_map = RootConfig.pkg_tree_map
4534 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4536 def __init__(self, settings, trees, myopts, myparams, spinner):
4537 self.settings = settings
4538 self.target_root = settings["ROOT"]
4539 self.myopts = myopts
4540 self.myparams = myparams
4542 if settings.get("PORTAGE_DEBUG", "") == "1":
4544 self.spinner = spinner
4545 self._running_root = trees["/"]["root_config"]
4546 self._opts_no_restart = Scheduler._opts_no_restart
4547 self.pkgsettings = {}
4548 # Maps slot atom to package for each Package added to the graph.
4549 self._slot_pkg_map = {}
4550 # Maps nodes to the reasons they were selected for reinstallation.
4551 self._reinstall_nodes = {}
4554 self._trees_orig = trees
4556 # Contains a filtered view of preferred packages that are selected
4557 # from available repositories.
4558 self._filtered_trees = {}
4559 # Contains installed packages and new packages that have been added
4561 self._graph_trees = {}
4562 # All Package instances
4563 self._pkg_cache = {}
# Per-root setup: shadow trees with FakeVartree, clone settings, and
# build the fake db that models the post-merge vdb.
4564 for myroot in trees:
4565 self.trees[myroot] = {}
4566 # Create a RootConfig instance that references
4567 # the FakeVartree instead of the real one.
4568 self.roots[myroot] = RootConfig(
4569 trees[myroot]["vartree"].settings,
4571 trees[myroot]["root_config"].setconfig)
4572 for tree in ("porttree", "bintree"):
4573 self.trees[myroot][tree] = trees[myroot][tree]
4574 self.trees[myroot]["vartree"] = \
4575 FakeVartree(trees[myroot]["root_config"],
4576 pkg_cache=self._pkg_cache)
4577 self.pkgsettings[myroot] = portage.config(
4578 clone=self.trees[myroot]["vartree"].settings)
4579 self._slot_pkg_map[myroot] = {}
4580 vardb = self.trees[myroot]["vartree"].dbapi
4581 preload_installed_pkgs = "--nodeps" not in self.myopts and \
4582 "--buildpkgonly" not in self.myopts
4583 # This fakedbapi instance will model the state that the vdb will
4584 # have after new packages have been installed.
4585 fakedb = PackageVirtualDbapi(vardb.settings)
4586 if preload_installed_pkgs:
4588 self.spinner.update()
4589 # This triggers metadata updates via FakeVartree.
4590 vardb.aux_get(pkg.cpv, [])
4591 fakedb.cpv_inject(pkg)
4593 # Now that the vardb state is cached in our FakeVartree,
4594 # we won't be needing the real vartree cache for awhile.
4595 # To make some room on the heap, clear the vardbapi
4597 trees[myroot]["vartree"].dbapi._clear_cache()
4600 self.mydbapi[myroot] = fakedb
4603 graph_tree.dbapi = fakedb
4604 self._graph_trees[myroot] = {}
4605 self._filtered_trees[myroot] = {}
4606 # Substitute the graph tree for the vartree in dep_check() since we
4607 # want atom selections to be consistent with package selections
4608 # have already been made.
4609 self._graph_trees[myroot]["porttree"] = graph_tree
4610 self._graph_trees[myroot]["vartree"] = graph_tree
4611 def filtered_tree():
4613 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4614 self._filtered_trees[myroot]["porttree"] = filtered_tree
4616 # Passing in graph_tree as the vartree here could lead to better
4617 # atom selections in some cases by causing atoms for packages that
4618 # have been added to the graph to be preferred over other choices.
4619 # However, it can trigger atom selections that result in
4620 # unresolvable direct circular dependencies. For example, this
4621 # happens with gwydion-dylan which depends on either itself or
4622 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4623 # gwydion-dylan-bin needs to be selected in order to avoid a
4624 # an unresolvable direct circular dependency.
4626 # To solve the problem described above, pass in "graph_db" so that
4627 # packages that have been added to the graph are distinguishable
4628 # from other available packages and installed packages. Also, pass
4629 # the parent package into self._select_atoms() calls so that
4630 # unresolvable direct circular dependencies can be detected and
4631 # avoided when possible.
4632 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4633 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
# Build the ordered list of candidate dbs; ordering here determines
# package-source preference during selection.
4636 portdb = self.trees[myroot]["porttree"].dbapi
4637 bindb = self.trees[myroot]["bintree"].dbapi
4638 vardb = self.trees[myroot]["vartree"].dbapi
4639 # (db, pkg_type, built, installed, db_keys)
4640 if "--usepkgonly" not in self.myopts:
4641 db_keys = list(portdb._aux_cache_keys)
4642 dbs.append((portdb, "ebuild", False, False, db_keys))
4643 if "--usepkg" in self.myopts:
4644 db_keys = list(bindb._aux_cache_keys)
4645 dbs.append((bindb, "binary", True, False, db_keys))
4646 db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4647 dbs.append((vardb, "installed", True, True, db_keys))
4648 self._filtered_trees[myroot]["dbs"] = dbs
4649 if "--usepkg" in self.myopts:
4650 self.trees[myroot]["bintree"].populate(
4651 "--getbinpkg" in self.myopts,
4652 "--getbinpkgonly" in self.myopts)
# Graph bookkeeping structures (blockers, conflicts, display queues).
4655 self.digraph=portage.digraph()
4656 # contains all sets added to the graph
4658 # contains atoms given as arguments
4659 self._sets["args"] = InternalPackageSet()
4660 # contains all atoms from all sets added to the graph, including
4661 # atoms given as arguments
4662 self._set_atoms = InternalPackageSet()
4663 self._atom_arg_map = {}
4664 # contains all nodes pulled in by self._set_atoms
4665 self._set_nodes = set()
4666 # Contains only Blocker -> Uninstall edges
4667 self._blocker_uninstalls = digraph()
4668 # Contains only Package -> Blocker edges
4669 self._blocker_parents = digraph()
4670 # Contains only irrelevant Package -> Blocker edges
4671 self._irrelevant_blockers = digraph()
4672 # Contains only unsolvable Package -> Blocker edges
4673 self._unsolvable_blockers = digraph()
4674 # Contains all Blocker -> Blocked Package edges
4675 self._blocked_pkgs = digraph()
4676 # Contains world packages that have been protected from
4677 # uninstallation but may not have been added to the graph
4678 # if the graph is not complete yet.
4679 self._blocked_world_pkgs = {}
4680 self._slot_collision_info = {}
4681 # Slot collision nodes are not allowed to block other packages since
4682 # blocker validation is only able to account for one package per slot.
4683 self._slot_collision_nodes = set()
4684 self._parent_atoms = {}
4685 self._slot_conflict_parent_atoms = set()
4686 self._serialized_tasks_cache = None
4687 self._scheduler_graph = None
4688 self._displayed_list = None
4689 self._pprovided_args = []
4690 self._missing_args = []
4691 self._masked_installed = set()
4692 self._unsatisfied_deps_for_display = []
4693 self._unsatisfied_blockers_for_display = None
4694 self._circular_deps_for_display = None
4695 self._dep_stack = []
4696 self._unsatisfied_deps = []
4697 self._initially_unsatisfied_deps = []
4698 self._ignored_deps = []
4699 self._required_set_names = set(["system", "world"])
4700 self._select_atoms = self._select_atoms_highest_available
4701 self._select_package = self._select_pkg_highest_available
4702 self._highest_pkg_cache = {}
4704 def _show_slot_collision_notice(self):
4705 """Show an informational message advising the user to mask one of the
4706 the packages. In some cases it may be possible to resolve this
4707 automatically, but support for backtracking (removal nodes that have
4708 already been selected) will be required in order to handle all possible
4712 if not self._slot_collision_info:
4715 self._show_merge_list()
4718 msg.append("\n!!! Multiple package instances within a single " + \
4719 "package slot have been pulled\n")
4720 msg.append("!!! into the dependency graph, resulting" + \
4721 " in a slot conflict:\n\n")
4723 # Max number of parents shown, to avoid flooding the display.
4725 explanation_columns = 70
# One section per conflicting slot: list the colliding nodes and a pruned
# selection of the parents that pulled each one in.
4727 for (slot_atom, root), slot_nodes \
4728 in self._slot_collision_info.iteritems():
4729 msg.append(str(slot_atom))
4732 for node in slot_nodes:
4734 msg.append(str(node))
4735 parent_atoms = self._parent_atoms.get(node)
4738 # Prefer conflict atoms over others.
4739 for parent_atom in parent_atoms:
4740 if len(pruned_list) >= max_parents:
4742 if parent_atom in self._slot_conflict_parent_atoms:
4743 pruned_list.add(parent_atom)
4745 # If this package was pulled in by conflict atoms then
4746 # show those alone since those are the most interesting.
4748 # When generating the pruned list, prefer instances
4749 # of DependencyArg over instances of Package.
4750 for parent_atom in parent_atoms:
4751 if len(pruned_list) >= max_parents:
4753 parent, atom = parent_atom
4754 if isinstance(parent, DependencyArg):
4755 pruned_list.add(parent_atom)
4756 # Prefer Packages instances that themselves have been
4757 # pulled into collision slots.
4758 for parent_atom in parent_atoms:
4759 if len(pruned_list) >= max_parents:
4761 parent, atom = parent_atom
4762 if isinstance(parent, Package) and \
4763 (parent.slot_atom, parent.root) \
4764 in self._slot_collision_info:
4765 pruned_list.add(parent_atom)
# Fallback: top up the pruned list with any remaining parents.
4766 for parent_atom in parent_atoms:
4767 if len(pruned_list) >= max_parents:
4769 pruned_list.add(parent_atom)
4770 omitted_parents = len(parent_atoms) - len(pruned_list)
4771 parent_atoms = pruned_list
4772 msg.append(" pulled in by\n")
4773 for parent_atom in parent_atoms:
4774 parent, atom = parent_atom
4775 msg.append(2*indent)
4776 if isinstance(parent,
4777 (PackageArg, AtomArg)):
4778 # For PackageArg and AtomArg types, it's
4779 # redundant to display the atom attribute.
4780 msg.append(str(parent))
4782 # Display the specific atom from SetArg or
4784 msg.append("%s required by %s" % (atom, parent))
4787 msg.append(2*indent)
4788 msg.append("(and %d more)\n" % omitted_parents)
4790 msg.append(" (no parents)\n")
# Append a per-conflict explanation when one can be generated.
4792 explanation = self._slot_conflict_explanation(slot_nodes)
4795 msg.append(indent + "Explanation:\n\n")
4796 for line in textwrap.wrap(explanation, explanation_columns):
4797 msg.append(2*indent + line + "\n")
4800 sys.stderr.write("".join(msg))
# If every conflict got an explanation (or --quiet), skip the generic
# masking advice below.
4803 explanations_for_all = explanations == len(self._slot_collision_info)
4805 if explanations_for_all or "--quiet" in self.myopts:
4809 msg.append("It may be possible to solve this problem ")
4810 msg.append("by using package.mask to prevent one of ")
4811 msg.append("those packages from being selected. ")
4812 msg.append("However, it is also possible that conflicting ")
4813 msg.append("dependencies exist such that they are impossible to ")
4814 msg.append("satisfy simultaneously. If such a conflict exists in ")
4815 msg.append("the dependencies of two different packages, then those ")
4816 msg.append("packages can not be installed simultaneously.")
# The generic advice is flowed to 72 columns on stderr via the (long
# deprecated) formatter module.
4818 from formatter import AbstractFormatter, DumbWriter
4819 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4821 f.add_flowing_data(x)
4825 msg.append("For more information, see MASKED PACKAGES ")
4826 msg.append("section in the emerge man page or refer ")
4827 msg.append("to the Gentoo Handbook.")
4829 f.add_flowing_data(x)
	def _slot_conflict_explanation(self, slot_nodes):
		"""
		Return an explanation string (or no suggestion) for a slot
		conflict between exactly two packages.

		When a slot conflict occurs due to USE deps, there are a few
		different cases to consider:

		1) New USE are correctly set but --newuse wasn't requested so an
		   installed package with incorrect USE happened to get pulled
		   into graph before the new one.

		2) New USE are incorrectly set but an installed package has correct
		   USE so it got pulled into the graph, and a new instance also got
		   pulled in due to --newuse or an upgrade.

		3) Multiple USE deps exist that can't be satisfied simultaneously,
		   and multiple package instances got pulled into the same slot to
		   satisfy the conflicting deps.

		Currently, explanations and suggested courses of action are generated
		for cases 1 and 2. Case 3 is too complex to give a useful suggestion.

		@param slot_nodes: the packages involved in the slot conflict
		"""
		if len(slot_nodes) != 2:
			# Suggestions are only implemented for
			# conflicts between two packages.
		all_conflict_atoms = self._slot_conflict_parent_atoms
		# Classify the two nodes: one matched by conflict atoms,
		# one not matched.
		matched_atoms = None
		unmatched_node = None
		for node in slot_nodes:
			parent_atoms = self._parent_atoms.get(node)
			if not parent_atoms:
				# Normally, there are always parent atoms. If there are
				# none then something unexpected is happening and there's
				# currently no suggestion for this case.
			conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
			for parent_atom in conflict_atoms:
				parent, atom = parent_atom
				# Suggestions are currently only implemented for cases
				# in which all conflict atoms have USE deps.
				if matched_node is not None:
					# If conflict atoms match multiple nodes
					# then there's no suggestion.
				matched_atoms = conflict_atoms
				if unmatched_node is not None:
					# Neither node is matched by conflict atoms, and
					# there is no suggestion for this case.
				unmatched_node = node
		if matched_node is None or unmatched_node is None:
			# This shouldn't happen.
		if unmatched_node.installed and not matched_node.installed and \
			unmatched_node.cpv == matched_node.cpv:
			# Case 1 (see docstring).
			# If the conflicting packages are the same version then
			# --newuse should be all that's needed. If they are different
			# versions then there's some other problem.
			return "New USE are correctly set, but --newuse wasn't" + \
				" requested, so an installed package with incorrect USE " + \
				"happened to get pulled into the dependency graph. " + \
				"In order to solve " + \
				"this, either specify the --newuse option or explicitly " + \
				" reinstall '%s'." % matched_node.slot_atom

		if matched_node.installed and not unmatched_node.installed:
			# Case 2 (see docstring): list every conflict atom that
			# USE must be adjusted to satisfy.
			atoms = sorted(set(atom for parent, atom in matched_atoms))
			explanation = ("New USE for '%s' are incorrectly set. " + \
				"In order to solve this, adjust USE to satisfy '%s'") % \
				(matched_node.slot_atom, atoms[0])
			for atom in atoms[1:-1]:
				explanation += ", '%s'" % (atom,)
			explanation += " and '%s'" % (atoms[-1],)
	def _process_slot_conflicts(self):
		"""
		Process slot conflict data to identify specific atoms which
		lead to conflict. These atoms only match a subset of the
		packages that have been pulled into a given slot.
		"""
		for (slot_atom, root), slot_nodes \
			in self._slot_collision_info.iteritems():

			# Collect the union of parent atoms across all packages
			# pulled into this slot.
			all_parent_atoms = set()
			for pkg in slot_nodes:
				parent_atoms = self._parent_atoms.get(pkg)
				if not parent_atoms:
				all_parent_atoms.update(parent_atoms)

			# Any parent atom that fails to match one of the slot's
			# packages is recorded as a conflict atom.
			for pkg in slot_nodes:
				parent_atoms = self._parent_atoms.get(pkg)
				if parent_atoms is None:
					parent_atoms = set()
					self._parent_atoms[pkg] = parent_atoms
				for parent_atom in all_parent_atoms:
					if parent_atom in parent_atoms:
					# Use package set for matching since it will match via
					# PROVIDE when necessary, while match_from_list does not.
					parent, atom = parent_atom
					atom_set = InternalPackageSet(
						initial_atoms=(atom,))
					if atom_set.findAtomForPackage(pkg):
						parent_atoms.add(parent_atom)
						self._slot_conflict_parent_atoms.add(parent_atom)
	def _reinstall_for_flags(self, forced_flags,
		orig_use, orig_iuse, cur_use, cur_iuse):
		"""Return a set of flags that trigger reinstallation, or None if there
		are no such flags.

		@param forced_flags: flags whose state the user cannot change;
			excluded from the comparison
		@param orig_use/orig_iuse: USE and IUSE of the installed instance
		@param cur_use/cur_iuse: USE and IUSE of the candidate instance
		"""
		if "--newuse" in self.myopts:
			# Flags added to or removed from IUSE (minus forced flags),
			# plus flags whose effective enabled state changed.
			flags = set(orig_iuse.symmetric_difference(
				cur_iuse).difference(forced_flags))
			flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
				cur_iuse.intersection(cur_use)))
		elif "changed-use" == self.myopts.get("--reinstall"):
			# Only flags whose effective enabled state changed count;
			# mere IUSE additions/removals are ignored here.
			flags = orig_iuse.intersection(orig_use).symmetric_difference(
				cur_iuse.intersection(cur_use))
	def _create_graph(self, allow_unsatisfied=False):
		"""
		Drain the dependency stack, dispatching Package nodes to
		_add_pkg_deps() and plain Dependency objects to _add_dep().
		"""
		dep_stack = self._dep_stack
		self.spinner.update()
		dep = dep_stack.pop()
		if isinstance(dep, Package):
			if not self._add_pkg_deps(dep,
				allow_unsatisfied=allow_unsatisfied):
		if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
	def _add_dep(self, dep, allow_unsatisfied=False):
		"""
		Add a single Dependency to the graph. Blockers are registered
		against their parent; normal deps are resolved to a package via
		_select_package() and handed to _add_pkg().
		"""
		debug = "--debug" in self.myopts
		buildpkgonly = "--buildpkgonly" in self.myopts
		nodeps = "--nodeps" in self.myopts
		empty = "empty" in self.myparams
		deep = "deep" in self.myparams
		# --update semantics only apply near the top of the graph
		# (depth <= 1).
		update = "--update" in self.myopts and dep.depth <= 1
		if not buildpkgonly and \
			dep.parent not in self._slot_collision_nodes:
			if dep.parent.onlydeps:
				# It's safe to ignore blockers if the
				# parent is an --onlydeps node.
			# The blocker applies to the root where
			# the parent is or will be installed.
			blocker = Blocker(atom=dep.atom,
				eapi=dep.parent.metadata["EAPI"],
				root=dep.parent.root)
			self._blocker_parents.add(blocker, dep.parent)
		dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
			onlydeps=dep.onlydeps)
		if dep.priority.optional:
			# This could be an unecessary build-time dep
			# pulled in by --with-bdeps=y.
		if allow_unsatisfied:
			self._unsatisfied_deps.append(dep)
		self._unsatisfied_deps_for_display.append(
			((dep.root, dep.atom), {"myparent":dep.parent}))
		# In some cases, dep_check will return deps that shouldn't
		# be proccessed any further, so they are identified and
		# discarded here. Try to discard as few as possible since
		# discarded dependencies reduce the amount of information
		# available for optimization of merge order.
		if dep.priority.satisfied and \
			not dep_pkg.installed and \
			not (existing_node or empty or deep or update):
			if dep.root == self.target_root:
				myarg = self._iter_atoms_for_pkg(dep_pkg).next()
			except StopIteration:
			except portage.exception.InvalidDependString:
				if not dep_pkg.installed:
					# This shouldn't happen since the package
					# should have been masked.
			self._ignored_deps.append(dep)
		if not self._add_pkg(dep_pkg, dep):
	def _add_pkg(self, pkg, dep):
		"""
		Add a package node to the dependency graph, reusing or
		conflicting with any existing node in the same slot, then queue
		the package so that its own dependencies are processed.
		"""
		myparent = dep.parent
		priority = dep.priority
		if priority is None:
			priority = DepPriority()
		"""
		Fills the digraph with nodes comprised of packages to merge.
		mybigkey is the package spec of the package to merge.
		myparent is the package depending on mybigkey ( or None )
		addme = Should we add this package to the digraph or are we just looking at it's deps?
			Think --onlydeps, we need to ignore packages in that case.
		#IUSE-aware emerge -> USE DEP aware depgraph
		#"no downgrade" emerge
		"""
		# Ensure that the dependencies of the same package
		# are never processed more than once.
		previously_added = pkg in self.digraph

		# select the correct /var database that we'll be checking against
		vardbapi = self.trees[pkg.root]["vartree"].dbapi
		pkgsettings = self.pkgsettings[pkg.root]

			arg_atoms = list(self._iter_atoms_for_pkg(pkg))
		except portage.exception.InvalidDependString, e:
			if not pkg.installed:
				show_invalid_depstring_notice(
					pkg, pkg.metadata["PROVIDE"], str(e))

		if not pkg.onlydeps:
			if not pkg.installed and \
				"empty" not in self.myparams and \
				vardbapi.match(pkg.slot_atom):
				# Increase the priority of dependencies on packages that
				# are being rebuilt. This optimizes merge order so that
				# dependencies are rebuilt/updated as soon as possible,
				# which is needed especially when emerge is called by
				# revdep-rebuild since dependencies may be affected by ABI
				# breakage that has rendered them useless. Don't adjust
				# priority here when in "empty" mode since all packages
				# are being merged in that case.
				priority.rebuild = True

			existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
			slot_collision = False
				existing_node_matches = pkg.cpv == existing_node.cpv
				if existing_node_matches and \
					pkg != existing_node and \
					dep.atom is not None:
					# Use package set for matching since it will match via
					# PROVIDE when necessary, while match_from_list does not.
					atom_set = InternalPackageSet(initial_atoms=[dep.atom])
					if not atom_set.findAtomForPackage(existing_node):
						existing_node_matches = False
				if existing_node_matches:
					# The existing node can be reused.
					for parent_atom in arg_atoms:
						parent, atom = parent_atom
						self.digraph.add(existing_node, parent,
					self._add_parent_atom(existing_node, parent_atom)
					# If a direct circular dependency is not an unsatisfied
					# buildtime dependency then drop it here since otherwise
					# it can skew the merge order calculation in an unwanted
					# way.
					if existing_node != myparent or \
						(priority.buildtime and not priority.satisfied):
						self.digraph.addnode(existing_node, myparent,
						if dep.atom is not None and dep.parent is not None:
							self._add_parent_atom(existing_node,
								(dep.parent, dep.atom))
					# A slot collision has occurred.  Sometimes this coincides
					# with unresolvable blockers, so the slot collision will be
					# shown later if there are no unresolvable blockers.
					self._add_slot_conflict(pkg)
					slot_collision = True

			if slot_collision:
				# Now add this node to the graph so that self.display()
				# can show use flags and --tree portage.output.  This node is
				# only being partially added to the graph.  It must not be
				# allowed to interfere with the other nodes that have been
				# added.  Do not overwrite data for existing nodes in
				# self.mydbapi since that data will be used for blocker
				# validation.
				# Even though the graph is now invalid, continue to process
				# dependencies so that things like --fetchonly can still
				# function despite collisions.
			elif not previously_added:
				self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
				self.mydbapi[pkg.root].cpv_inject(pkg)
				self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()

			if not pkg.installed:
				# Allow this package to satisfy old-style virtuals in case it
				# doesn't already. Any pre-existing providers will be preferred
				# over this one.
					pkgsettings.setinst(pkg.cpv, pkg.metadata)
					# For consistency, also update the global virtuals.
					settings = self.roots[pkg.root].settings
					settings.setinst(pkg.cpv, pkg.metadata)
				except portage.exception.InvalidDependString, e:
					show_invalid_depstring_notice(
						pkg, pkg.metadata["PROVIDE"], str(e))

			self._set_nodes.add(pkg)

		# Do this even when addme is False (--onlydeps) so that the
		# parent/child relationship is always known in case
		# self._show_slot_collision_notice() needs to be called later.
		self.digraph.add(pkg, myparent, priority=priority)
		if dep.atom is not None and dep.parent is not None:
			self._add_parent_atom(pkg, (dep.parent, dep.atom))

		for parent_atom in arg_atoms:
			parent, atom = parent_atom
			self.digraph.add(pkg, parent, priority=priority)
			self._add_parent_atom(pkg, parent_atom)

		""" This section determines whether we go deeper into dependencies or not.
		    We want to go deeper on a few occasions:
		    Installing package A, we need to make sure package A's deps are met.
		    emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
		    If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
		"""
		dep_stack = self._dep_stack
		if "recurse" not in self.myparams:
		elif pkg.installed and \
			"deep" not in self.myparams:
			# Deps of already-installed packages are only followed
			# with --deep; otherwise they are queued as ignored.
			dep_stack = self._ignored_deps

		self.spinner.update()

		if not previously_added:
			dep_stack.append(pkg)
5219 def _add_parent_atom(self, pkg, parent_atom):
5220 parent_atoms = self._parent_atoms.get(pkg)
5221 if parent_atoms is None:
5222 parent_atoms = set()
5223 self._parent_atoms[pkg] = parent_atoms
5224 parent_atoms.add(parent_atom)
	def _add_slot_conflict(self, pkg):
		"""
		Register pkg as a slot-collision node and record it, together
		with the package currently occupying its slot, in
		_slot_collision_info keyed by (slot atom, root).
		"""
		self._slot_collision_nodes.add(pkg)
		slot_key = (pkg.slot_atom, pkg.root)
		slot_nodes = self._slot_collision_info.get(slot_key)
		if slot_nodes is None:
			# First conflict for this slot: seed the set with the
			# package already occupying the slot.
			slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
			self._slot_collision_info[slot_key] = slot_nodes
	def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
		"""
		Extract DEPEND/RDEPEND/PDEPEND from pkg's metadata, select
		atoms for each, and feed the resulting Dependency objects to
		_add_dep().
		"""
		mytype = pkg.type_name
		metadata = pkg.metadata
		myuse = pkg.use.enabled
		depth = pkg.depth + 1
		removal_action = "remove" in self.myparams

		depkeys = ["DEPEND","RDEPEND","PDEPEND"]
			edepend[k] = metadata[k]

		if not pkg.built and \
			"--buildpkgonly" in self.myopts and \
			"deep" not in self.myparams and \
			"empty" not in self.myparams:
			# --buildpkgonly without deep/empty: only build-time deps
			# matter, so drop run-time deps.
			edepend["RDEPEND"] = ""
			edepend["PDEPEND"] = ""
		bdeps_optional = False

		if pkg.built and not removal_action:
			if self.myopts.get("--with-bdeps", "n") == "y":
				# Pull in build time deps as requested, but marked them as
				# "optional" since they are not strictly required. This allows
				# more freedom in the merge order calculation for solving
				# circular dependencies. Don't convert to PDEPEND since that
				# could make --with-bdeps=y less effective if it is used to
				# adjust merge order to prevent built_with_use() calls from
				# failing.
				bdeps_optional = True
				# built packages do not have build time dependencies.
				edepend["DEPEND"] = ""

		if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
			edepend["DEPEND"] = ""

		# --root-deps controls which root build-time deps apply to.
		root_deps = self.myopts.get("--root-deps")
		if root_deps is not None:
			if root_deps is True:
			elif root_deps == "rdeps":
				edepend["DEPEND"] = ""

			(bdeps_root, edepend["DEPEND"],
				self._priority(buildtime=(not bdeps_optional),
				optional=bdeps_optional)),
			(myroot, edepend["RDEPEND"], self._priority(runtime=True)),
			(myroot, edepend["PDEPEND"], self._priority(runtime_post=True))

		debug = "--debug" in self.myopts
		strict = mytype != "installed"
			for dep_root, dep_string, dep_priority in deps:
					print "Parent:   ", jbigkey
					print "Depstring:", dep_string
					print "Priority:", dep_priority
				vardb = self.roots[dep_root].trees["vartree"].dbapi
					selected_atoms = self._select_atoms(dep_root,
						dep_string, myuse=myuse, parent=pkg, strict=strict,
						priority=dep_priority)
				except portage.exception.InvalidDependString, e:
					show_invalid_depstring_notice(jbigkey, dep_string, str(e))
					print "Candidates:", selected_atoms

				for atom in selected_atoms:
						atom = portage.dep.Atom(atom)
						mypriority = dep_priority.copy()
						# A dep already satisfied by an installed package
						# is marked so merge order can relax it.
						if not atom.blocker and vardb.match(atom):
							mypriority.satisfied = True
						if not self._add_dep(Dependency(atom=atom,
							blocker=atom.blocker, depth=depth, parent=pkg,
							priority=mypriority, root=dep_root),
							allow_unsatisfied=allow_unsatisfied):
					except portage.exception.InvalidAtom, e:
						show_invalid_depstring_notice(
							pkg, dep_string, str(e))
						if not pkg.installed:
					print "Exiting...", jbigkey
		except portage.exception.AmbiguousPackageName, e:
			portage.writemsg("\n\n!!! An atom in the dependencies " + \
				"is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
				portage.writemsg("    %s\n" % cpv, noiselevel=-1)
			portage.writemsg("\n", noiselevel=-1)
			if mytype == "binary":
				portage.writemsg(
					"!!! This binary package cannot be installed: '%s'\n" % \
					mykey, noiselevel=-1)
			elif mytype == "ebuild":
				portdb = self.roots[myroot].trees["porttree"].dbapi
				myebuild, mylocation = portdb.findname2(mykey)
				portage.writemsg("!!! This ebuild cannot be installed: " + \
					"'%s'\n" % myebuild, noiselevel=-1)
			portage.writemsg("!!! Please notify the package maintainer " + \
				"that atoms must be fully-qualified.\n", noiselevel=-1)
5360 def _priority(self, **kwargs):
5361 if "remove" in self.myparams:
5362 priority_constructor = UnmergeDepPriority
5364 priority_constructor = DepPriority
5365 return priority_constructor(**kwargs)
	def _dep_expand(self, root_config, atom_without_category):
		"""
		Expand a category-less atom into all category-qualified atoms
		that have matches in the configured databases.

		@param root_config: a root config instance
		@type root_config: RootConfig
		@param atom_without_category: an atom without a category component
		@type atom_without_category: String
		@returns: a list of atoms containing categories (possibly empty)
		"""
		# Insert a dummy "null" category just to split out the package
		# name portion of the atom.
		null_cp = portage.dep_getkey(insert_category_into_atom(
			atom_without_category, "null"))
		cat, atom_pn = portage.catsplit(null_cp)

		dbs = self._filtered_trees[root_config.root]["dbs"]
		for db, pkg_type, built, installed, db_keys in dbs:
			for cat in db.categories:
				if db.cp_list("%s/%s" % (cat, atom_pn)):
		for cat in categories:
			deps.append(insert_category_into_atom(
				atom_without_category, cat))
	def _have_new_virt(self, root, atom_cp):
		"""
		Check the filtered databases of the given root for any package
		providing the given category/package (used to detect new-style
		virtuals).
		"""
		for db, pkg_type, built, installed, db_keys in \
			self._filtered_trees[root]["dbs"]:
			if db.cp_list(atom_cp):
	def _iter_atoms_for_pkg(self, pkg):
		"""
		Yield the argument atoms (and their args) that match pkg,
		skipping atoms better satisfied by a higher version in a
		different slot.
		"""
		# TODO: add multiple $ROOT support
		if pkg.root != self.target_root:
		atom_arg_map = self._atom_arg_map
		root_config = self.roots[pkg.root]
		for atom in self._set_atoms.iterAtomsForPackage(pkg):
			atom_cp = portage.dep_getkey(atom)
			if atom_cp != pkg.cp and \
				self._have_new_virt(pkg.root, atom_cp):
			visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
			visible_pkgs.reverse() # descending order
			for visible_pkg in visible_pkgs:
				if visible_pkg.cp != atom_cp:
				if pkg >= visible_pkg:
					# This is descending order, and we're not
					# interested in any versions <= pkg given.
				if pkg.slot_atom != visible_pkg.slot_atom:
					# A higher version in another slot satisfies the
					# atom better than pkg does.
					higher_slot = visible_pkg
			if higher_slot is not None:
			for arg in atom_arg_map[(atom, pkg.root)]:
				if isinstance(arg, PackageArg) and \
	def select_files(self, myfiles):
		"""Given a list of .tbz2s, .ebuilds sets, and deps, create the
		appropriate depgraph and return a favorite list."""
		debug = "--debug" in self.myopts
		root_config = self.roots[self.target_root]
		sets = root_config.sets
		getSetAtoms = root_config.setconfig.getSetAtoms
		myroot = self.target_root
		dbs = self._filtered_trees[myroot]["dbs"]
		vardb = self.trees[myroot]["vartree"].dbapi
		real_vardb = self._trees_orig[myroot]["vartree"].dbapi
		portdb = self.trees[myroot]["porttree"].dbapi
		bindb = self.trees[myroot]["bintree"].dbapi
		pkgsettings = self.pkgsettings[myroot]
		onlydeps = "--onlydeps" in self.myopts
			# Classify each argument by its extension/shape:
			# .tbz2 binary package, .ebuild file, filesystem path
			# (owner lookup), set name, or package atom.
			ext = os.path.splitext(x)[1]
				if not os.path.exists(x):
						os.path.join(pkgsettings["PKGDIR"], "All", x)):
						x = os.path.join(pkgsettings["PKGDIR"], "All", x)
					elif os.path.exists(
						os.path.join(pkgsettings["PKGDIR"], x)):
						x = os.path.join(pkgsettings["PKGDIR"], x)
						print "\n\n!!! Binary package '"+str(x)+"' does not exist."
						print "!!! Please ensure the tbz2 exists as specified.\n"
						return 0, myfavorites
				mytbz2=portage.xpak.tbz2(x)
				mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
				if os.path.realpath(x) != \
					os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
					print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
					return 0, myfavorites
				db_keys = list(bindb._aux_cache_keys)
				metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
				pkg = Package(type_name="binary", root_config=root_config,
					cpv=mykey, built=True, metadata=metadata,
				self._pkg_cache[pkg] = pkg
				args.append(PackageArg(arg=x, package=pkg,
					root_config=root_config))
			elif ext==".ebuild":
				ebuild_path = portage.util.normalize_path(os.path.abspath(x))
				pkgdir = os.path.dirname(ebuild_path)
				tree_root = os.path.dirname(os.path.dirname(pkgdir))
				cp = pkgdir[len(tree_root)+1:]
				e = portage.exception.PackageNotFound(
					("%s is not in a valid portage tree " + \
					"hierarchy or does not exist") % x)
				if not portage.isvalidatom(cp):
				cat = portage.catsplit(cp)[0]
				mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
				if not portage.isvalidatom("="+mykey):
				ebuild_path = portdb.findname(mykey)
					if ebuild_path != os.path.join(os.path.realpath(tree_root),
						cp, os.path.basename(ebuild_path)):
						print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
						return 0, myfavorites
					if mykey not in portdb.xmatch(
						"match-visible", portage.dep_getkey(mykey)):
						print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
						print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
						print colorize("BAD", "*** page for details.")
						countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
					raise portage.exception.PackageNotFound(
						"%s is not in a valid portage tree hierarchy or does not exist" % x)
				db_keys = list(portdb._aux_cache_keys)
				metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
				pkg = Package(type_name="ebuild", root_config=root_config,
					cpv=mykey, metadata=metadata, onlydeps=onlydeps)
				pkgsettings.setcpv(pkg)
				pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
				pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
				self._pkg_cache[pkg] = pkg
				args.append(PackageArg(arg=x, package=pkg,
					root_config=root_config))
			elif x.startswith(os.path.sep):
				if not x.startswith(myroot):
					portage.writemsg(("\n\n!!! '%s' does not start with" + \
						" $ROOT.\n") % x, noiselevel=-1)
				# Queue these up since it's most efficient to handle
				# multiple files in a single iter_owners() call.
				lookup_owners.append(x)
				if x in ("system", "world"):
				if x.startswith(SETPREFIX):
					s = x[len(SETPREFIX):]
						raise portage.exception.PackageSetNotFound(s)
					# Recursively expand sets so that containment tests in
					# self._get_parent_sets() properly match atoms in nested
					# sets (like if world contains system).
					expanded_set = InternalPackageSet(
						initial_atoms=getSetAtoms(s))
					self._sets[s] = expanded_set
					args.append(SetArg(arg=x, set=expanded_set,
						root_config=root_config))
				if not is_valid_package_atom(x):
					portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
					portage.writemsg("!!! Please check ebuild(5) for full details.\n")
					portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
				# Don't expand categories or old-style virtuals here unless
				# necessary. Expansion of old-style virtuals here causes at
				# least the following problems:
				#   1) It's more difficult to determine which set(s) an atom
				#      came from, if any.
				#   2) It takes away freedom from the resolver to choose other
				#      possible expansions when necessary.
					args.append(AtomArg(arg=x, atom=x,
						root_config=root_config))
				expanded_atoms = self._dep_expand(root_config, x)
				installed_cp_set = set()
				for atom in expanded_atoms:
					atom_cp = portage.dep_getkey(atom)
					if vardb.cp_list(atom_cp):
						installed_cp_set.add(atom_cp)
				# Prefer the expansion matching the installed package,
				# if exactly one of the candidates is installed.
				if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
					installed_cp = iter(installed_cp_set).next()
					expanded_atoms = [atom for atom in expanded_atoms \
						if portage.dep_getkey(atom) == installed_cp]

				if len(expanded_atoms) > 1:
					ambiguous_package_name(x, expanded_atoms, root_config,
						self.spinner, self.myopts)
					return False, myfavorites
					atom = expanded_atoms[0]
					null_atom = insert_category_into_atom(x, "null")
					null_cp = portage.dep_getkey(null_atom)
					cat, atom_pn = portage.catsplit(null_cp)
					virts_p = root_config.settings.get_virts_p().get(atom_pn)
						# Allow the depgraph to choose which virtual.
						atom = insert_category_into_atom(x, "virtual")
						atom = insert_category_into_atom(x, "null")
				args.append(AtomArg(arg=x, atom=atom,
					root_config=root_config))

			# Resolve queued filesystem paths to owning packages.
			search_for_multiple = False
			if len(lookup_owners) > 1:
				search_for_multiple = True

			for x in lookup_owners:
				if not search_for_multiple and os.path.isdir(x):
					search_for_multiple = True
				relative_paths.append(x[len(myroot):])

			for pkg, relative_path in \
				real_vardb._owners.iter_owners(relative_paths):
				owners.add(pkg.mycpv)
				if not search_for_multiple:

				portage.writemsg(("\n\n!!! '%s' is not claimed " + \
					"by any package.\n") % lookup_owners[0], noiselevel=-1)

				slot = vardb.aux_get(cpv, ["SLOT"])[0]
					# portage now masks packages with missing slot, but it's
					# possible that one was installed by an older version
					atom = portage.cpv_getkey(cpv)
					atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
				args.append(AtomArg(arg=atom, atom=atom,
					root_config=root_config))

		if "--update" in self.myopts:
			# In some cases, the greedy slots behavior can pull in a slot that
			# the user would want to uninstall due to it being blocked by a
			# newer version in a different slot. Therefore, it's necessary to
			# detect and discard any that should be uninstalled. Each time
			# that arguments are updated, package selections are repeated in
			# order to ensure consistency with the current arguments:
			#
			#      1) Initialize args
			#      2) Select packages and generate initial greedy atoms
			#      3) Update args with greedy atoms
			#      4) Select packages and generate greedy atoms again, while
			#         accounting for any blockers between selected packages
			#      5) Update args with revised greedy atoms

			self._set_args(args)
				greedy_args.append(arg)
				if not isinstance(arg, AtomArg):
				for atom in self._greedy_slots(arg.root_config, arg.atom):
					AtomArg(arg=arg.arg, atom=atom,
						root_config=arg.root_config))

			self._set_args(greedy_args)

			# Revise greedy atoms, accounting for any blockers
			# between selected packages.
			revised_greedy_args = []
				revised_greedy_args.append(arg)
				if not isinstance(arg, AtomArg):
				for atom in self._greedy_slots(arg.root_config, arg.atom,
					blocker_lookahead=True):
					revised_greedy_args.append(
						AtomArg(arg=arg.arg, atom=atom,
							root_config=arg.root_config))
			args = revised_greedy_args
			del revised_greedy_args

		self._set_args(args)

		myfavorites = set(myfavorites)
			if isinstance(arg, (AtomArg, PackageArg)):
				myfavorites.add(arg.atom)
			elif isinstance(arg, SetArg):
				myfavorites.add(arg.arg)
		myfavorites = list(myfavorites)

		pprovideddict = pkgsettings.pprovideddict
			portage.writemsg("\n", noiselevel=-1)
		# Order needs to be preserved since a feature of --nodeps
		# is to allow the user to force a specific merge order.
			for atom in arg.set:
				self.spinner.update()
				dep = Dependency(atom=atom, onlydeps=onlydeps,
					root=myroot, parent=arg)
				atom_cp = portage.dep_getkey(atom)
					pprovided = pprovideddict.get(portage.dep_getkey(atom))
					if pprovided and portage.match_from_list(atom, pprovided):
						# A provided package has been specified on the command line.
						self._pprovided_args.append((arg, atom))
					if isinstance(arg, PackageArg):
						if not self._add_pkg(arg.package, dep) or \
							not self._create_graph():
							sys.stderr.write(("\n\n!!! Problem resolving " + \
								"dependencies for %s\n") % arg.arg)
							return 0, myfavorites
						portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
							(arg, atom), noiselevel=-1)
					pkg, existing_node = self._select_package(
						myroot, atom, onlydeps=onlydeps)
						# No package found: fatal except for system/world
						# set members, which are merely recorded as missing.
						if not (isinstance(arg, SetArg) and \
							arg.name in ("system", "world")):
							self._unsatisfied_deps_for_display.append(
								((myroot, atom), {}))
							return 0, myfavorites
						self._missing_args.append((arg, atom))
					if atom_cp != pkg.cp:
						# For old-style virtuals, we need to repeat the
						# package.provided check against the selected package.
						expanded_atom = atom.replace(atom_cp, pkg.cp)
						pprovided = pprovideddict.get(pkg.cp)
							portage.match_from_list(expanded_atom, pprovided):
							# A provided package has been
							# specified on the command line.
							self._pprovided_args.append((arg, atom))
					if pkg.installed and "selective" not in self.myparams:
						self._unsatisfied_deps_for_display.append(
							((myroot, atom), {}))
						# Previous behavior was to bail out in this case, but
						# since the dep is satisfied by the installed package,
						# it's more friendly to continue building the graph
						# and just show a warning message. Therefore, only bail
						# out here if the atom is not from either the system or
						if not (isinstance(arg, SetArg) and \
							arg.name in ("system", "world")):
							return 0, myfavorites

					# Add the selected package to the graph as soon as possible
					# so that later dep_check() calls can use it as feedback
					# for making more consistent atom selections.
					if not self._add_pkg(pkg, dep):
						if isinstance(arg, SetArg):
							sys.stderr.write(("\n\n!!! Problem resolving " + \
								"dependencies for %s from %s\n") % \
							sys.stderr.write(("\n\n!!! Problem resolving " + \
								"dependencies for %s\n") % atom)
						return 0, myfavorites

				except portage.exception.MissingSignature, e:
					portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
					portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
					portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
					portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
					portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
					return 0, myfavorites
				except portage.exception.InvalidSignature, e:
					portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
					portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
					portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
					portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
					portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
					return 0, myfavorites
				except SystemExit, e:
					raise # Needed else can't exit
				except Exception, e:
					print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
					print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)

		# Now that the root packages have been added to the graph,
		# process the dependencies.
		if not self._create_graph():
			return 0, myfavorites

		if "--usepkgonly" in self.myopts:
			for xs in self.digraph.all_nodes():
				if not isinstance(xs, Package):
				if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
					print "Missing binary for:",xs[2]

		except self._unknown_internal_error:
			return False, myfavorites

		# We're true here unless we are missing binaries.
		return (not missing,myfavorites)
	def _set_args(self, args):
		"""
		Create the "args" package set from atoms and packages given as
		arguments. This method can be called multiple times if necessary.
		The package selection cache is automatically invalidated, since
		arguments influence package selections.
		"""
		args_set = self._sets["args"]
			if not isinstance(arg, (AtomArg, PackageArg)):
			if atom in args_set:

		# Rebuild the flattened set of all atoms and the atom->arg map.
		self._set_atoms.clear()
		self._set_atoms.update(chain(*self._sets.itervalues()))
		atom_arg_map = self._atom_arg_map
		atom_arg_map.clear()
			for atom in arg.set:
				atom_key = (atom, arg.root_config.root)
				refs = atom_arg_map.get(atom_key)
					atom_arg_map[atom_key] = refs

		# Invalidate the package selection cache, since
		# arguments influence package selections.
		self._highest_pkg_cache.clear()
		for trees in self._filtered_trees.itervalues():
			trees["porttree"].dbapi._clear_cache()
# _greedy_slots: compute slot atoms for installed slots of highest_pkg.cp that
# differ from the slot of the highest visible match; optionally (with
# blocker_lookahead) drop slots that would trigger a blocker conflict.
# NOTE(review): line-numbered extract with gaps — some statements (e.g. the
# `slots = set()` / `greedy_pkgs = []` / `blockers = {}` initializers and
# early returns) are missing from this view.
5842 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5844 Return a list of slot atoms corresponding to installed slots that
5845 differ from the slot of the highest visible match. When
5846 blocker_lookahead is True, slot atoms that would trigger a blocker
5847 conflict are automatically discarded, potentially allowing automatic
5848 uninstallation of older slots when appropriate.
5850 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5851 if highest_pkg is None:
# Collect SLOT values of installed packages sharing highest_pkg's cp.
5853 vardb = root_config.trees["vartree"].dbapi
5855 for cpv in vardb.match(atom):
5856 # don't mix new virtuals with old virtuals
5857 if portage.cpv_getkey(cpv) == highest_pkg.cp:
5858 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5860 slots.add(highest_pkg.metadata["SLOT"])
5864 slots.remove(highest_pkg.metadata["SLOT"])
# For each remaining slot, keep only packages of the same cp that compare
# lower than the highest visible match.
5867 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5868 pkg, in_graph = self._select_package(root_config.root, slot_atom)
5869 if pkg is not None and \
5870 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5871 greedy_pkgs.append(pkg)
5874 if not blocker_lookahead:
5875 return [pkg.slot_atom for pkg in greedy_pkgs]
# Blocker lookahead: build an InternalPackageSet of blocker atoms per package
# from its *DEPEND strings; packages with invalid deps fall out of `blockers`.
5878 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5879 for pkg in greedy_pkgs + [highest_pkg]:
5880 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5882 atoms = self._select_atoms(
5883 pkg.root, dep_str, pkg.use.enabled,
5884 parent=pkg, strict=True)
5885 except portage.exception.InvalidDependString:
5887 blocker_atoms = (x for x in atoms if x.blocker)
5888 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5890 if highest_pkg not in blockers:
5893 # filter packages with invalid deps
5894 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5896 # filter packages that conflict with highest_pkg
5897 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5898 (blockers[highest_pkg].findAtomForPackage(pkg) or \
5899 blockers[pkg].findAtomForPackage(highest_pkg))]
5904 # If two packages conflict, discard the lower version.
5905 discard_pkgs = set()
5906 greedy_pkgs.sort(reverse=True)
5907 for i in xrange(len(greedy_pkgs) - 1):
5908 pkg1 = greedy_pkgs[i]
5909 if pkg1 in discard_pkgs:
5911 for j in xrange(i + 1, len(greedy_pkgs)):
5912 pkg2 = greedy_pkgs[j]
5913 if pkg2 in discard_pkgs:
5915 if blockers[pkg1].findAtomForPackage(pkg2) or \
5916 blockers[pkg2].findAtomForPackage(pkg1):
5918 discard_pkgs.add(pkg2)
5920 return [pkg.slot_atom for pkg in greedy_pkgs \
5921 if pkg not in discard_pkgs]
# _select_atoms_from_graph: thin wrapper that forces atom selection to use the
# graph-backed trees so already-graphed / installed-but-unreplaced packages are
# preferred, then delegates to _select_atoms_highest_available.
5923 def _select_atoms_from_graph(self, *pargs, **kwargs):
5925 Prefer atoms matching packages that have already been
5926 added to the graph or those that are installed and have
5927 not been scheduled for replacement.
5929 kwargs["trees"] = self._graph_trees
5930 return self._select_atoms_highest_available(*pargs, **kwargs)
# _select_atoms_highest_available: run portage.dep_check() on a dependency
# string and return the atoms it selects. Temporarily disables
# portage.dep._dep_check_strict around the call and restores it afterwards;
# the parent package is only attached to trees[root] for buildtime deps
# (see comment at 5940-5945).
# NOTE(review): line-numbered extract with gaps — e.g. the `if trees is None:`
# guard before 5938 and the try/finally structure are not fully visible.
5932 def _select_atoms_highest_available(self, root, depstring,
5933 myuse=None, parent=None, strict=True, trees=None, priority=None):
5934 """This will raise InvalidDependString if necessary. If trees is
5935 None then self._filtered_trees is used."""
5936 pkgsettings = self.pkgsettings[root]
5938 trees = self._filtered_trees
5939 if not getattr(priority, "buildtime", False):
5940 # The parent should only be passed to dep_check() for buildtime
5941 # dependencies since that's the only case when it's appropriate
5942 # to trigger the circular dependency avoidance code which uses it.
5943 # It's important not to trigger the same circular dependency
5944 # avoidance code for runtime dependencies since it's not needed
5945 # and it can promote an incorrect package choice.
5949 if parent is not None:
5950 trees[root]["parent"] = parent
# Strictness is relaxed only for the duration of the dep_check call.
5952 portage.dep._dep_check_strict = False
5953 mycheck = portage.dep_check(depstring, None,
5954 pkgsettings, myuse=myuse,
5955 myroot=root, trees=trees)
# Cleanup: detach the parent and restore strict checking (presumably in a
# finally block — the `finally:` line is missing from this view).
5957 if parent is not None:
5958 trees[root].pop("parent")
5959 portage.dep._dep_check_strict = True
# On failure mycheck[1] carries the error message; on success it carries the
# selected atoms.
5961 raise portage.exception.InvalidDependString(mycheck[1])
5962 selected_atoms = mycheck[1]
5963 return selected_atoms
# _show_unsatisfied_dep: diagnostic printer for an unsatisfiable atom. It
# scans all configured dbs for candidate packages, classifies them as
# masked / missing-IUSE / wrong-USE, and prints the most helpful explanation
# (USE-change hints, masking reasons, or a plain "no ebuilds" message),
# followed by the dependency chain of parents that pulled the atom in.
# NOTE(review): line-numbered extract with gaps — several initializers
# (e.g. `missing_use = []`, `msg = []`) and branch bodies are missing from
# this view.
5965 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5966 atom = portage.dep.Atom(atom)
5967 atom_set = InternalPackageSet(initial_atoms=(atom,))
# Build a USE-free variant of the atom so matching is not restricted by USE
# deps; the slot (if any) is re-appended.
5968 atom_without_use = atom
5970 atom_without_use = portage.dep.remove_slot(atom)
5972 atom_without_use += ":" + atom.slot
5973 atom_without_use = portage.dep.Atom(atom_without_use)
5974 xinfo = '"%s"' % atom
5977 # Discard null/ from failed cpv_expand category expansion.
5978 xinfo = xinfo.replace("null/", "")
5979 masked_packages = []
5981 masked_pkg_instances = set()
5982 missing_licenses = []
5983 have_eapi_mask = False
5984 pkgsettings = self.pkgsettings[root]
5985 implicit_iuse = pkgsettings._get_implicit_iuse()
5986 root_config = self.roots[root]
5987 portdb = self.roots[root].trees["porttree"].dbapi
5988 dbs = self._filtered_trees[root]["dbs"]
# Scan every db (ebuild/binary/installed) for candidates and collect mask
# reasons via get_mask_info().
5989 for db, pkg_type, built, installed, db_keys in dbs:
5993 if hasattr(db, "xmatch"):
5994 cpv_list = db.xmatch("match-all", atom_without_use)
5996 cpv_list = db.match(atom_without_use)
5999 for cpv in cpv_list:
6000 metadata, mreasons = get_mask_info(root_config, cpv,
6001 pkgsettings, db, pkg_type, built, installed, db_keys)
6002 if metadata is not None:
6003 pkg = Package(built=built, cpv=cpv,
6004 installed=installed, metadata=metadata,
6005 root_config=root_config)
6006 if pkg.cp != atom.cp:
6007 # A cpv can be returned from dbapi.match() as an
6008 # old-style virtual match even in cases when the
6009 # package does not actually PROVIDE the virtual.
6010 # Filter out any such false matches here.
6011 if not atom_set.findAtomForPackage(pkg):
6014 masked_pkg_instances.add(pkg)
6016 missing_use.append(pkg)
6019 masked_packages.append(
6020 (root_config, pkgsettings, cpv, metadata, mreasons))
# Classify USE problems: flags the atom requires but the package lacks in
# IUSE, versus flags that merely need enabling/disabling.
6022 missing_use_reasons = []
6023 missing_iuse_reasons = []
6024 for pkg in missing_use:
6025 use = pkg.use.enabled
6026 iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
6027 iuse_re = re.compile("^(%s)$" % "|".join(iuse))
6029 for x in atom.use.required:
6030 if iuse_re.match(x) is None:
6031 missing_iuse.append(x)
6034 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
6035 missing_iuse_reasons.append((pkg, mreasons))
6037 need_enable = sorted(atom.use.enabled.difference(use))
6038 need_disable = sorted(atom.use.disabled.intersection(use))
6039 if need_enable or need_disable:
6041 changes.extend(colorize("red", "+" + x) \
6042 for x in need_enable)
6043 changes.extend(colorize("blue", "-" + x) \
6044 for x in need_disable)
6045 mreasons.append("Change USE: %s" % " ".join(changes))
6046 missing_use_reasons.append((pkg, mreasons))
# Only unmasked instances are worth suggesting USE changes for.
6048 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6049 in missing_use_reasons if pkg not in masked_pkg_instances]
6051 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6052 in missing_iuse_reasons if pkg not in masked_pkg_instances]
6054 show_missing_use = False
6055 if unmasked_use_reasons:
6056 # Only show the latest version.
6057 show_missing_use = unmasked_use_reasons[:1]
6058 elif unmasked_iuse_reasons:
6059 if missing_use_reasons:
6060 # All packages with required IUSE are masked,
6061 # so display a normal masking message.
6064 show_missing_use = unmasked_iuse_reasons
# Output section: USE-change hints take priority, then masking reasons, then
# a plain "no ebuilds" message.
6066 if show_missing_use:
6067 print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6068 print "!!! One of the following packages is required to complete your request:"
6069 for pkg, mreasons in show_missing_use:
6070 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6072 elif masked_packages:
6074 colorize("BAD", "All ebuilds that could satisfy ") + \
6075 colorize("INFORM", xinfo) + \
6076 colorize("BAD", " have been masked.")
6077 print "!!! One of the following masked packages is required to complete your request:"
6078 have_eapi_mask = show_masked_packages(masked_packages)
6081 msg = ("The current version of portage supports " + \
6082 "EAPI '%s'. You must upgrade to a newer version" + \
6083 " of portage before EAPI masked packages can" + \
6084 " be installed.") % portage.const.EAPI
6085 from textwrap import wrap
6086 for line in wrap(msg, 75):
6091 print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6093 # Show parent nodes and the argument that pulled them in.
6094 traversed_nodes = set()
6097 while node is not None:
6098 traversed_nodes.add(node)
6099 msg.append('(dependency required by "%s" [%s])' % \
6100 (colorize('INFORM', str(node.cpv)), node.type_name))
6101 # When traversing to parents, prefer arguments over packages
6102 # since arguments are root nodes. Never traverse the same
6103 # package twice, in order to prevent an infinite loop.
6104 selected_parent = None
6105 for parent in self.digraph.parent_nodes(node):
6106 if isinstance(parent, DependencyArg):
6107 msg.append('(dependency required by "%s" [argument])' % \
6108 (colorize('INFORM', str(parent))))
6109 selected_parent = None
6111 if parent not in traversed_nodes:
6112 selected_parent = parent
6113 node = selected_parent
# _select_pkg_highest_available: memoizing front-end over
# _select_pkg_highest_available_imp, keyed by (root, atom, onlydeps). A cached
# result is refreshed if the package has since been added to the graph (the
# slot_pkg_map check below).
# NOTE(review): line-numbered extract with gaps — the cached-hit unpacking and
# the return statements are missing from this view. The trailing fragment
# (6136-6139) appears to belong to a separate method whose `def` line (6134-6135)
# is missing; it injects KEYWORDS-visible, non-masked packages into
# root_config.visible_pkgs.
6120 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6121 cache_key = (root, atom, onlydeps)
6122 ret = self._highest_pkg_cache.get(cache_key)
6125 if pkg and not existing:
6126 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6127 if existing and existing == pkg:
6128 # Update the cache to reflect that the
6129 # package has been added to the graph.
6131 self._highest_pkg_cache[cache_key] = ret
# Cache miss: compute and store.
6133 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6134 self._highest_pkg_cache[cache_key] = ret
# --- fragment of an adjacent method (def line missing from this view) ---
6137 settings = pkg.root_config.settings
6138 if visible(settings, pkg) and not (pkg.installed and \
6139 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6140 pkg.root_config.visible_pkgs.cpv_inject(pkg)
# _select_pkg_highest_available_imp: the core package chooser. For a given
# root/atom it iterates all dbs (in type-preference order; "ebuild" last),
# first looking for nodes already in the graph (find_existing_node pass),
# then for fresh candidates. It applies visibility, KEYWORDS, USE-dep,
# --noreplace/--newuse/--reinstall and old-vs-new-style-virtual filtering,
# and returns (best_match_or_None, existing_graph_node_or_None).
# NOTE(review): line-numbered extract with many gaps — numerous `continue`/
# `break` statements, `try:` headers and small branch bodies are missing from
# this view; comments below describe only what the visible lines establish.
6142 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6143 root_config = self.roots[root]
6144 pkgsettings = self.pkgsettings[root]
6145 dbs = self._filtered_trees[root]["dbs"]
6146 vardb = self.roots[root].trees["vartree"].dbapi
6147 portdb = self.roots[root].trees["porttree"].dbapi
6148 # List of acceptable packages, ordered by type preference.
6149 matched_packages = []
6150 highest_version = None
6151 if not isinstance(atom, portage.dep.Atom):
6152 atom = portage.dep.Atom(atom)
6154 atom_set = InternalPackageSet(initial_atoms=(atom,))
6155 existing_node = None
6157 usepkgonly = "--usepkgonly" in self.myopts
6158 empty = "empty" in self.myparams
6159 selective = "selective" in self.myparams
6161 noreplace = "--noreplace" in self.myopts
6162 # Behavior of the "selective" parameter depends on
6163 # whether or not a package matches an argument atom.
6164 # If an installed package provides an old-style
6165 # virtual that is no longer provided by an available
6166 # package, the installed package may match an argument
6167 # atom even though none of the available packages do.
6168 # Therefore, "selective" logic does not consider
6169 # whether or not an installed package matches an
6170 # argument atom. It only considers whether or not
6171 # available packages match argument atoms, which is
6172 # represented by the found_available_arg flag.
6173 found_available_arg = False
# Two passes: existing-graph-node lookup first, then fresh selection.
6174 for find_existing_node in True, False:
6177 for db, pkg_type, built, installed, db_keys in dbs:
6180 if installed and not find_existing_node:
6181 want_reinstall = reinstall or empty or \
6182 (found_available_arg and not selective)
6183 if want_reinstall and matched_packages:
6185 if hasattr(db, "xmatch"):
6186 cpv_list = db.xmatch("match-all", atom)
6188 cpv_list = db.match(atom)
6190 # USE=multislot can make an installed package appear as if
6191 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6192 # won't do any good as long as USE=multislot is enabled since
6193 # the newly built package still won't have the expected slot.
6194 # Therefore, assume that such SLOT dependencies are already
6195 # satisfied rather than forcing a rebuild.
6196 if installed and not cpv_list and atom.slot:
6197 for cpv in db.match(atom.cp):
6198 slot_available = False
6199 for other_db, other_type, other_built, \
6200 other_installed, other_keys in dbs:
6203 other_db.aux_get(cpv, ["SLOT"])[0]:
6204 slot_available = True
6208 if not slot_available:
6210 inst_pkg = self._pkg(cpv, "installed",
6211 root_config, installed=installed)
6212 # Remove the slot from the atom and verify that
6213 # the package matches the resulting atom.
6214 atom_without_slot = portage.dep.remove_slot(atom)
6216 atom_without_slot += str(atom.use)
6217 atom_without_slot = portage.dep.Atom(atom_without_slot)
6218 if portage.match_from_list(
6219 atom_without_slot, [inst_pkg]):
6220 cpv_list = [inst_pkg.cpv]
6225 pkg_status = "merge"
6226 if installed or onlydeps:
6227 pkg_status = "nomerge"
6230 for cpv in cpv_list:
6231 # Make --noreplace take precedence over --newuse.
6232 if not installed and noreplace and \
6233 cpv in vardb.match(atom):
6234 # If the installed version is masked, it may
6235 # be necessary to look at lower versions,
6236 # in case there is a visible downgrade.
6238 reinstall_for_flags = None
6239 cache_key = (pkg_type, root, cpv, pkg_status)
6240 calculated_use = True
6241 pkg = self._pkg_cache.get(cache_key)
6243 calculated_use = False
6245 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6248 pkg = Package(built=built, cpv=cpv,
6249 installed=installed, metadata=metadata,
6250 onlydeps=onlydeps, root_config=root_config,
6252 metadata = pkg.metadata
6254 metadata['CHOST'] = pkgsettings.get('CHOST', '')
6255 if not built and ("?" in metadata["LICENSE"] or \
6256 "?" in metadata["PROVIDE"]):
6257 # This is avoided whenever possible because
6258 # it's expensive. It only needs to be done here
6259 # if it has an effect on visibility.
6260 pkgsettings.setcpv(pkg)
6261 metadata["USE"] = pkgsettings["PORTAGE_USE"]
6262 calculated_use = True
6263 self._pkg_cache[pkg] = pkg
6265 if not installed or (built and matched_packages):
6266 # Only enforce visibility on installed packages
6267 # if there is at least one other visible package
6268 # available. By filtering installed masked packages
6269 # here, packages that have been masked since they
6270 # were installed can be automatically downgraded
6271 # to an unmasked version.
6273 if not visible(pkgsettings, pkg):
6275 except portage.exception.InvalidDependString:
6279 # Enable upgrade or downgrade to a version
6280 # with visible KEYWORDS when the installed
6281 # version is masked by KEYWORDS, but never
6282 # reinstall the same exact version only due
6283 # to a KEYWORDS mask.
6284 if built and matched_packages:
6286 different_version = None
6287 for avail_pkg in matched_packages:
6288 if not portage.dep.cpvequal(
6289 pkg.cpv, avail_pkg.cpv):
6290 different_version = avail_pkg
6292 if different_version is not None:
6295 pkgsettings._getMissingKeywords(
6296 pkg.cpv, pkg.metadata):
6299 # If the ebuild no longer exists or it's
6300 # keywords have been dropped, reject built
6301 # instances (installed or binary).
6302 # If --usepkgonly is enabled, assume that
6303 # the ebuild status should be ignored.
6307 pkg.cpv, "ebuild", root_config)
6308 except portage.exception.PackageNotFound:
6311 if not visible(pkgsettings, pkg_eb):
6314 if not pkg.built and not calculated_use:
6315 # This is avoided whenever possible because
6317 pkgsettings.setcpv(pkg)
6318 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6320 if pkg.cp != atom.cp:
6321 # A cpv can be returned from dbapi.match() as an
6322 # old-style virtual match even in cases when the
6323 # package does not actually PROVIDE the virtual.
6324 # Filter out any such false matches here.
6325 if not atom_set.findAtomForPackage(pkg):
# Argument matching only counts for the target root; found_available_arg
# feeds the "selective" logic documented above.
6329 if root == self.target_root:
6331 # Ebuild USE must have been calculated prior
6332 # to this point, in case atoms have USE deps.
6333 myarg = self._iter_atoms_for_pkg(pkg).next()
6334 except StopIteration:
6336 except portage.exception.InvalidDependString:
6338 # masked by corruption
6340 if not installed and myarg:
6341 found_available_arg = True
# USE-dep filtering for unbuilt packages.
6343 if atom.use and not pkg.built:
6344 use = pkg.use.enabled
6345 if atom.use.enabled.difference(use):
6347 if atom.use.disabled.intersection(use):
6349 if pkg.cp == atom_cp:
6350 if highest_version is None:
6351 highest_version = pkg
6352 elif pkg > highest_version:
6353 highest_version = pkg
6354 # At this point, we've found the highest visible
6355 # match from the current repo. Any lower versions
6356 # from this repo are ignored, so this so the loop
6357 # will always end with a break statement below
6359 if find_existing_node:
6360 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6363 if portage.dep.match_from_list(atom, [e_pkg]):
6364 if highest_version and \
6365 e_pkg.cp == atom_cp and \
6366 e_pkg < highest_version and \
6367 e_pkg.slot_atom != highest_version.slot_atom:
6368 # There is a higher version available in a
6369 # different slot, so this existing node is
6373 matched_packages.append(e_pkg)
6374 existing_node = e_pkg
6376 # Compare built package to current config and
6377 # reject the built package if necessary.
6378 if built and not installed and \
6379 ("--newuse" in self.myopts or \
6380 "--reinstall" in self.myopts):
6381 iuses = pkg.iuse.all
6382 old_use = pkg.use.enabled
6384 pkgsettings.setcpv(myeb)
6386 pkgsettings.setcpv(pkg)
6387 now_use = pkgsettings["PORTAGE_USE"].split()
6388 forced_flags = set()
6389 forced_flags.update(pkgsettings.useforce)
6390 forced_flags.update(pkgsettings.usemask)
6392 if myeb and not usepkgonly:
6393 cur_iuse = myeb.iuse.all
6394 if self._reinstall_for_flags(forced_flags,
6398 # Compare current config to installed package
6399 # and do not reinstall if possible.
6400 if not installed and \
6401 ("--newuse" in self.myopts or \
6402 "--reinstall" in self.myopts) and \
6403 cpv in vardb.match(atom):
6404 pkgsettings.setcpv(pkg)
6405 forced_flags = set()
6406 forced_flags.update(pkgsettings.useforce)
6407 forced_flags.update(pkgsettings.usemask)
6408 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6409 old_iuse = set(filter_iuse_defaults(
6410 vardb.aux_get(cpv, ["IUSE"])[0].split()))
6411 cur_use = pkg.use.enabled
6412 cur_iuse = pkg.iuse.all
6413 reinstall_for_flags = \
6414 self._reinstall_for_flags(
6415 forced_flags, old_use, old_iuse,
6417 if reinstall_for_flags:
6421 matched_packages.append(pkg)
6422 if reinstall_for_flags:
6423 self._reinstall_nodes[pkg] = \
6427 if not matched_packages:
6430 if "--debug" in self.myopts:
6431 for pkg in matched_packages:
6432 portage.writemsg("%s %s\n" % \
6433 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6435 # Filter out any old-style virtual matches if they are
6436 # mixed with new-style virtual matches.
6437 cp = portage.dep_getkey(atom)
6438 if len(matched_packages) > 1 and \
6439 "virtual" == portage.catsplit(cp)[0]:
6440 for pkg in matched_packages:
6443 # Got a new-style virtual, so filter
6444 # out any old-style virtuals.
6445 matched_packages = [pkg for pkg in matched_packages \
6449 if len(matched_packages) > 1:
6450 bestmatch = portage.best(
6451 [pkg.cpv for pkg in matched_packages])
6452 matched_packages = [pkg for pkg in matched_packages \
6453 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6455 # ordered by type preference ("ebuild" type is the last resort)
6456 return matched_packages[-1], existing_node
# _select_pkg_from_graph: graph-restricted counterpart of the package
# selector. Matches against the graph-backed porttree db (packages already in
# the graph or installed and not scheduled for replacement) and returns
# (highest_match, existing_graph_node). The not-found early return is missing
# from this view (gap in the embedded numbering at 6466-6467).
6458 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6460 Select packages that have already been added to the graph or
6461 those that are installed and have not been scheduled for
6464 graph_db = self._graph_trees[root]["porttree"].dbapi
6465 matches = graph_db.match_pkgs(atom)
6468 pkg = matches[-1] # highest match
6469 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6470 return pkg, in_graph
# _complete_graph: pull deep dependencies of the required sets (args, system,
# world) into the graph so initially-satisfied deps are not broken by the new
# graph. Only runs with --complete-graph ("complete" in myparams); switches
# the selector methods to their graph-restricted variants and temporarily
# enables "deep".
# NOTE(review): line-numbered extract with gaps — several `continue`/`return`
# lines and the `args = []` initializer are missing from this view.
6472 def _complete_graph(self):
6474 Add any deep dependencies of required sets (args, system, world) that
6475 have not been pulled into the graph yet. This ensures that the graph
6476 is consistent such that initially satisfied deep dependencies are not
6477 broken in the new graph. Initially unsatisfied dependencies are
6478 irrelevant since we only want to avoid breaking dependencies that are
6481 Since this method can consume enough time to disturb users, it is
6482 currently only enabled by the --complete-graph option.
6484 if "--buildpkgonly" in self.myopts or \
6485 "recurse" not in self.myparams:
6488 if "complete" not in self.myparams:
6489 # Skip this to avoid consuming enough time to disturb users.
6492 # Put the depgraph into a mode that causes it to only
6493 # select packages that have already been added to the
6494 # graph or those that are installed and have not been
6495 # scheduled for replacement. Also, toggle the "deep"
6496 # parameter so that all dependencies are traversed and
6498 self._select_atoms = self._select_atoms_from_graph
6499 self._select_package = self._select_pkg_from_graph
6500 already_deep = "deep" in self.myparams
6501 if not already_deep:
6502 self.myparams.add("deep")
6504 for root in self.roots:
6505 required_set_names = self._required_set_names.copy()
6506 if root == self.target_root and \
6507 (already_deep or "empty" in self.myparams):
6508 required_set_names.difference_update(self._sets)
6509 if not required_set_names and not self._ignored_deps:
6511 root_config = self.roots[root]
6512 setconfig = root_config.setconfig
6514 # Reuse existing SetArg instances when available.
6515 for arg in self.digraph.root_nodes():
6516 if not isinstance(arg, SetArg):
6518 if arg.root_config != root_config:
6520 if arg.name in required_set_names:
6522 required_set_names.remove(arg.name)
6523 # Create new SetArg instances only when necessary.
6524 for s in required_set_names:
6525 expanded_set = InternalPackageSet(
6526 initial_atoms=setconfig.getSetAtoms(s))
6527 atom = SETPREFIX + s
6528 args.append(SetArg(arg=atom, set=expanded_set,
6529 root_config=root_config))
6530 vardb = root_config.trees["vartree"].dbapi
# Queue every set atom as a Dependency for graph traversal, plus any deps
# that were previously deferred into _ignored_deps.
6532 for atom in arg.set:
6533 self._dep_stack.append(
6534 Dependency(atom=atom, root=root, parent=arg))
6535 if self._ignored_deps:
6536 self._dep_stack.extend(self._ignored_deps)
6537 self._ignored_deps = []
6538 if not self._create_graph(allow_unsatisfied=True):
6540 # Check the unsatisfied deps to see if any initially satisfied deps
6541 # will become unsatisfied due to an upgrade. Initially unsatisfied
6542 # deps are irrelevant since we only want to avoid breaking deps
6543 # that are initially satisfied.
6544 while self._unsatisfied_deps:
6545 dep = self._unsatisfied_deps.pop()
6546 matches = vardb.match_pkgs(dep.atom)
6548 self._initially_unsatisfied_deps.append(dep)
6550 # An scheduled installation broke a deep dependency.
6551 # Add the installed package to the graph so that it
6552 # will be appropriately reported as a slot collision
6553 # (possibly solvable via backtracking).
6554 pkg = matches[-1] # highest match
6555 if not self._add_pkg(pkg, dep):
6557 if not self._create_graph(allow_unsatisfied=True):
# _pkg: fetch a Package instance from self._pkg_cache or build a fresh one
# from the appropriate tree's dbapi metadata; ebuild-type packages get their
# USE/CHOST computed via pkgsettings. Raises PackageNotFound when aux_get
# fails.
# NOTE(review): line-numbered extract with gaps — the `operation = "merge"`
# default, the cache-hit early return and the try/except structure around
# aux_get are missing from this view.
6561 def _pkg(self, cpv, type_name, root_config, installed=False):
6563 Get a package instance from the cache, or create a new
6564 one if necessary. Raises KeyError from aux_get if it
6565 failures for some reason (package does not exist or is
6570 operation = "nomerge"
6571 pkg = self._pkg_cache.get(
6572 (type_name, root_config.root, cpv, operation))
6574 tree_type = self.pkg_tree_map[type_name]
6575 db = root_config.trees[tree_type].dbapi
# Use the original (un-filtered) tree's aux-cache key list for metadata.
6576 db_keys = list(self._trees_orig[root_config.root][
6577 tree_type].dbapi._aux_cache_keys)
6579 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6581 raise portage.exception.PackageNotFound(cpv)
6582 pkg = Package(cpv=cpv, metadata=metadata,
6583 root_config=root_config, installed=installed)
6584 if type_name == "ebuild":
6585 settings = self.pkgsettings[root_config.root]
6586 settings.setcpv(pkg)
6587 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6588 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6589 self._pkg_cache[pkg] = pkg
# validate_blockers: prune blockers that match nothing in the graph, and for
# real blocks either schedule uninstalls (with hard ordering deps) or record
# them as unsolvable. Uses a per-root BlockerCache keyed by COUNTER to avoid
# re-running dep_check on every installed package.
# NOTE(review): line-numbered extract with many gaps — loop headers (e.g. the
# per-package loop over the vardb), `try:` lines, `continue`/`break`/`return`
# statements and some branch bodies are missing from this view.
6592 def validate_blockers(self):
6593 """Remove any blockers from the digraph that do not match any of the
6594 packages within the graph. If necessary, create hard deps to ensure
6595 correct merge order such that mutually blocking packages are never
6596 installed simultaneously."""
6598 if "--buildpkgonly" in self.myopts or \
6599 "--nodeps" in self.myopts:
6602 #if "deep" in self.myparams:
6604 # Pull in blockers from all installed packages that haven't already
6605 # been pulled into the depgraph. This is not enabled by default
6606 # due to the performance penalty that is incurred by all the
6607 # additional dep_check calls that are required.
6609 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6610 for myroot in self.trees:
6611 vardb = self.trees[myroot]["vartree"].dbapi
6612 portdb = self.trees[myroot]["porttree"].dbapi
6613 pkgsettings = self.pkgsettings[myroot]
6614 final_db = self.mydbapi[myroot]
# stale_cache tracks cache entries not touched this pass; leftovers are
# deleted at the end (6733-6734).
6616 blocker_cache = BlockerCache(myroot, vardb)
6617 stale_cache = set(blocker_cache)
6620 stale_cache.discard(cpv)
6621 pkg_in_graph = self.digraph.contains(pkg)
6623 # Check for masked installed packages. Only warn about
6624 # packages that are in the graph in order to avoid warning
6625 # about those that will be automatically uninstalled during
6626 # the merge process or by --depclean.
6628 if pkg_in_graph and not visible(pkgsettings, pkg):
6629 self._masked_installed.add(pkg)
6631 blocker_atoms = None
6637 self._blocker_parents.child_nodes(pkg))
6642 self._irrelevant_blockers.child_nodes(pkg))
6645 if blockers is not None:
6646 blockers = set(str(blocker.atom) \
6647 for blocker in blockers)
6649 # If this node has any blockers, create a "nomerge"
6650 # node for it so that they can be enforced.
6651 self.spinner.update()
# Cache entries are validated against the installed package's COUNTER.
6652 blocker_data = blocker_cache.get(cpv)
6653 if blocker_data is not None and \
6654 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6657 # If blocker data from the graph is available, use
6658 # it to validate the cache and update the cache if
6660 if blocker_data is not None and \
6661 blockers is not None:
6662 if not blockers.symmetric_difference(
6663 blocker_data.atoms):
6667 if blocker_data is None and \
6668 blockers is not None:
6669 # Re-use the blockers from the graph.
6670 blocker_atoms = sorted(blockers)
6671 counter = long(pkg.metadata["COUNTER"])
6673 blocker_cache.BlockerData(counter, blocker_atoms)
6674 blocker_cache[pkg.cpv] = blocker_data
6678 blocker_atoms = blocker_data.atoms
6680 # Use aux_get() to trigger FakeVartree global
6681 # updates on *DEPEND when appropriate.
6682 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6683 # It is crucial to pass in final_db here in order to
6684 # optimize dep_check calls by eliminating atoms via
6685 # dep_wordreduce and dep_eval calls.
6687 portage.dep._dep_check_strict = False
6689 success, atoms = portage.dep_check(depstr,
6690 final_db, pkgsettings, myuse=pkg.use.enabled,
6691 trees=self._graph_trees, myroot=myroot)
6692 except Exception, e:
6693 if isinstance(e, SystemExit):
6695 # This is helpful, for example, if a ValueError
6696 # is thrown from cpv_expand due to multiple
6697 # matches (this can happen if an atom lacks a
6699 show_invalid_depstring_notice(
6700 pkg, depstr, str(e))
6704 portage.dep._dep_check_strict = True
# Invalid deps are tolerated when the package is being replaced anyway.
6706 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6707 if replacement_pkg and \
6708 replacement_pkg[0].operation == "merge":
6709 # This package is being replaced anyway, so
6710 # ignore invalid dependencies so as not to
6711 # annoy the user too much (otherwise they'd be
6712 # forced to manually unmerge it first).
6714 show_invalid_depstring_notice(pkg, depstr, atoms)
6716 blocker_atoms = [myatom for myatom in atoms \
6717 if myatom.startswith("!")]
6718 blocker_atoms.sort()
6719 counter = long(pkg.metadata["COUNTER"])
6720 blocker_cache[cpv] = \
6721 blocker_cache.BlockerData(counter, blocker_atoms)
6724 for atom in blocker_atoms:
6725 blocker = Blocker(atom=portage.dep.Atom(atom),
6726 eapi=pkg.metadata["EAPI"], root=myroot)
6727 self._blocker_parents.add(blocker, pkg)
6728 except portage.exception.InvalidAtom, e:
6729 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6730 show_invalid_depstring_notice(
6731 pkg, depstr, "Invalid Atom: %s" % (e,))
6733 for cpv in stale_cache:
6734 del blocker_cache[cpv]
6735 blocker_cache.flush()
6738 # Discard any "uninstall" tasks scheduled by previous calls
6739 # to this method, since those tasks may not make sense given
6740 # the current graph state.
6741 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6742 if previous_uninstall_tasks:
6743 self._blocker_uninstalls = digraph()
6744 self.digraph.difference_update(previous_uninstall_tasks)
# Second phase: resolve each blocker against initial (installed) and final
# (post-merge) package sets.
6746 for blocker in self._blocker_parents.leaf_nodes():
6747 self.spinner.update()
6748 root_config = self.roots[blocker.root]
6749 virtuals = root_config.settings.getvirtuals()
6750 myroot = blocker.root
6751 initial_db = self.trees[myroot]["vartree"].dbapi
6752 final_db = self.mydbapi[myroot]
6754 provider_virtual = False
6755 if blocker.cp in virtuals and \
6756 not self._have_new_virt(blocker.root, blocker.cp):
6757 provider_virtual = True
6759 # Use this to check PROVIDE for each matched package
6761 atom_set = InternalPackageSet(
6762 initial_atoms=[blocker.atom])
# Old-style virtual blockers expand to one atom per PROVIDE provider.
6764 if provider_virtual:
6766 for provider_entry in virtuals[blocker.cp]:
6768 portage.dep_getkey(provider_entry)
6769 atoms.append(blocker.atom.replace(
6770 blocker.cp, provider_cp))
6772 atoms = [blocker.atom]
6774 blocked_initial = set()
6776 for pkg in initial_db.match_pkgs(atom):
6777 if atom_set.findAtomForPackage(pkg):
6778 blocked_initial.add(pkg)
6780 blocked_final = set()
6782 for pkg in final_db.match_pkgs(atom):
6783 if atom_set.findAtomForPackage(pkg):
6784 blocked_final.add(pkg)
6786 if not blocked_initial and not blocked_final:
6787 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6788 self._blocker_parents.remove(blocker)
6789 # Discard any parents that don't have any more blockers.
6790 for pkg in parent_pkgs:
6791 self._irrelevant_blockers.add(blocker, pkg)
6792 if not self._blocker_parents.child_nodes(pkg):
6793 self._blocker_parents.remove(pkg)
6795 for parent in self._blocker_parents.parent_nodes(blocker):
6796 unresolved_blocks = False
6797 depends_on_order = set()
6798 for pkg in blocked_initial:
6799 if pkg.slot_atom == parent.slot_atom:
6800 # TODO: Support blocks within slots in cases where it
6801 # might make sense. For example, a new version might
6802 # require that the old version be uninstalled at build
6805 if parent.installed:
6806 # Two currently installed packages conflict with
6807 # eachother. Ignore this case since the damage
6808 # is already done and this would be likely to
6809 # confuse users if displayed like a normal blocker.
6812 self._blocked_pkgs.add(pkg, blocker)
6814 if parent.operation == "merge":
6815 # Maybe the blocked package can be replaced or simply
6816 # unmerged to resolve this block.
6817 depends_on_order.add((pkg, parent))
6819 # None of the above blocker resolutions techniques apply,
6820 # so apparently this one is unresolvable.
6821 unresolved_blocks = True
6822 for pkg in blocked_final:
6823 if pkg.slot_atom == parent.slot_atom:
6824 # TODO: Support blocks within slots.
6826 if parent.operation == "nomerge" and \
6827 pkg.operation == "nomerge":
6828 # This blocker will be handled the next time that a
6829 # merge of either package is triggered.
6832 self._blocked_pkgs.add(pkg, blocker)
6834 # Maybe the blocking package can be
6835 # unmerged to resolve this block.
6836 if parent.operation == "merge" and pkg.installed:
6837 depends_on_order.add((pkg, parent))
6839 elif parent.operation == "nomerge":
6840 depends_on_order.add((parent, pkg))
6842 # None of the above blocker resolutions techniques apply,
6843 # so apparently this one is unresolvable.
6844 unresolved_blocks = True
6846 # Make sure we don't unmerge any package that have been pulled
6848 if not unresolved_blocks and depends_on_order:
6849 for inst_pkg, inst_task in depends_on_order:
6850 if self.digraph.contains(inst_pkg) and \
6851 self.digraph.parent_nodes(inst_pkg):
6852 unresolved_blocks = True
6855 if not unresolved_blocks and depends_on_order:
6856 for inst_pkg, inst_task in depends_on_order:
# Build an explicit "uninstall" task node mirroring the installed package.
6857 uninst_task = Package(built=inst_pkg.built,
6858 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6859 metadata=inst_pkg.metadata,
6860 operation="uninstall",
6861 root_config=inst_pkg.root_config,
6862 type_name=inst_pkg.type_name)
6863 self._pkg_cache[uninst_task] = uninst_task
6864 # Enforce correct merge order with a hard dep.
6865 self.digraph.addnode(uninst_task, inst_task,
6866 priority=BlockerDepPriority.instance)
6867 # Count references to this blocker so that it can be
6868 # invalidated after nodes referencing it have been
6870 self._blocker_uninstalls.addnode(uninst_task, blocker)
6871 if not unresolved_blocks and not depends_on_order:
6872 self._irrelevant_blockers.add(blocker, parent)
6873 self._blocker_parents.remove_edge(blocker, parent)
6874 if not self._blocker_parents.parent_nodes(blocker):
6875 self._blocker_parents.remove(blocker)
6876 if not self._blocker_parents.child_nodes(parent):
6877 self._blocker_parents.remove(parent)
6878 if unresolved_blocks:
6879 self._unsolvable_blockers.add(blocker, parent)
6883 def _accept_blocker_conflicts(self):
6885 for x in ("--buildpkgonly", "--fetchonly",
6886 "--fetch-all-uri", "--nodeps"):
6887 if x in self.myopts:
def _merge_order_bias(self, mygraph):
	"""
	For optimal leaf node selection, promote deep system runtime deps and
	order nodes from highest to lowest overall reference count.

	@param mygraph: digraph whose "order" list is sorted in place
	"""

	node_info = {}
	for node in mygraph.order:
		node_info[node] = len(mygraph.parent_nodes(node))
	deep_system_deps = _find_deep_system_runtime_deps(mygraph)

	def cmp_merge_preference(node1, node2):
		# NOTE(review): the source listing elided the comparator's
		# return statements; they are restored here to match the
		# contract stated in the docstring above — confirm against
		# upstream if possible.

		# Uninstall operations sort last (executed as late as possible).
		if node1.operation == 'uninstall':
			if node2.operation == 'uninstall':
				return 0
			return 1

		if node2.operation == 'uninstall':
			if node1.operation == 'uninstall':
				return 0
			return -1

		# Deep system runtime deps sort first.
		node1_sys = node1 in deep_system_deps
		node2_sys = node2 in deep_system_deps
		if node1_sys != node2_sys:
			if node1_sys:
				return -1
			return 1

		# Highest reference count first.
		return node_info[node2] - node_info[node1]

	mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
def altlist(self, reversed=False):
	"""
	Return a copy of the serialized merge list, computing it on demand.

	@param reversed: when True, return the tasks in reverse order.
		(The name shadows the builtin reversed(); kept unchanged for
		backward compatibility with existing callers.)
	@rtype: list
	"""
	# Loop until _serialize_tasks() succeeds; _serialize_tasks_retry
	# signals that the graph was extended and serialization must be
	# attempted again.
	while self._serialized_tasks_cache is None:
		self._resolve_conflicts()
		try:
			self._serialized_tasks_cache, self._scheduler_graph = \
				self._serialize_tasks()
		except self._serialize_tasks_retry:
			pass

	# Return a copy so callers cannot mutate the cache.
	retlist = self._serialized_tasks_cache[:]
	if reversed:
		retlist.reverse()
	return retlist
def schedulerGraph(self):
	"""
	The scheduler graph is identical to the normal one except that
	uninstall edges are reversed in specific cases that require
	conflicting packages to be temporarily installed simultaneously.
	This is intended for use by the Scheduler in its parallelization
	logic. It ensures that temporary simultaneous installation of
	conflicting packages is avoided when appropriate (especially for
	!!atom blockers), but allowed in specific cases that require it.

	Note that this method calls break_refs() which alters the state of
	internal Package instances such that this depgraph instance should
	not be used to perform any more calculations.
	"""
	if self._scheduler_graph is None:
		# NOTE(review): the source listing elided this line; restored
		# as altlist(), which populates _scheduler_graph as a side
		# effect — confirm against upstream.
		self.altlist()
	self.break_refs(self._scheduler_graph.order)
	return self._scheduler_graph
def break_refs(self, nodes):
	"""
	Take a mergelist like that returned from self.altlist() and
	break any references that lead back to the depgraph. This is
	useful if you want to hold references to packages without
	also holding the depgraph on the heap.

	@param nodes: iterable of tasks/packages; items without a
		root_config attribute (e.g. Blocker instances) are skipped.
	"""
	for node in nodes:
		if hasattr(node, "root_config"):
			# The FakeVartree references the _package_cache which
			# references the depgraph. So that Package instances don't
			# hold the depgraph and FakeVartree on the heap, replace
			# the RootConfig that references the FakeVartree with the
			# original RootConfig instance which references the actual
			# vartree.
			node.root_config = \
				self._trees_orig[node.root_config.root]["root_config"]
6978 def _resolve_conflicts(self):
6979 if not self._complete_graph():
6980 raise self._unknown_internal_error()
6982 if not self.validate_blockers():
6983 raise self._unknown_internal_error()
6985 if self._slot_collision_info:
6986 self._process_slot_conflicts()
# NOTE(review): this listing is a mangled paste — each line carries a stray
# original-file line number, indentation has been lost, and gaps in that
# embedded numbering show that some lines were elided (e.g. the
# initializations of retlist, asap_nodes, prefer_asap and uninst_task are
# not visible). Code below is kept byte-identical; only comments are added.
# Purpose (from the visible code): compute a merge-ordered task list from
# self.digraph, returning (retlist, scheduler_graph).
6988 def _serialize_tasks(self):
6990 if "--debug" in self.myopts:
6991 writemsg("\ndigraph:\n\n", noiselevel=-1)
6992 self.digraph.debug_print()
6993 writemsg("\n", noiselevel=-1)
6995 scheduler_graph = self.digraph.copy()
6996 mygraph=self.digraph.copy()
6997 # Prune "nomerge" root nodes if nothing depends on them, since
6998 # otherwise they slow down merge order calculation. Don't remove
6999 # non-root nodes since they help optimize merge order in some cases
7000 # such as revdep-rebuild.
7001 removed_nodes = set()
7003 for node in mygraph.root_nodes():
7004 if not isinstance(node, Package) or \
7005 node.installed or node.onlydeps:
7006 removed_nodes.add(node)
7008 self.spinner.update()
7009 mygraph.difference_update(removed_nodes)
7010 if not removed_nodes:
7012 removed_nodes.clear()
7013 self._merge_order_bias(mygraph)
7014 def cmp_circular_bias(n1, n2):
7016 RDEPEND is stronger than PDEPEND and this function
7017 measures such a strength bias within a circular
7018 dependency relationship.
7020 n1_n2_medium = n2 in mygraph.child_nodes(n1,
7021 ignore_priority=priority_range.ignore_medium_soft)
7022 n2_n1_medium = n1 in mygraph.child_nodes(n2,
7023 ignore_priority=priority_range.ignore_medium_soft)
7024 if n1_n2_medium == n2_n1_medium:
7029 myblocker_uninstalls = self._blocker_uninstalls.copy()
7031 # Contains uninstall tasks that have been scheduled to
7032 # occur after overlapping blockers have been installed.
7033 scheduled_uninstalls = set()
7034 # Contains any Uninstall tasks that have been ignored
7035 # in order to avoid the circular deps code path. These
7036 # correspond to blocker conflicts that could not be
7038 ignored_uninstall_tasks = set()
7039 have_uninstall_task = False
7040 complete = "complete" in self.myparams
7043 def get_nodes(**kwargs):
7045 Returns leaf nodes excluding Uninstall instances
7046 since those should be executed as late as possible.
7048 return [node for node in mygraph.leaf_nodes(**kwargs) \
7049 if isinstance(node, Package) and \
7050 (node.operation != "uninstall" or \
7051 node in scheduled_uninstalls)]
7053 # sys-apps/portage needs special treatment if ROOT="/"
7054 running_root = self._running_root.root
7055 from portage.const import PORTAGE_PACKAGE_ATOM
7056 runtime_deps = InternalPackageSet(
7057 initial_atoms=[PORTAGE_PACKAGE_ATOM])
7058 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
7059 PORTAGE_PACKAGE_ATOM)
7060 replacement_portage = self.mydbapi[running_root].match_pkgs(
7061 PORTAGE_PACKAGE_ATOM)
7064 running_portage = running_portage[0]
7066 running_portage = None
7068 if replacement_portage:
7069 replacement_portage = replacement_portage[0]
7071 replacement_portage = None
7073 if replacement_portage == running_portage:
7074 replacement_portage = None
7076 if replacement_portage is not None:
7077 # update from running_portage to replacement_portage asap
7078 asap_nodes.append(replacement_portage)
7080 if running_portage is not None:
# NOTE(review): Python-2-only "except ..., e" syntax below; would need
# "except ... as e" under Python 3.
7082 portage_rdepend = self._select_atoms_highest_available(
7083 running_root, running_portage.metadata["RDEPEND"],
7084 myuse=running_portage.use.enabled,
7085 parent=running_portage, strict=False)
7086 except portage.exception.InvalidDependString, e:
7087 portage.writemsg("!!! Invalid RDEPEND in " + \
7088 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7089 (running_root, running_portage.cpv, e), noiselevel=-1)
7091 portage_rdepend = []
7092 runtime_deps.update(atom for atom in portage_rdepend \
7093 if not atom.startswith("!"))
7095 def gather_deps(ignore_priority, mergeable_nodes,
7096 selected_nodes, node):
7098 Recursively gather a group of nodes that RDEPEND on
7099 eachother. This ensures that they are merged as a group
7100 and get their RDEPENDs satisfied as soon as possible.
7102 if node in selected_nodes:
7104 if node not in mergeable_nodes:
7106 if node == replacement_portage and \
7107 mygraph.child_nodes(node,
7108 ignore_priority=priority_range.ignore_medium_soft):
7109 # Make sure that portage always has all of it's
7110 # RDEPENDs installed first.
7112 selected_nodes.add(node)
7113 for child in mygraph.child_nodes(node,
7114 ignore_priority=ignore_priority):
7115 if not gather_deps(ignore_priority,
7116 mergeable_nodes, selected_nodes, child):
7120 def ignore_uninst_or_med(priority):
7121 if priority is BlockerDepPriority.instance:
7123 return priority_range.ignore_medium(priority)
7125 def ignore_uninst_or_med_soft(priority):
7126 if priority is BlockerDepPriority.instance:
7128 return priority_range.ignore_medium_soft(priority)
7130 tree_mode = "--tree" in self.myopts
7131 # Tracks whether or not the current iteration should prefer asap_nodes
7132 # if available. This is set to False when the previous iteration
7133 # failed to select any nodes. It is reset whenever nodes are
7134 # successfully selected.
7137 # Controls whether or not the current iteration should drop edges that
7138 # are "satisfied" by installed packages, in order to solve circular
7139 # dependencies. The deep runtime dependencies of installed packages are
7140 # not checked in this case (bug #199856), so it must be avoided
7141 # whenever possible.
7142 drop_satisfied = False
7144 # State of variables for successive iterations that loosen the
7145 # criteria for node selection.
7147 # iteration prefer_asap drop_satisfied
7152 # If no nodes are selected on the last iteration, it is due to
7153 # unresolved blockers or circular dependencies.
# Main selection loop: repeatedly pick leaf nodes from mygraph until it
# is empty, loosening selection criteria when nothing can be selected.
7155 while not mygraph.empty():
7156 self.spinner.update()
7157 selected_nodes = None
7158 ignore_priority = None
7159 if drop_satisfied or (prefer_asap and asap_nodes):
7160 priority_range = DepPrioritySatisfiedRange
7162 priority_range = DepPriorityNormalRange
7163 if prefer_asap and asap_nodes:
7164 # ASAP nodes are merged before their soft deps. Go ahead and
7165 # select root nodes here if necessary, since it's typical for
7166 # the parent to have been removed from the graph already.
7167 asap_nodes = [node for node in asap_nodes \
7168 if mygraph.contains(node)]
7169 for node in asap_nodes:
7170 if not mygraph.child_nodes(node,
7171 ignore_priority=priority_range.ignore_soft):
7172 selected_nodes = [node]
7173 asap_nodes.remove(node)
7175 if not selected_nodes and \
7176 not (prefer_asap and asap_nodes):
7177 for i in xrange(priority_range.NONE,
7178 priority_range.MEDIUM_SOFT + 1):
7179 ignore_priority = priority_range.ignore_priority[i]
7180 nodes = get_nodes(ignore_priority=ignore_priority)
7182 # If there is a mix of uninstall nodes with other
7183 # types, save the uninstall nodes for later since
7184 # sometimes a merge node will render an uninstall
7185 # node unnecessary (due to occupying the same slot),
7186 # and we want to avoid executing a separate uninstall
7187 # task in that case.
7189 good_uninstalls = []
7190 with_some_uninstalls_excluded = []
7192 if node.operation == "uninstall":
7193 slot_node = self.mydbapi[node.root
7194 ].match_pkgs(node.slot_atom)
7196 slot_node[0].operation == "merge":
7198 good_uninstalls.append(node)
7199 with_some_uninstalls_excluded.append(node)
7201 nodes = good_uninstalls
7202 elif with_some_uninstalls_excluded:
7203 nodes = with_some_uninstalls_excluded
7207 if ignore_priority is None and not tree_mode:
7208 # Greedily pop all of these nodes since no
7209 # relationship has been ignored. This optimization
7210 # destroys --tree output, so it's disabled in tree
7212 selected_nodes = nodes
7214 # For optimal merge order:
7215 # * Only pop one node.
7216 # * Removing a root node (node without a parent)
7217 # will not produce a leaf node, so avoid it.
7218 # * It's normal for a selected uninstall to be a
7219 # root node, so don't check them for parents.
7221 if node.operation == "uninstall" or \
7222 mygraph.parent_nodes(node):
7223 selected_nodes = [node]
# Fallback: gather groups of mutually-dependent mergeable nodes.
7229 if not selected_nodes:
7230 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7232 mergeable_nodes = set(nodes)
7233 if prefer_asap and asap_nodes:
7235 for i in xrange(priority_range.SOFT,
7236 priority_range.MEDIUM_SOFT + 1):
7237 ignore_priority = priority_range.ignore_priority[i]
7239 if not mygraph.parent_nodes(node):
7241 selected_nodes = set()
7242 if gather_deps(ignore_priority,
7243 mergeable_nodes, selected_nodes, node):
7246 selected_nodes = None
7250 if prefer_asap and asap_nodes and not selected_nodes:
7251 # We failed to find any asap nodes to merge, so ignore
7252 # them for the next iteration.
7256 if selected_nodes and ignore_priority is not None:
7257 # Try to merge ignored medium_soft deps as soon as possible
7258 # if they're not satisfied by installed packages.
7259 for node in selected_nodes:
7260 children = set(mygraph.child_nodes(node))
7261 soft = children.difference(
7262 mygraph.child_nodes(node,
7263 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7264 medium_soft = children.difference(
7265 mygraph.child_nodes(node,
7267 DepPrioritySatisfiedRange.ignore_medium_soft))
7268 medium_soft.difference_update(soft)
7269 for child in medium_soft:
7270 if child in selected_nodes:
7272 if child in asap_nodes:
7274 asap_nodes.append(child)
7276 if selected_nodes and len(selected_nodes) > 1:
7277 if not isinstance(selected_nodes, list):
7278 selected_nodes = list(selected_nodes)
7279 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7281 if not selected_nodes and not myblocker_uninstalls.is_empty():
7282 # An Uninstall task needs to be executed in order to
7283 # avoid conflict if possible.
7286 priority_range = DepPrioritySatisfiedRange
7288 priority_range = DepPriorityNormalRange
7290 mergeable_nodes = get_nodes(
7291 ignore_priority=ignore_uninst_or_med)
7293 min_parent_deps = None
7295 for task in myblocker_uninstalls.leaf_nodes():
7296 # Do some sanity checks so that system or world packages
7297 # don't get uninstalled inappropriately here (only really
7298 # necessary when --complete-graph has not been enabled).
7300 if task in ignored_uninstall_tasks:
7303 if task in scheduled_uninstalls:
7304 # It's been scheduled but it hasn't
7305 # been executed yet due to dependence
7306 # on installation of blocking packages.
7309 root_config = self.roots[task.root]
7310 inst_pkg = self._pkg_cache[
7311 ("installed", task.root, task.cpv, "nomerge")]
7313 if self.digraph.contains(inst_pkg):
7316 forbid_overlap = False
7317 heuristic_overlap = False
7318 for blocker in myblocker_uninstalls.parent_nodes(task):
7319 if blocker.eapi in ("0", "1"):
7320 heuristic_overlap = True
7321 elif blocker.atom.blocker.overlap.forbid:
7322 forbid_overlap = True
7324 if forbid_overlap and running_root == task.root:
7327 if heuristic_overlap and running_root == task.root:
7328 # Never uninstall sys-apps/portage or it's essential
7329 # dependencies, except through replacement.
7331 runtime_dep_atoms = \
7332 list(runtime_deps.iterAtomsForPackage(task))
7333 except portage.exception.InvalidDependString, e:
7334 portage.writemsg("!!! Invalid PROVIDE in " + \
7335 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7336 (task.root, task.cpv, e), noiselevel=-1)
7340 # Don't uninstall a runtime dep if it appears
7341 # to be the only suitable one installed.
7343 vardb = root_config.trees["vartree"].dbapi
7344 for atom in runtime_dep_atoms:
7345 other_version = None
7346 for pkg in vardb.match_pkgs(atom):
7347 if pkg.cpv == task.cpv and \
7348 pkg.metadata["COUNTER"] == \
7349 task.metadata["COUNTER"]:
7353 if other_version is None:
7359 # For packages in the system set, don't take
7360 # any chances. If the conflict can't be resolved
7361 # by a normal replacement operation then abort.
7364 for atom in root_config.sets[
7365 "system"].iterAtomsForPackage(task):
7368 except portage.exception.InvalidDependString, e:
7369 portage.writemsg("!!! Invalid PROVIDE in " + \
7370 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7371 (task.root, task.cpv, e), noiselevel=-1)
7377 # Note that the world check isn't always
7378 # necessary since self._complete_graph() will
7379 # add all packages from the system and world sets to the
7380 # graph. This just allows unresolved conflicts to be
7381 # detected as early as possible, which makes it possible
7382 # to avoid calling self._complete_graph() when it is
7383 # unnecessary due to blockers triggering an abortion.
7385 # For packages in the world set, go ahead an uninstall
7386 # when necessary, as long as the atom will be satisfied
7387 # in the final state.
7388 graph_db = self.mydbapi[task.root]
7391 for atom in root_config.sets[
7392 "world"].iterAtomsForPackage(task):
7394 for pkg in graph_db.match_pkgs(atom):
7401 self._blocked_world_pkgs[inst_pkg] = atom
7403 except portage.exception.InvalidDependString, e:
7404 portage.writemsg("!!! Invalid PROVIDE in " + \
7405 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7406 (task.root, task.cpv, e), noiselevel=-1)
7412 # Check the deps of parent nodes to ensure that
7413 # the chosen task produces a leaf node. Maybe
7414 # this can be optimized some more to make the
7415 # best possible choice, but the current algorithm
7416 # is simple and should be near optimal for most
7418 mergeable_parent = False
7420 for parent in mygraph.parent_nodes(task):
7421 parent_deps.update(mygraph.child_nodes(parent,
7422 ignore_priority=priority_range.ignore_medium_soft))
7423 if parent in mergeable_nodes and \
7424 gather_deps(ignore_uninst_or_med_soft,
7425 mergeable_nodes, set(), parent):
7426 mergeable_parent = True
7428 if not mergeable_parent:
7431 parent_deps.remove(task)
7432 if min_parent_deps is None or \
7433 len(parent_deps) < min_parent_deps:
7434 min_parent_deps = len(parent_deps)
# NOTE(review): uninst_task is assigned in elided lines above.
7437 if uninst_task is not None:
7438 # The uninstall is performed only after blocking
7439 # packages have been merged on top of it. File
7440 # collisions between blocking packages are detected
7441 # and removed from the list of files to be uninstalled.
7442 scheduled_uninstalls.add(uninst_task)
7443 parent_nodes = mygraph.parent_nodes(uninst_task)
7445 # Reverse the parent -> uninstall edges since we want
7446 # to do the uninstall after blocking packages have
7447 # been merged on top of it.
7448 mygraph.remove(uninst_task)
7449 for blocked_pkg in parent_nodes:
7450 mygraph.add(blocked_pkg, uninst_task,
7451 priority=BlockerDepPriority.instance)
7452 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7453 scheduler_graph.add(blocked_pkg, uninst_task,
7454 priority=BlockerDepPriority.instance)
7456 # Reset the state variables for leaf node selection and
7457 # continue trying to select leaf nodes.
7459 drop_satisfied = False
7462 if not selected_nodes:
7463 # Only select root nodes as a last resort. This case should
7464 # only trigger when the graph is nearly empty and the only
7465 # remaining nodes are isolated (no parents or children). Since
7466 # the nodes must be isolated, ignore_priority is not needed.
7467 selected_nodes = get_nodes()
7469 if not selected_nodes and not drop_satisfied:
7470 drop_satisfied = True
7473 if not selected_nodes and not myblocker_uninstalls.is_empty():
7474 # If possible, drop an uninstall task here in order to avoid
7475 # the circular deps code path. The corresponding blocker will
7476 # still be counted as an unresolved conflict.
7478 for node in myblocker_uninstalls.leaf_nodes():
7480 mygraph.remove(node)
7485 ignored_uninstall_tasks.add(node)
7488 if uninst_task is not None:
7489 # Reset the state variables for leaf node selection and
7490 # continue trying to select leaf nodes.
7492 drop_satisfied = False
7495 if not selected_nodes:
7496 self._circular_deps_for_display = mygraph
7497 raise self._unknown_internal_error()
7499 # At this point, we've succeeded in selecting one or more nodes, so
7500 # reset state variables for leaf node selection.
7502 drop_satisfied = False
7504 mygraph.difference_update(selected_nodes)
# Append the selected nodes to the result list, handling blocker /
# uninstall interactions along the way.
7506 for node in selected_nodes:
7507 if isinstance(node, Package) and \
7508 node.operation == "nomerge":
7511 # Handle interactions between blockers
7512 # and uninstallation tasks.
7513 solved_blockers = set()
7515 if isinstance(node, Package) and \
7516 "uninstall" == node.operation:
7517 have_uninstall_task = True
7520 vardb = self.trees[node.root]["vartree"].dbapi
7521 previous_cpv = vardb.match(node.slot_atom)
7523 # The package will be replaced by this one, so remove
7524 # the corresponding Uninstall task if necessary.
7525 previous_cpv = previous_cpv[0]
7527 ("installed", node.root, previous_cpv, "uninstall")
7529 mygraph.remove(uninst_task)
7533 if uninst_task is not None and \
7534 uninst_task not in ignored_uninstall_tasks and \
7535 myblocker_uninstalls.contains(uninst_task):
7536 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7537 myblocker_uninstalls.remove(uninst_task)
7538 # Discard any blockers that this Uninstall solves.
7539 for blocker in blocker_nodes:
7540 if not myblocker_uninstalls.child_nodes(blocker):
7541 myblocker_uninstalls.remove(blocker)
7542 solved_blockers.add(blocker)
7544 retlist.append(node)
7546 if (isinstance(node, Package) and \
7547 "uninstall" == node.operation) or \
7548 (uninst_task is not None and \
7549 uninst_task in scheduled_uninstalls):
7550 # Include satisfied blockers in the merge list
7551 # since the user might be interested and also
7552 # it serves as an indicator that blocking packages
7553 # will be temporarily installed simultaneously.
7554 for blocker in solved_blockers:
7555 retlist.append(Blocker(atom=blocker.atom,
7556 root=blocker.root, eapi=blocker.eapi,
7559 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7560 for node in myblocker_uninstalls.root_nodes():
7561 unsolvable_blockers.add(node)
7563 for blocker in unsolvable_blockers:
7564 retlist.append(blocker)
7566 # If any Uninstall tasks need to be executed in order
7567 # to avoid a conflict, complete the graph with any
7568 # dependencies that may have been initially
7569 # neglected (to ensure that unsafe Uninstall tasks
7570 # are properly identified and blocked from execution).
7571 if have_uninstall_task and \
7573 not unsolvable_blockers:
7574 self.myparams.add("complete")
7575 raise self._serialize_tasks_retry("")
7577 if unsolvable_blockers and \
7578 not self._accept_blocker_conflicts():
7579 self._unsatisfied_blockers_for_display = unsolvable_blockers
7580 self._serialized_tasks_cache = retlist[:]
7581 self._scheduler_graph = scheduler_graph
7582 raise self._unknown_internal_error()
7584 if self._slot_collision_info and \
7585 not self._accept_blocker_conflicts():
7586 self._serialized_tasks_cache = retlist[:]
7587 self._scheduler_graph = scheduler_graph
7588 raise self._unknown_internal_error()
7590 return retlist, scheduler_graph
# NOTE(review): mangled paste — stray original line numbers, lost
# indentation, and elided lines (e.g. the loop around root_nodes pruning
# and the initialization of display_order are not visible). Code kept
# byte-identical; only comments added.
# Purpose (from the visible code): report a circular-dependency failure
# by displaying the cycle members with --tree output and a hint message.
7592 def _show_circular_deps(self, mygraph):
7593 # No leaf nodes are available, so we have a circular
7594 # dependency panic situation. Reduce the noise level to a
7595 # minimum via repeated elimination of root nodes since they
7596 # have no parents and thus can not be part of a cycle.
7598 root_nodes = mygraph.root_nodes(
7599 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7602 mygraph.difference_update(root_nodes)
7603 # Display the USE flags that are enabled on nodes that are part
7604 # of dependency cycles in case that helps the user decide to
7605 # disable some of them.
7607 tempgraph = mygraph.copy()
# Peel nodes off a temporary copy to compute a display order; when no
# leaf exists (cycle), fall back to the first node in insertion order.
7608 while not tempgraph.empty():
7609 nodes = tempgraph.leaf_nodes()
7611 node = tempgraph.order[0]
7614 display_order.append(node)
7615 tempgraph.remove(node)
7616 display_order.reverse()
# Force --tree output for the cycle display.
7617 self.myopts.pop("--quiet", None)
7618 self.myopts.pop("--verbose", None)
7619 self.myopts["--tree"] = True
7620 portage.writemsg("\n\n", noiselevel=-1)
7621 self.display(display_order)
7622 prefix = colorize("BAD", " * ")
7623 portage.writemsg("\n", noiselevel=-1)
7624 portage.writemsg(prefix + "Error: circular dependencies:\n",
7626 portage.writemsg("\n", noiselevel=-1)
7627 mygraph.debug_print()
7628 portage.writemsg("\n", noiselevel=-1)
7629 portage.writemsg(prefix + "Note that circular dependencies " + \
7630 "can often be avoided by temporarily\n", noiselevel=-1)
7631 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7632 "optional dependencies.\n", noiselevel=-1)
7634 def _show_merge_list(self):
7635 if self._serialized_tasks_cache is not None and \
7636 not (self._displayed_list and \
7637 (self._displayed_list == self._serialized_tasks_cache or \
7638 self._displayed_list == \
7639 list(reversed(self._serialized_tasks_cache)))):
7640 display_list = self._serialized_tasks_cache[:]
7641 if "--tree" in self.myopts:
7642 display_list.reverse()
7643 self.display(display_list)
# NOTE(review): mangled paste — stray original line numbers, lost
# indentation, and elided lines (the initializations of conflict_pkgs,
# pruned_pkgs, indent, msg, max_parents and pruned_list are not visible
# here). Code kept byte-identical; only comments added.
# Purpose (from the visible code): after showing the merge list, print
# the conflicting packages and the parents that pulled each one in.
7645 def _show_unsatisfied_blockers(self, blockers):
7646 self._show_merge_list()
7647 msg = "Error: The above package list contains " + \
7648 "packages which cannot be installed " + \
7649 "at the same time on the same system."
7650 prefix = colorize("BAD", " * ")
7651 from textwrap import wrap
7652 portage.writemsg("\n", noiselevel=-1)
7653 for line in wrap(msg, 70):
7654 portage.writemsg(prefix + line + "\n", noiselevel=-1)
7656 # Display the conflicting packages along with the packages
7657 # that pulled them in. This is helpful for troubleshooting
7658 # cases in which blockers don't solve automatically and
7659 # the reasons are not apparent from the normal merge list
# Collect each conflicting package and its parent atoms; fall back to
# a synthetic "@world" parent for blocked world packages.
7663 for blocker in blockers:
7664 for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7665 self._blocker_parents.parent_nodes(blocker)):
7666 parent_atoms = self._parent_atoms.get(pkg)
7667 if not parent_atoms:
7668 atom = self._blocked_world_pkgs.get(pkg)
7669 if atom is not None:
7670 parent_atoms = set([("@world", atom)])
7672 conflict_pkgs[pkg] = parent_atoms
7675 # Reduce noise by pruning packages that are only
7676 # pulled in by other conflict packages.
7678 for pkg, parent_atoms in conflict_pkgs.iteritems():
7679 relevant_parent = False
7680 for parent, atom in parent_atoms:
7681 if parent not in conflict_pkgs:
7682 relevant_parent = True
7684 if not relevant_parent:
7685 pruned_pkgs.add(pkg)
7686 for pkg in pruned_pkgs:
7687 del conflict_pkgs[pkg]
7693 # Max number of parents shown, to avoid flooding the display.
7695 for pkg, parent_atoms in conflict_pkgs.iteritems():
7699 # Prefer packages that are not directly involved in a conflict.
7700 for parent_atom in parent_atoms:
7701 if len(pruned_list) >= max_parents:
7703 parent, atom = parent_atom
7704 if parent not in conflict_pkgs:
7705 pruned_list.add(parent_atom)
7707 for parent_atom in parent_atoms:
7708 if len(pruned_list) >= max_parents:
7710 pruned_list.add(parent_atom)
7712 omitted_parents = len(parent_atoms) - len(pruned_list)
7713 msg.append(indent + "%s pulled in by\n" % pkg)
7715 for parent_atom in pruned_list:
7716 parent, atom = parent_atom
7717 msg.append(2*indent)
7718 if isinstance(parent,
7719 (PackageArg, AtomArg)):
7720 # For PackageArg and AtomArg types, it's
7721 # redundant to display the atom attribute.
7722 msg.append(str(parent))
7724 # Display the specific atom from SetArg or
7726 msg.append("%s required by %s" % (atom, parent))
7730 msg.append(2*indent)
7731 msg.append("(and %d more)\n" % omitted_parents)
7735 sys.stderr.write("".join(msg))
7738 if "--quiet" not in self.myopts:
7739 show_blocker_docs_link()
7741 def display(self, mylist, favorites=[], verbosity=None):
7743 # This is used to prevent display_problems() from
7744 # redundantly displaying this exact same merge list
7745 # again via _show_merge_list().
7746 self._displayed_list = mylist
7748 if verbosity is None:
7749 verbosity = ("--quiet" in self.myopts and 1 or \
7750 "--verbose" in self.myopts and 3 or 2)
7751 favorites_set = InternalPackageSet(favorites)
7752 oneshot = "--oneshot" in self.myopts or \
7753 "--onlydeps" in self.myopts
7754 columns = "--columns" in self.myopts
7759 counters = PackageCounters()
7761 if verbosity == 1 and "--verbose" not in self.myopts:
7762 def create_use_string(*args):
7765 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7767 is_new, reinst_flags,
7768 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7769 alphabetical=("--alphabetical" in self.myopts)):
7777 cur_iuse = set(cur_iuse)
7778 enabled_flags = cur_iuse.intersection(cur_use)
7779 removed_iuse = set(old_iuse).difference(cur_iuse)
7780 any_iuse = cur_iuse.union(old_iuse)
7781 any_iuse = list(any_iuse)
7783 for flag in any_iuse:
7786 reinst_flag = reinst_flags and flag in reinst_flags
7787 if flag in enabled_flags:
7789 if is_new or flag in old_use and \
7790 (all_flags or reinst_flag):
7791 flag_str = red(flag)
7792 elif flag not in old_iuse:
7793 flag_str = yellow(flag) + "%*"
7794 elif flag not in old_use:
7795 flag_str = green(flag) + "*"
7796 elif flag in removed_iuse:
7797 if all_flags or reinst_flag:
7798 flag_str = yellow("-" + flag) + "%"
7801 flag_str = "(" + flag_str + ")"
7802 removed.append(flag_str)
7805 if is_new or flag in old_iuse and \
7806 flag not in old_use and \
7807 (all_flags or reinst_flag):
7808 flag_str = blue("-" + flag)
7809 elif flag not in old_iuse:
7810 flag_str = yellow("-" + flag)
7811 if flag not in iuse_forced:
7813 elif flag in old_use:
7814 flag_str = green("-" + flag) + "*"
7816 if flag in iuse_forced:
7817 flag_str = "(" + flag_str + ")"
7819 enabled.append(flag_str)
7821 disabled.append(flag_str)
7824 ret = " ".join(enabled)
7826 ret = " ".join(enabled + disabled + removed)
7828 ret = '%s="%s" ' % (name, ret)
7831 repo_display = RepoDisplay(self.roots)
7835 mygraph = self.digraph.copy()
7837 # If there are any Uninstall instances, add the corresponding
7838 # blockers to the digraph (useful for --tree display).
7840 executed_uninstalls = set(node for node in mylist \
7841 if isinstance(node, Package) and node.operation == "unmerge")
7843 for uninstall in self._blocker_uninstalls.leaf_nodes():
7844 uninstall_parents = \
7845 self._blocker_uninstalls.parent_nodes(uninstall)
7846 if not uninstall_parents:
7849 # Remove the corresponding "nomerge" node and substitute
7850 # the Uninstall node.
7851 inst_pkg = self._pkg_cache[
7852 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7854 mygraph.remove(inst_pkg)
7859 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7861 inst_pkg_blockers = []
7863 # Break the Package -> Uninstall edges.
7864 mygraph.remove(uninstall)
7866 # Resolution of a package's blockers
7867 # depend on it's own uninstallation.
7868 for blocker in inst_pkg_blockers:
7869 mygraph.add(uninstall, blocker)
7871 # Expand Package -> Uninstall edges into
7872 # Package -> Blocker -> Uninstall edges.
7873 for blocker in uninstall_parents:
7874 mygraph.add(uninstall, blocker)
7875 for parent in self._blocker_parents.parent_nodes(blocker):
7876 if parent != inst_pkg:
7877 mygraph.add(blocker, parent)
7879 # If the uninstall task did not need to be executed because
7880 # of an upgrade, display Blocker -> Upgrade edges since the
7881 # corresponding Blocker -> Uninstall edges will not be shown.
7883 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7884 if upgrade_node is not None and \
7885 uninstall not in executed_uninstalls:
7886 for blocker in uninstall_parents:
7887 mygraph.add(upgrade_node, blocker)
7889 unsatisfied_blockers = []
7894 if isinstance(x, Blocker) and not x.satisfied:
7895 unsatisfied_blockers.append(x)
7898 if "--tree" in self.myopts:
7899 depth = len(tree_nodes)
7900 while depth and graph_key not in \
7901 mygraph.child_nodes(tree_nodes[depth-1]):
7904 tree_nodes = tree_nodes[:depth]
7905 tree_nodes.append(graph_key)
7906 display_list.append((x, depth, True))
7907 shown_edges.add((graph_key, tree_nodes[depth-1]))
7909 traversed_nodes = set() # prevent endless circles
7910 traversed_nodes.add(graph_key)
7911 def add_parents(current_node, ordered):
7913 # Do not traverse to parents if this node is an
7914 # an argument or a direct member of a set that has
7915 # been specified as an argument (system or world).
7916 if current_node not in self._set_nodes:
7917 parent_nodes = mygraph.parent_nodes(current_node)
7919 child_nodes = set(mygraph.child_nodes(current_node))
7920 selected_parent = None
7921 # First, try to avoid a direct cycle.
7922 for node in parent_nodes:
7923 if not isinstance(node, (Blocker, Package)):
7925 if node not in traversed_nodes and \
7926 node not in child_nodes:
7927 edge = (current_node, node)
7928 if edge in shown_edges:
7930 selected_parent = node
7932 if not selected_parent:
7933 # A direct cycle is unavoidable.
7934 for node in parent_nodes:
7935 if not isinstance(node, (Blocker, Package)):
7937 if node not in traversed_nodes:
7938 edge = (current_node, node)
7939 if edge in shown_edges:
7941 selected_parent = node
7944 shown_edges.add((current_node, selected_parent))
7945 traversed_nodes.add(selected_parent)
7946 add_parents(selected_parent, False)
7947 display_list.append((current_node,
7948 len(tree_nodes), ordered))
7949 tree_nodes.append(current_node)
7951 add_parents(graph_key, True)
7953 display_list.append((x, depth, True))
7954 mylist = display_list
7955 for x in unsatisfied_blockers:
7956 mylist.append((x, 0, True))
7958 last_merge_depth = 0
7959 for i in xrange(len(mylist)-1,-1,-1):
7960 graph_key, depth, ordered = mylist[i]
7961 if not ordered and depth == 0 and i > 0 \
7962 and graph_key == mylist[i-1][0] and \
7963 mylist[i-1][1] == 0:
7964 # An ordered node got a consecutive duplicate when the tree was
7968 if ordered and graph_key[-1] != "nomerge":
7969 last_merge_depth = depth
7971 if depth >= last_merge_depth or \
7972 i < len(mylist) - 1 and \
7973 depth >= mylist[i+1][1]:
7976 from portage import flatten
7977 from portage.dep import use_reduce, paren_reduce
7978 # files to fetch list - avoids counting a same file twice
7979 # in size display (verbose mode)
7982 # Use this set to detect when all the "repoadd" strings are "[0]"
7983 # and disable the entire repo display in this case.
7986 for mylist_index in xrange(len(mylist)):
7987 x, depth, ordered = mylist[mylist_index]
7991 portdb = self.trees[myroot]["porttree"].dbapi
7992 bindb = self.trees[myroot]["bintree"].dbapi
7993 vardb = self.trees[myroot]["vartree"].dbapi
7994 vartree = self.trees[myroot]["vartree"]
7995 pkgsettings = self.pkgsettings[myroot]
7998 indent = " " * depth
8000 if isinstance(x, Blocker):
8002 blocker_style = "PKG_BLOCKER_SATISFIED"
8003 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
8005 blocker_style = "PKG_BLOCKER"
8006 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
8008 counters.blocks += 1
8010 counters.blocks_satisfied += 1
8011 resolved = portage.key_expand(
8012 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
8013 if "--columns" in self.myopts and "--quiet" in self.myopts:
8014 addl += " " + colorize(blocker_style, resolved)
8016 addl = "[%s %s] %s%s" % \
8017 (colorize(blocker_style, "blocks"),
8018 addl, indent, colorize(blocker_style, resolved))
8019 block_parents = self._blocker_parents.parent_nodes(x)
8020 block_parents = set([pnode[2] for pnode in block_parents])
8021 block_parents = ", ".join(block_parents)
8023 addl += colorize(blocker_style,
8024 " (\"%s\" is blocking %s)") % \
8025 (str(x.atom).lstrip("!"), block_parents)
8027 addl += colorize(blocker_style,
8028 " (is blocking %s)") % block_parents
8029 if isinstance(x, Blocker) and x.satisfied:
8034 blockers.append(addl)
8037 pkg_merge = ordered and pkg_status == "merge"
8038 if not pkg_merge and pkg_status == "merge":
8039 pkg_status = "nomerge"
8040 built = pkg_type != "ebuild"
8041 installed = pkg_type == "installed"
8043 metadata = pkg.metadata
8045 repo_name = metadata["repository"]
8046 if pkg_type == "ebuild":
8047 ebuild_path = portdb.findname(pkg_key)
8048 if not ebuild_path: # shouldn't happen
8049 raise portage.exception.PackageNotFound(pkg_key)
8050 repo_path_real = os.path.dirname(os.path.dirname(
8051 os.path.dirname(ebuild_path)))
8053 repo_path_real = portdb.getRepositoryPath(repo_name)
8054 pkg_use = list(pkg.use.enabled)
8056 restrict = flatten(use_reduce(paren_reduce(
8057 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8058 except portage.exception.InvalidDependString, e:
8059 if not pkg.installed:
8060 show_invalid_depstring_notice(x,
8061 pkg.metadata["RESTRICT"], str(e))
8065 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8066 "fetch" in restrict:
8069 counters.restrict_fetch += 1
8070 if portdb.fetch_check(pkg_key, pkg_use):
8073 counters.restrict_fetch_satisfied += 1
8075 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
8076 #param is used for -u, where you still *do* want to see when something is being upgraded.
8079 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8080 if vardb.cpv_exists(pkg_key):
8081 addl=" "+yellow("R")+fetch+" "
8084 counters.reinst += 1
8085 elif pkg_status == "uninstall":
8086 counters.uninst += 1
8087 # filter out old-style virtual matches
8088 elif installed_versions and \
8089 portage.cpv_getkey(installed_versions[0]) == \
8090 portage.cpv_getkey(pkg_key):
8091 myinslotlist = vardb.match(pkg.slot_atom)
8092 # If this is the first install of a new-style virtual, we
8093 # need to filter out old-style virtual matches.
8094 if myinslotlist and \
8095 portage.cpv_getkey(myinslotlist[0]) != \
8096 portage.cpv_getkey(pkg_key):
8099 myoldbest = myinslotlist[:]
8101 if not portage.dep.cpvequal(pkg_key,
8102 portage.best([pkg_key] + myoldbest)):
8104 addl += turquoise("U")+blue("D")
8106 counters.downgrades += 1
8109 addl += turquoise("U") + " "
8111 counters.upgrades += 1
8113 # New slot, mark it new.
8114 addl = " " + green("NS") + fetch + " "
8115 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8117 counters.newslot += 1
8119 if "--changelog" in self.myopts:
8120 inst_matches = vardb.match(pkg.slot_atom)
8122 changelogs.extend(self.calc_changelog(
8123 portdb.findname(pkg_key),
8124 inst_matches[0], pkg_key))
8126 addl = " " + green("N") + " " + fetch + " "
8135 forced_flags = set()
8136 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8137 forced_flags.update(pkgsettings.useforce)
8138 forced_flags.update(pkgsettings.usemask)
8140 cur_use = [flag for flag in pkg.use.enabled \
8141 if flag in pkg.iuse.all]
8142 cur_iuse = sorted(pkg.iuse.all)
8144 if myoldbest and myinslotlist:
8145 previous_cpv = myoldbest[0]
8147 previous_cpv = pkg.cpv
8148 if vardb.cpv_exists(previous_cpv):
8149 old_iuse, old_use = vardb.aux_get(
8150 previous_cpv, ["IUSE", "USE"])
8151 old_iuse = list(set(
8152 filter_iuse_defaults(old_iuse.split())))
8154 old_use = old_use.split()
8161 old_use = [flag for flag in old_use if flag in old_iuse]
8163 use_expand = pkgsettings["USE_EXPAND"].lower().split()
8165 use_expand.reverse()
8166 use_expand_hidden = \
8167 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8169 def map_to_use_expand(myvals, forcedFlags=False,
8173 for exp in use_expand:
8176 for val in myvals[:]:
8177 if val.startswith(exp.lower()+"_"):
8178 if val in forced_flags:
8179 forced[exp].add(val[len(exp)+1:])
8180 ret[exp].append(val[len(exp)+1:])
8183 forced["USE"] = [val for val in myvals \
8184 if val in forced_flags]
8186 for exp in use_expand_hidden:
8192 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8193 # are the only thing that triggered reinstallation.
8194 reinst_flags_map = {}
8195 reinstall_for_flags = self._reinstall_nodes.get(pkg)
8196 reinst_expand_map = None
8197 if reinstall_for_flags:
8198 reinst_flags_map = map_to_use_expand(
8199 list(reinstall_for_flags), removeHidden=False)
8200 for k in list(reinst_flags_map):
8201 if not reinst_flags_map[k]:
8202 del reinst_flags_map[k]
8203 if not reinst_flags_map.get("USE"):
8204 reinst_expand_map = reinst_flags_map.copy()
8205 reinst_expand_map.pop("USE", None)
8206 if reinst_expand_map and \
8207 not set(reinst_expand_map).difference(
8209 use_expand_hidden = \
8210 set(use_expand_hidden).difference(
8213 cur_iuse_map, iuse_forced = \
8214 map_to_use_expand(cur_iuse, forcedFlags=True)
8215 cur_use_map = map_to_use_expand(cur_use)
8216 old_iuse_map = map_to_use_expand(old_iuse)
8217 old_use_map = map_to_use_expand(old_use)
8220 use_expand.insert(0, "USE")
8222 for key in use_expand:
8223 if key in use_expand_hidden:
8225 verboseadd += create_use_string(key.upper(),
8226 cur_iuse_map[key], iuse_forced[key],
8227 cur_use_map[key], old_iuse_map[key],
8228 old_use_map[key], is_new,
8229 reinst_flags_map.get(key))
8234 if pkg_type == "ebuild" and pkg_merge:
8236 myfilesdict = portdb.getfetchsizes(pkg_key,
8237 useflags=pkg_use, debug=self.edebug)
8238 except portage.exception.InvalidDependString, e:
8239 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8240 show_invalid_depstring_notice(x, src_uri, str(e))
8243 if myfilesdict is None:
8244 myfilesdict="[empty/missing/bad digest]"
8246 for myfetchfile in myfilesdict:
8247 if myfetchfile not in myfetchlist:
8248 mysize+=myfilesdict[myfetchfile]
8249 myfetchlist.append(myfetchfile)
8251 counters.totalsize += mysize
8252 verboseadd += format_size(mysize)
8255 # assign index for a previous version in the same slot
8256 has_previous = False
8257 repo_name_prev = None
8258 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8260 slot_matches = vardb.match(slot_atom)
8263 repo_name_prev = vardb.aux_get(slot_matches[0],
8266 # now use the data to generate output
8267 if pkg.installed or not has_previous:
8268 repoadd = repo_display.repoStr(repo_path_real)
8270 repo_path_prev = None
8272 repo_path_prev = portdb.getRepositoryPath(
8274 if repo_path_prev == repo_path_real:
8275 repoadd = repo_display.repoStr(repo_path_real)
8277 repoadd = "%s=>%s" % (
8278 repo_display.repoStr(repo_path_prev),
8279 repo_display.repoStr(repo_path_real))
8281 repoadd_set.add(repoadd)
8283 xs = [portage.cpv_getkey(pkg_key)] + \
8284 list(portage.catpkgsplit(pkg_key)[2:])
8291 if "COLUMNWIDTH" in self.settings:
8293 mywidth = int(self.settings["COLUMNWIDTH"])
8294 except ValueError, e:
8295 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8297 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8298 self.settings["COLUMNWIDTH"], noiselevel=-1)
8300 oldlp = mywidth - 30
8303 # Convert myoldbest from a list to a string.
8307 for pos, key in enumerate(myoldbest):
8308 key = portage.catpkgsplit(key)[2] + \
8309 "-" + portage.catpkgsplit(key)[3]
8310 if key[-3:] == "-r0":
8312 myoldbest[pos] = key
8313 myoldbest = blue("["+", ".join(myoldbest)+"]")
8316 root_config = self.roots[myroot]
8317 system_set = root_config.sets["system"]
8318 world_set = root_config.sets["world"]
8323 pkg_system = system_set.findAtomForPackage(pkg)
8324 pkg_world = world_set.findAtomForPackage(pkg)
8325 if not (oneshot or pkg_world) and \
8326 myroot == self.target_root and \
8327 favorites_set.findAtomForPackage(pkg):
8328 # Maybe it will be added to world now.
8329 if create_world_atom(pkg, favorites_set, root_config):
8331 except portage.exception.InvalidDependString:
8332 # This is reported elsewhere if relevant.
8335 def pkgprint(pkg_str):
8338 return colorize("PKG_MERGE_SYSTEM", pkg_str)
8340 return colorize("PKG_MERGE_WORLD", pkg_str)
8342 return colorize("PKG_MERGE", pkg_str)
8343 elif pkg_status == "uninstall":
8344 return colorize("PKG_UNINSTALL", pkg_str)
8347 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8349 return colorize("PKG_NOMERGE_WORLD", pkg_str)
8351 return colorize("PKG_NOMERGE", pkg_str)
8354 properties = flatten(use_reduce(paren_reduce(
8355 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8356 except portage.exception.InvalidDependString, e:
8357 if not pkg.installed:
8358 show_invalid_depstring_notice(pkg,
8359 pkg.metadata["PROPERTIES"], str(e))
8363 interactive = "interactive" in properties
8364 if interactive and pkg.operation == "merge":
8365 addl = colorize("WARN", "I") + addl[1:]
8367 counters.interactive += 1
8372 if "--columns" in self.myopts:
8373 if "--quiet" in self.myopts:
8374 myprint=addl+" "+indent+pkgprint(pkg_cp)
8375 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8376 myprint=myprint+myoldbest
8377 myprint=myprint+darkgreen("to "+x[1])
8381 myprint = "[%s] %s%s" % \
8382 (pkgprint(pkg_status.ljust(13)),
8383 indent, pkgprint(pkg.cp))
8385 myprint = "[%s %s] %s%s" % \
8386 (pkgprint(pkg.type_name), addl,
8387 indent, pkgprint(pkg.cp))
8388 if (newlp-nc_len(myprint)) > 0:
8389 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8390 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8391 if (oldlp-nc_len(myprint)) > 0:
8392 myprint=myprint+" "*(oldlp-nc_len(myprint))
8393 myprint=myprint+myoldbest
8394 myprint += darkgreen("to " + pkg.root)
8397 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8399 myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8400 myprint += indent + pkgprint(pkg_key) + " " + \
8401 myoldbest + darkgreen("to " + myroot)
8403 if "--columns" in self.myopts:
8404 if "--quiet" in self.myopts:
8405 myprint=addl+" "+indent+pkgprint(pkg_cp)
8406 myprint=myprint+" "+green(xs[1]+xs[2])+" "
8407 myprint=myprint+myoldbest
8411 myprint = "[%s] %s%s" % \
8412 (pkgprint(pkg_status.ljust(13)),
8413 indent, pkgprint(pkg.cp))
8415 myprint = "[%s %s] %s%s" % \
8416 (pkgprint(pkg.type_name), addl,
8417 indent, pkgprint(pkg.cp))
8418 if (newlp-nc_len(myprint)) > 0:
8419 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8420 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8421 if (oldlp-nc_len(myprint)) > 0:
8422 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8423 myprint += myoldbest
8426 myprint = "[%s] %s%s %s" % \
8427 (pkgprint(pkg_status.ljust(13)),
8428 indent, pkgprint(pkg.cpv),
8431 myprint = "[%s %s] %s%s %s" % \
8432 (pkgprint(pkg_type), addl, indent,
8433 pkgprint(pkg.cpv), myoldbest)
8435 if columns and pkg.operation == "uninstall":
8437 p.append((myprint, verboseadd, repoadd))
8439 if "--tree" not in self.myopts and \
8440 "--quiet" not in self.myopts and \
8441 not self._opts_no_restart.intersection(self.myopts) and \
8442 pkg.root == self._running_root.root and \
8443 portage.match_from_list(
8444 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8445 not vardb.cpv_exists(pkg.cpv) and \
8446 "--quiet" not in self.myopts:
8447 if mylist_index < len(mylist) - 1:
8448 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8449 p.append(colorize("WARN", " then resume the merge."))
8452 show_repos = repoadd_set and repoadd_set != set(["0"])
8455 if isinstance(x, basestring):
8456 out.write("%s\n" % (x,))
8459 myprint, verboseadd, repoadd = x
8462 myprint += " " + verboseadd
8464 if show_repos and repoadd:
8465 myprint += " " + teal("[%s]" % repoadd)
8467 out.write("%s\n" % (myprint,))
8476 sys.stdout.write(str(repo_display))
8478 if "--changelog" in self.myopts:
8480 for revision,text in changelogs:
8481 print bold('*'+revision)
8482 sys.stdout.write(text)
# NOTE(review): the fused original line numbers show gaps here (8488, 8494,
# 8497-8498, 8507-8510, 8513-8517 are absent), so this view is missing lines --
# apparently including the try/finally that saves and restores sys.stdout
# around the redirect below. Do not assume the code below is complete.
8487 def display_problems(self):
# Docstring body (the triple-quote delimiters are on elided lines):
8489 Display problems with the dependency graph such as slot collisions.
8490 This is called internally by display() to show the problems _after_
8491 the merge list where it is most likely to be seen, but if display()
8492 is not going to be called then this method should be called explicitly
8493 to ensure that the user is notified of problems with the graph.
8495 All output goes to stderr, except for unsatisfied dependencies which
8496 go to stdout for parsing by programs such as autounmask.
8499 # Note that show_masked_packages() sends its output to
8500 # stdout, and some programs such as autounmask parse the
8501 # output in cases when emerge bails out. However, when
8502 # show_masked_packages() is called for installed packages
8503 # here, the message is a warning that is more appropriate
8504 # to send to stderr, so temporarily redirect stdout to
8505 # stderr. TODO: Fix output code so there's a cleaner way
8506 # to redirect everything to stderr.
# Redirect stdout -> stderr so _display_problems()'s warnings go to stderr;
# the restoring counterpart (presumably in a finally block) is elided here.
8511 sys.stdout = sys.stderr
8512 self._display_problems()
8518 # This goes to stdout for parsing by programs like autounmask.
8519 for pargs, kwargs in self._unsatisfied_deps_for_display:
8520 self._show_unsatisfied_dep(*pargs, **kwargs)
# NOTE(review): elided lines throughout (gaps in the fused numbering, e.g.
# 8526, 8532, 8547-8549, 8564, 8586-8589) -- loop bodies, else branches and
# list initializations (msg, arg_refs) are partially missing from this view.
# Emits all graph problems (circular deps, blockers/slot conflicts, world-file
# problems, package.provided conflicts, masked installed packages) to stderr.
8522 def _display_problems(self):
8523 if self._circular_deps_for_display is not None:
8524 self._show_circular_deps(
8525 self._circular_deps_for_display)
8527 # The user is only notified of a slot conflict if
8528 # there are no unresolvable blocker conflicts.
8529 if self._unsatisfied_blockers_for_display is not None:
8530 self._show_unsatisfied_blockers(
8531 self._unsatisfied_blockers_for_display)
8533 self._show_slot_collision_notice()
8535 # TODO: Add generic support for "set problem" handlers so that
8536 # the below warnings aren't special cases for world only.
8538 if self._missing_args:
8539 world_problems = False
8540 if "world" in self._sets:
8541 # Filter out indirect members of world (from nested sets)
8542 # since only direct members of world are desired here.
8543 world_set = self.roots[self.target_root].sets["world"]
8544 for arg, atom in self._missing_args:
8545 if arg.name == "world" and atom in world_set:
8546 world_problems = True
# Advise the user to repair a broken world file.
8550 sys.stderr.write("\n!!! Problems have been " + \
8551 "detected with your world file\n")
8552 sys.stderr.write("!!! Please run " + \
8553 green("emaint --check world")+"\n\n")
8555 if self._missing_args:
8556 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8557 " Ebuilds for the following packages are either all\n")
8558 sys.stderr.write(colorize("BAD", "!!!") + \
8559 " masked or don't exist:\n")
8560 sys.stderr.write(" ".join(str(atom) for arg, atom in \
8561 self._missing_args) + "\n")
8563 if self._pprovided_args:
# Group the package.provided conflicts by (arg, atom) so each is
# reported once with the list of sets that pulled it in.
8565 for arg, atom in self._pprovided_args:
8566 if isinstance(arg, SetArg):
8568 arg_atom = (atom, atom)
8571 arg_atom = (arg.arg, atom)
8572 refs = arg_refs.setdefault(arg_atom, [])
8573 if parent not in refs:
8576 msg.append(bad("\nWARNING: "))
8577 if len(self._pprovided_args) > 1:
8578 msg.append("Requested packages will not be " + \
8579 "merged because they are listed in\n")
8581 msg.append("A requested package will not be " + \
8582 "merged because it is listed in\n")
8583 msg.append("package.provided:\n\n")
8584 problems_sets = set()
8585 for (arg, atom), refs in arg_refs.iteritems():
8588 problems_sets.update(refs)
8590 ref_string = ", ".join(["'%s'" % name for name in refs])
8591 ref_string = " pulled in by " + ref_string
8592 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8594 if "world" in problems_sets:
8595 msg.append("This problem can be solved in one of the following ways:\n\n")
8596 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
8597 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
8598 msg.append(" C) Remove offending entries from package.provided.\n\n")
8599 msg.append("The best course of action depends on the reason that an offending\n")
8600 msg.append("package.provided entry exists.\n\n")
8601 sys.stderr.write("".join(msg))
# Finally, warn about installed packages that are now masked.
8603 masked_packages = []
8604 for pkg in self._masked_installed:
8605 root_config = pkg.root_config
8606 pkgsettings = self.pkgsettings[pkg.root]
8607 mreasons = get_masking_status(pkg, pkgsettings, root_config)
8608 masked_packages.append((root_config, pkgsettings,
8609 pkg.cpv, pkg.metadata, mreasons))
8611 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8612 " The following installed packages are masked:\n")
8613 show_masked_packages(masked_packages)
# NOTE(review): lines are elided here (e.g. 8619, 8625, 8627, 8631-8632,
# 8640, 8645-8646, 8648-8650) -- early returns, the '-r0' strip for 'next',
# the try: opener and except handlers, and the final return are missing from
# this view. Do not assume the code below is complete.
# Returns the ChangeLog entries between the installed version ('current')
# and the version about to be merged ('next'), as parsed by
# find_changelog_tags().
8617 def calc_changelog(self,ebuildpath,current,next):
8618 if ebuildpath == None or not os.path.exists(ebuildpath):
# Reduce cpvs to version strings, dropping a trailing '-r0'.
8620 current = '-'.join(portage.catpkgsplit(current)[1:])
8621 if current.endswith('-r0'):
8622 current = current[:-3]
8623 next = '-'.join(portage.catpkgsplit(next)[1:])
8624 if next.endswith('-r0'):
8626 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8628 changelog = open(changelogpath).read()
8629 except SystemExit, e:
8630 raise # Needed else can't exit
8633 divisions = self.find_changelog_tags(changelog)
8634 #print 'XX from',current,'to',next
8635 #for div,text in divisions: print 'XX',div
8636 # skip entries for all revisions above the one we are about to emerge
8637 for i in range(len(divisions)):
8638 if divisions[i][0]==next:
8639 divisions = divisions[i:]
8641 # find out how many entries we are going to display
8642 for i in range(len(divisions)):
8643 if divisions[i][0]==current:
8644 divisions = divisions[:i]
8647 # couldn't find the current revision in the list. display nothing
# NOTE(review): elided lines (8652-8654, 8656, 8659, 8668) -- the
# initialization of 'divs'/'release', the loop header, the no-match break,
# and the final return are missing from this view.
# Splits a ChangeLog text into (release, entry-text) pairs by scanning for
# '* <version>' header lines; '.ebuild' suffixes and '-r0' revisions are
# stripped from the release names.
8651 def find_changelog_tags(self,changelog):
8655 match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
# No further header found: the remainder belongs to the last release seen.
8657 if release is not None:
8658 divs.append((release,changelog))
8660 if release is not None:
8661 divs.append((release,changelog[:match.start()]))
8662 changelog = changelog[match.end():]
8663 release = match.group(1)
8664 if release.endswith('.ebuild'):
8665 release = release[:-7]
8666 if release.endswith('-r0'):
8667 release = release[:-3]
# NOTE(review): elided lines (e.g. 8675, 8678, 8681-8683, 8693-8695, 8697,
# 8699, 8706-8707, 8710-8713, 8716-8717, 8720, 8722+) -- the early return,
# world_set.lock()/unlock() calls, try/except openers, continues, the
# 'all_added' initialization and loop header are missing from this view.
8669 def saveNomergeFavorites(self):
8670 """Find atoms in favorites that are not in the mergelist and add them
8671 to the world file if necessary."""
# Bail out for options that must not modify the world file.
8672 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8673 "--oneshot", "--onlydeps", "--pretend"):
8674 if x in self.myopts:
8676 root_config = self.roots[self.target_root]
8677 world_set = root_config.sets["world"]
8679 world_locked = False
8680 if hasattr(world_set, "lock"):
8684 if hasattr(world_set, "load"):
8685 world_set.load() # maybe it's changed on disk
8687 args_set = self._sets["args"]
8688 portdb = self.trees[self.target_root]["porttree"].dbapi
8689 added_favorites = set()
# Collect world atoms for "nomerge" nodes that were requested as arguments.
8690 for x in self._set_nodes:
8691 pkg_type, root, pkg_key, pkg_status = x
8692 if pkg_status != "nomerge":
8696 myfavkey = create_world_atom(x, args_set, root_config)
8698 if myfavkey in added_favorites:
8700 added_favorites.add(myfavkey)
8701 except portage.exception.InvalidDependString, e:
8702 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8703 (pkg_key, str(e)), noiselevel=-1)
8704 writemsg("!!! see '%s'\n\n" % os.path.join(
8705 root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
# Also record requested sets (as SETPREFIX names) that qualify for world.
8708 for k in self._sets:
8709 if k in ("args", "world") or not root_config.sets[k].world_candidate:
8714 all_added.append(SETPREFIX + k)
8715 all_added.extend(added_favorites)
8718 print ">>> Recording %s in \"world\" favorites file..." % \
8719 colorize("INFORM", str(a))
8721 world_set.update(all_added)
# NOTE(review): heavily elided view (gaps such as 8727, 8730-8731, 8733-8734,
# 8737-8738, 8740, 8742-8743, 8745, 8748, 8750, 8755, 8757, 8760, 8768, 8775,
# 8779, 8781, 8784, 8788, 8790-8791, 8795, 8811, 8816-8818, 8823-8824, 8828,
# 8833, 8836-8837, 8840, 8842-8843, 8847, 8850-8851, 8856, 8858, 8861, 8864,
# 8866-8867, 8869, 8872, 8881-8882) -- loop headers, raises/returns/continues,
# try/finally structure and several initializations (masked_tasks, trees,
# traversed) are missing from this view. Do not assume completeness.
8726 def loadResumeCommand(self, resume_data, skip_masked=False):
# Docstring body (delimiters elided):
8728 Add a resume command to the graph and validate it in the process. This
8729 will raise a PackageNotFound exception if a package is not available.
8732 if not isinstance(resume_data, dict):
8735 mergelist = resume_data.get("mergelist")
8736 if not isinstance(mergelist, list):
8739 fakedb = self.mydbapi
8741 serialized_tasks = []
# Validate each 4-tuple mergelist entry and rebuild a Package for it.
8744 if not (isinstance(x, list) and len(x) == 4):
8746 pkg_type, myroot, pkg_key, action = x
8747 if pkg_type not in self.pkg_tree_map:
8749 if action != "merge":
8751 tree_type = self.pkg_tree_map[pkg_type]
8752 mydb = trees[myroot][tree_type].dbapi
8753 db_keys = list(self._trees_orig[myroot][
8754 tree_type].dbapi._aux_cache_keys)
8756 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8758 # It does not exist or it is corrupt.
8759 if action == "uninstall":
8761 raise portage.exception.PackageNotFound(pkg_key)
8762 installed = action == "uninstall"
8763 built = pkg_type != "ebuild"
8764 root_config = self.roots[myroot]
8765 pkg = Package(built=built, cpv=pkg_key,
8766 installed=installed, metadata=metadata,
8767 operation=action, root_config=root_config,
# For ebuilds, recompute USE/CHOST from current settings rather than
# trusting the resume data.
8769 if pkg_type == "ebuild":
8770 pkgsettings = self.pkgsettings[myroot]
8771 pkgsettings.setcpv(pkg)
8772 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8773 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8774 self._pkg_cache[pkg] = pkg
8776 root_config = self.roots[pkg.root]
8777 if "merge" == pkg.operation and \
8778 not visible(root_config.settings, pkg):
8780 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8782 self._unsatisfied_deps_for_display.append(
8783 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8785 fakedb[myroot].cpv_inject(pkg)
8786 serialized_tasks.append(pkg)
8787 self.spinner.update()
8789 if self._unsatisfied_deps_for_display:
8792 if not serialized_tasks or "--nodeps" in self.myopts:
8793 self._serialized_tasks_cache = serialized_tasks
8794 self._scheduler_graph = self.digraph
8796 self._select_package = self._select_pkg_from_graph
8797 self.myparams.add("selective")
8798 # Always traverse deep dependencies in order to account for
8799 # potentially unsatisfied dependencies of installed packages.
8800 # This is necessary for correct --keep-going or --resume operation
8801 # in case a package from a group of circularly dependent packages
8802 # fails. In this case, a package which has recently been installed
8803 # may have an unsatisfied circular dependency (pulled in by
8804 # PDEPEND, for example). So, even though a package is already
8805 # installed, it may not have all of its dependencies satisfied, so
8806 # it may not be usable. If such a package is in the subgraph of
8807 # deep dependencies of a scheduled build, that build needs to
8808 # be cancelled. In order for this type of situation to be
8809 # recognized, deep traversal of dependencies is required.
8810 self.myparams.add("deep")
8812 favorites = resume_data.get("favorites")
8813 args_set = self._sets["args"]
8814 if isinstance(favorites, list):
8815 args = self._load_favorites(favorites)
8819 for task in serialized_tasks:
8820 if isinstance(task, Package) and \
8821 task.operation == "merge":
8822 if not self._add_pkg(task, None):
8825 # Packages for argument atoms need to be explicitly
8826 # added via _add_pkg() so that they are included in the
8827 # digraph (needed at least for --tree display).
8829 for atom in arg.set:
8830 pkg, existing_node = self._select_package(
8831 arg.root_config.root, atom)
8832 if existing_node is None and \
8834 if not self._add_pkg(pkg, Dependency(atom=atom,
8835 root=pkg.root, parent=arg)):
8838 # Allow unsatisfied deps here to avoid showing a masking
8839 # message for an unsatisfied dep that isn't necessarily
8841 if not self._create_graph(allow_unsatisfied=True):
8844 unsatisfied_deps = []
8845 for dep in self._unsatisfied_deps:
8846 if not isinstance(dep.parent, Package):
8848 if dep.parent.operation == "merge":
8849 unsatisfied_deps.append(dep)
8852 # For unsatisfied deps of installed packages, only account for
8853 # them if they are in the subgraph of dependencies of a package
8854 # which is scheduled to be installed.
8855 unsatisfied_install = False
8857 dep_stack = self.digraph.parent_nodes(dep.parent)
8859 node = dep_stack.pop()
8860 if not isinstance(node, Package):
8862 if node.operation == "merge":
8863 unsatisfied_install = True
8865 if node in traversed:
8868 dep_stack.extend(self.digraph.parent_nodes(node))
8870 if unsatisfied_install:
8871 unsatisfied_deps.append(dep)
8873 if masked_tasks or unsatisfied_deps:
8874 # This probably means that a required package
8875 # was dropped via --skipfirst. It makes the
8876 # resume list invalid, so convert it to a
8877 # UnsatisfiedResumeDep exception.
8878 raise self.UnsatisfiedResumeDep(self,
8879 masked_tasks + unsatisfied_deps)
8880 self._serialized_tasks_cache = None
8883 except self._unknown_internal_error:
# NOTE(review): elided lines (8889, 8896, 8900-8901, 8903, 8905, 8908-8911,
# 8920, 8922, 8925, 8927) -- the 'args' initialization, the loop header over
# 'favorites', continues, a branch for sets already in self._sets, and the
# final return are missing from this view.
8888 def _load_favorites(self, favorites):
# Docstring body (delimiters elided):
8890 Use a list of favorites to resume state from a
8891 previous select_files() call. This creates similar
8892 DependencyArg instances to those that would have
8893 been created by the original select_files() call.
8894 This allows Package instances to be matched with
8895 DependencyArg instances during graph creation.
8897 root_config = self.roots[self.target_root]
8898 getSetAtoms = root_config.setconfig.getSetAtoms
8899 sets = root_config.sets
8902 if not isinstance(x, basestring):
8904 if x in ("system", "world"):
# Favorites written as "@setname" become SetArg instances.
8906 if x.startswith(SETPREFIX):
8907 s = x[len(SETPREFIX):]
8912 # Recursively expand sets so that containment tests in
8913 # self._get_parent_sets() properly match atoms in nested
8914 # sets (like if world contains system).
8915 expanded_set = InternalPackageSet(
8916 initial_atoms=getSetAtoms(s))
8917 self._sets[s] = expanded_set
8918 args.append(SetArg(arg=x, set=expanded_set,
8919 root_config=root_config))
# Plain favorites must be valid atoms; they become AtomArg instances.
8921 if not portage.isvalidatom(x):
8923 args.append(AtomArg(arg=x, atom=x,
8924 root_config=root_config))
8926 self._set_args(args)
# NOTE(review): the docstring's triple-quote delimiters (lines 8930/8934) are
# elided from this view; the docstring body appears below as bare lines.
8929 class UnsatisfiedResumeDep(portage.exception.PortageException):
8931 A dependency of a resume list is not installed. This
8932 can occur when a required package is dropped from the
8933 merge list via --skipfirst.
# Keeps a reference to the depgraph so the caller can inspect the failed
# graph when handling this exception.
8935 def __init__(self, depgraph, value):
8936 portage.exception.PortageException.__init__(self, value)
8937 self.depgraph = depgraph
class _internal_exception(portage.exception.PortageException):
	"""Common base for exceptions the depgraph raises internally.

	Accepts an optional message and forwards it to PortageException.
	"""

	def __init__(self, value=""):
		portage.exception.PortageException.__init__(self, value)
# NOTE(review): the docstring delimiters and its final line(s) (8944,
# 8948-8950) are elided from this view; the sentence below is cut off.
8943 class _unknown_internal_error(_internal_exception):
8945 Used by the depgraph internally to terminate graph creation.
8946 The specific reason for the failure should have been dumped
8947 to stderr, unfortunately, the exact reason for the failure
# NOTE(review): the docstring delimiters (8952 and the closing line) are
# elided from this view; only the docstring body is visible.
8951 class _serialize_tasks_retry(_internal_exception):
8953 This is raised by the _serialize_tasks() method when it needs to
8954 be called again for some reason. The only case that it's currently
8955 used for is when neglected dependencies need to be added to the
8956 graph in order to avoid making a potentially unsafe decision.
8959 class _dep_check_composite_db(portage.dbapi):
8961 A dbapi-like interface that is optimized for use in dep_check() calls.
8962 This is built on top of the existing depgraph package selection logic.
8963 Some packages that have been added to the graph may be masked from this
8964 view in order to influence the atom preference selection that occurs
# Binds the composite db to its owning depgraph and initializes the two
# memoization caches used by match().
# NOTE(review): line 8970 is elided from this view -- presumably
# `self._root = root`, since the `root` parameter is otherwise unused here
# and match()/_visible() read self._root. TODO confirm against the original.
8967 def __init__(self, depgraph, root):
8968 portage.dbapi.__init__(self)
8969 self._depgraph = depgraph
8971 self._match_cache = {}
8972 self._cpv_pkg_map = {}
def _clear_cache(self):
	"""Drop all memoized match() results and their cpv -> Package entries."""
	for memo in (self._match_cache, self._cpv_pkg_map):
		memo.clear()
8978 def match(self, atom):
8979 ret = self._match_cache.get(atom)
8984 atom = self._dep_expand(atom)
8985 pkg, existing = self._depgraph._select_package(self._root, atom)
8989 # Return the highest available from select_package() as well as
8990 # any matching slots in the graph db.
8992 slots.add(pkg.metadata["SLOT"])
8993 atom_cp = portage.dep_getkey(atom)
8994 if pkg.cp.startswith("virtual/"):
8995 # For new-style virtual lookahead that occurs inside
8996 # dep_check(), examine all slots. This is needed
8997 # so that newer slots will not unnecessarily be pulled in
8998 # when a satisfying lower slot is already installed. For
8999 # example, if virtual/jdk-1.4 is satisfied via kaffe then
9000 # there's no need to pull in a newer slot to satisfy a
9001 # virtual/jdk dependency.
9002 for db, pkg_type, built, installed, db_keys in \
9003 self._depgraph._filtered_trees[self._root]["dbs"]:
9004 for cpv in db.match(atom):
9005 if portage.cpv_getkey(cpv) != pkg.cp:
9007 slots.add(db.aux_get(cpv, ["SLOT"])[0])
9009 if self._visible(pkg):
9010 self._cpv_pkg_map[pkg.cpv] = pkg
9012 slots.remove(pkg.metadata["SLOT"])
9014 slot_atom = "%s:%s" % (atom_cp, slots.pop())
9015 pkg, existing = self._depgraph._select_package(
9016 self._root, slot_atom)
9019 if not self._visible(pkg):
9021 self._cpv_pkg_map[pkg.cpv] = pkg
9024 self._cpv_sort_ascending(ret)
9025 self._match_cache[orig_atom] = ret
9028 def _visible(self, pkg):
9029 if pkg.installed and "selective" not in self._depgraph.myparams:
9031 arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
9032 except (StopIteration, portage.exception.InvalidDependString):
9039 self._depgraph.pkgsettings[pkg.root], pkg):
9041 except portage.exception.InvalidDependString:
9043 in_graph = self._depgraph._slot_pkg_map[
9044 self._root].get(pkg.slot_atom)
9045 if in_graph is None:
9046 # Mask choices for packages which are not the highest visible
9047 # version within their slot (since they usually trigger slot
9049 highest_visible, in_graph = self._depgraph._select_package(
9050 self._root, pkg.slot_atom)
9051 if pkg != highest_visible:
9053 elif in_graph != pkg:
9054 # Mask choices for packages that would trigger a slot
9055 # conflict with a previously selected package.
9059 def _dep_expand(self, atom):
9061 This is only needed for old installed packages that may
9062 contain atoms that are not fully qualified with a specific
9063 category. Emulate the cpv_expand() function that's used by
9064 dbapi.match() in cases like this. If there are multiple
9065 matches, it's often due to a new-style virtual that has
9066 been added, so try to filter those out to avoid raising
9069 root_config = self._depgraph.roots[self._root]
9071 expanded_atoms = self._depgraph._dep_expand(root_config, atom)
9072 if len(expanded_atoms) > 1:
9073 non_virtual_atoms = []
9074 for x in expanded_atoms:
9075 if not portage.dep_getkey(x).startswith("virtual/"):
9076 non_virtual_atoms.append(x)
9077 if len(non_virtual_atoms) == 1:
9078 expanded_atoms = non_virtual_atoms
9079 if len(expanded_atoms) > 1:
9080 # compatible with portage.cpv_expand()
9081 raise portage.exception.AmbiguousPackageName(
9082 [portage.dep_getkey(x) for x in expanded_atoms])
9084 atom = expanded_atoms[0]
9086 null_atom = insert_category_into_atom(atom, "null")
9087 null_cp = portage.dep_getkey(null_atom)
9088 cat, atom_pn = portage.catsplit(null_cp)
9089 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9091 # Allow the resolver to choose which virtual.
9092 atom = insert_category_into_atom(atom, "virtual")
9094 atom = insert_category_into_atom(atom, "null")
9097 def aux_get(self, cpv, wants):
9098 metadata = self._cpv_pkg_map[cpv].metadata
9099 return [metadata.get(x, "") for x in wants]
class RepoDisplay(object):
	# Maps repository paths to the short display indices ([0], [1], [?])
	# shown next to packages in the merge list.
	# NOTE(review): fragment -- lines are missing in the bodies below.

	def __init__(self, roots):
		self._shown_repos = {}
		self._unknown_repo = False
		for root_config in roots.itervalues():
			portdir = root_config.settings.get("PORTDIR")
			repo_paths.add(portdir)
			overlays = root_config.settings.get("PORTDIR_OVERLAY")
			repo_paths.update(overlays.split())
		repo_paths = list(repo_paths)
		self._repo_paths = repo_paths
		# Resolve symlinks once so repoStr() can match realpath inputs.
		self._repo_paths_real = [ os.path.realpath(repo_path) \
			for repo_path in repo_paths ]

		# pre-allocate index for PORTDIR so that it always has index 0.
		for root_config in roots.itervalues():
			portdb = root_config.trees["porttree"].dbapi
			portdir = portdb.porttree_root
			self.repoStr(portdir)

	def repoStr(self, repo_path_real):
		# Return the display index for a resolved repository path,
		# assigning a new index on first sight.
		# NOTE(review): list.index() raises ValueError rather than
		# returning -1; a missing line presumably converts that -- verify.
		real_index = self._repo_paths_real.index(repo_path_real)
		if real_index == -1:
			self._unknown_repo = True
		shown_repos = self._shown_repos
		repo_paths = self._repo_paths
		repo_path = repo_paths[real_index]
		index = shown_repos.get(repo_path)
		index = len(shown_repos)
		shown_repos[repo_path] = index

	# NOTE(review): the following lines belong to a separate method whose
	# def line is missing in this fragment (presumably __str__).
		shown_repos = self._shown_repos
		unknown_repo = self._unknown_repo
		if shown_repos or self._unknown_repo:
			output.append("Portage tree and overlays:\n")
			# Invert the path -> index map into an index-ordered list.
			show_repo_paths = list(shown_repos)
			for repo_path, repo_index in shown_repos.iteritems():
				show_repo_paths[repo_index] = repo_path
			for index, repo_path in enumerate(show_repo_paths):
				output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
			output.append(" "+teal("[?]") + \
				" indicates that the source repository could not be determined\n")
		return "".join(output)
class PackageCounters(object):
	# Tallies merge-list statistics (upgrades, downgrades, new slots,
	# reinstalls, blocks, fetch restrictions, ...) and renders them as the
	# summary line printed after the merge list.
	# NOTE(review): fragment -- the __init__ and __str__ bodies below are
	# missing many of their original lines (including the def lines).
		self.blocks_satisfied = 0
		self.restrict_fetch = 0
		self.restrict_fetch_satisfied = 0
		self.interactive = 0

		total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
		myoutput.append("Total: %s package" % total_installs)
		# Pluralize, and open the parenthesized breakdown only when there
		# is at least one install to break down.
		if total_installs != 1:
			myoutput.append("s")
		if total_installs != 0:
			myoutput.append(" (")
		if self.upgrades > 0:
			details.append("%s upgrade" % self.upgrades)
			if self.upgrades > 1:
		if self.downgrades > 0:
			details.append("%s downgrade" % self.downgrades)
			if self.downgrades > 1:
		details.append("%s new" % self.new)
		if self.newslot > 0:
			details.append("%s in new slot" % self.newslot)
			if self.newslot > 1:
		details.append("%s reinstall" % self.reinst)
		details.append("%s uninstall" % self.uninst)
		if self.interactive > 0:
			details.append("%s %s" % (self.interactive,
				colorize("WARN", "interactive")))
		myoutput.append(", ".join(details))
		if total_installs != 0:
			myoutput.append(")")
		myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
		if self.restrict_fetch:
			myoutput.append("\nFetch Restriction: %s package" % \
				self.restrict_fetch)
			if self.restrict_fetch > 1:
				myoutput.append("s")
		if self.restrict_fetch_satisfied < self.restrict_fetch:
			myoutput.append(bad(" (%s unsatisfied)") % \
				(self.restrict_fetch - self.restrict_fetch_satisfied))
		myoutput.append("\nConflict: %s block" % \
			myoutput.append("s")
		if self.blocks_satisfied < self.blocks:
			myoutput.append(bad(" (%s unsatisfied)") % \
				(self.blocks - self.blocks_satisfied))
		return "".join(myoutput)
class PollSelectAdapter(PollConstants):
	"""
	Use select to emulate a poll object, for
	systems that don't support poll().
	"""
	# NOTE(review): fragment -- several lines are missing below, including
	# the __init__ def line.
		self._registered = {}
		# Cached argument lists for select.select(); invalidated whenever
		# the registered fd set changes.
		self._select_args = [[], [], []]

	def register(self, fd, *args):
		"""
		Only POLLIN is currently supported!
		"""
			"register expected at most 2 arguments, got " + \
			repr(1 + len(args)))
		eventmask = PollConstants.POLLIN | \
			PollConstants.POLLPRI | PollConstants.POLLOUT
		self._registered[fd] = eventmask
		# Invalidate the cached select() args.
		self._select_args = None

	def unregister(self, fd):
		# Invalidate the cached select() args and forget the fd.
		self._select_args = None
		del self._registered[fd]

	def poll(self, *args):
			"poll expected at most 2 arguments, got " + \
			repr(1 + len(args)))
		select_args = self._select_args
		if select_args is None:
			select_args = [self._registered.keys(), [], []]

		if timeout is not None:
			# Copy so the cached list is not mutated by the timeout append.
			select_args = select_args[:]
			# Translate poll() timeout args to select() timeout args:
			#
			#          | units        | value(s) for indefinite block
			# ---------|--------------|------------------------------
			#   poll   | milliseconds | omitted, negative, or None
			# ---------|--------------|------------------------------
			#   select | seconds      | omitted
			# ---------|--------------|------------------------------
			if timeout is not None and timeout < 0:
		if timeout is not None:
			# poll() takes milliseconds; select() takes seconds.
			select_args.append(timeout / 1000)

		select_events = select.select(*select_args)
		# Only readability is reported, consistent with POLLIN-only support.
		for fd in select_events[0]:
			poll_events.append((fd, PollConstants.POLLIN))
class SequentialTaskQueue(SlotObject):
	# FIFO queue of asynchronous tasks; starts up to max_jobs of them
	# concurrently (max_jobs is True means unlimited).

	__slots__ = ("max_jobs", "running_tasks") + \
		("_dirty", "_scheduling", "_task_queue")

	def __init__(self, **kwargs):
		SlotObject.__init__(self, **kwargs)
		# deque gives O(1) popleft() for FIFO dispatch.
		self._task_queue = deque()
		self.running_tasks = set()
		if self.max_jobs is None:

	def add(self, task):
		self._task_queue.append(task)

	def addFront(self, task):
		# Queue-jumping variant of add().
		self._task_queue.appendleft(task)

	# NOTE(review): the def line of the following method is missing in
	# this fragment (presumably schedule()).
		if self._scheduling:
			# Ignore any recursive schedule() calls triggered via
			# self._task_exit().
		self._scheduling = True

		task_queue = self._task_queue
		running_tasks = self.running_tasks
		max_jobs = self.max_jobs
		state_changed = False

		# Start queued tasks until the concurrency limit is reached.
		while task_queue and \
			(max_jobs is True or len(running_tasks) < max_jobs):
			task = task_queue.popleft()
			cancelled = getattr(task, "cancelled", None)
			running_tasks.add(task)
			# The exit listener prunes running_tasks when the task ends.
			task.addExitListener(self._task_exit)
			state_changed = True

		self._scheduling = False
		return state_changed

	def _task_exit(self, task):
		"""
		Since we can always rely on exit listeners being called, the set of
		running tasks is always pruned automatically and there is never any need
		to actively prune it.
		"""
		self.running_tasks.remove(task)
		if self._task_queue:

	# NOTE(review): the def line of the following method is missing in
	# this fragment (presumably clear()).
		self._task_queue.clear()
		running_tasks = self.running_tasks
		while running_tasks:
			task = running_tasks.pop()
			task.removeExitListener(self._task_exit)

	def __nonzero__(self):
		# Truthy while any work is queued or running (Python 2 protocol).
		return bool(self._task_queue or self.running_tasks)

	# NOTE(review): missing def line here (presumably __len__).
		return len(self._task_queue) + len(self.running_tasks)
# Memoized result of can_poll_device(); None means "not probed yet".
_can_poll_device = None

def can_poll_device():
	"""
	Test if it's possible to use poll() on a device such as a pty. This
	is known to fail on Darwin.
	@returns: True if poll() on a device succeeds, False otherwise.
	"""
	# NOTE(review): fragment -- the try/except framing around the
	# /dev/null probe is missing lines here.
	global _can_poll_device
	if _can_poll_device is not None:
		# Already probed; reuse the cached answer.
		return _can_poll_device

	if not hasattr(select, "poll"):
		_can_poll_device = False
		return _can_poll_device

	dev_null = open('/dev/null', 'rb')
		_can_poll_device = False
		return _can_poll_device

	p.register(dev_null.fileno(), PollConstants.POLLIN)

	invalid_request = False
	for f, event in p.poll():
		if event & PollConstants.POLLNVAL:
			# poll() rejected the device fd.
			invalid_request = True

	_can_poll_device = not invalid_request
	return _can_poll_device
def create_poll_instance():
	"""
	Return a poll-compatible object: a real select.poll() when poll()
	works on devices, otherwise a PollSelectAdapter fallback (needed
	where device poll() is absent or broken, e.g. on Darwin).
	"""
	return select.poll() if can_poll_device() else PollSelectAdapter()
# os.getloadavg() is unavailable on some platforms; fall back to parsing
# /proc/loadavg (Linux-specific).
getloadavg = getattr(os, "getloadavg", None)
if getloadavg is None:
	# NOTE(review): fragment -- the replacement function's def line and
	# its try/except framing are missing here.
	"""
	Uses /proc/loadavg to emulate os.getloadavg().
	Raises OSError if the load average was unobtainable.
	"""
	loadavg_str = open('/proc/loadavg').readline()
	# getloadavg() is only supposed to raise OSError, so convert
	raise OSError('unknown')
	loadavg_split = loadavg_str.split()
	if len(loadavg_split) < 3:
		raise OSError('unknown')
		loadavg_floats.append(float(loadavg_split[i]))
	raise OSError('unknown')
	return tuple(loadavg_floats)
class PollScheduler(object):
	# Base class providing a poll()-driven event loop plus job-count and
	# load-average throttling for subclasses.
	# NOTE(review): fragment -- several lines are missing in the bodies
	# below, including the __init__ def line.

	class _sched_iface_class(SlotObject):
		# Narrow interface handed to tasks so they can interact with the
		# scheduler without holding a direct reference to it.
		__slots__ = ("register", "schedule", "unregister")

		self._max_load = None
		self._poll_event_queue = []
		self._poll_event_handlers = {}
		self._poll_event_handler_ids = {}
		# Increment id for each new handler.
		self._event_handler_id = 0
		self._poll_obj = create_poll_instance()
		self._scheduling = False

	def _schedule(self):
		"""
		Calls _schedule_tasks() and automatically returns early from
		any recursive calls to this method that the _schedule_tasks()
		call might trigger. This makes _schedule() safe to call from
		inside exit listeners.
		"""
		if self._scheduling:
		self._scheduling = True
		return self._schedule_tasks()
		self._scheduling = False

	def _running_job_count(self):

	def _can_add_job(self):
		# Enforce both the job-count limit and the load-average limit.
		max_jobs = self._max_jobs
		max_load = self._max_load

		if self._max_jobs is not True and \
			self._running_job_count() >= self._max_jobs:

		# Only consult the load average once at least one job is running
		# and parallelism is actually allowed.
		if max_load is not None and \
			(max_jobs is True or max_jobs > 1) and \
			self._running_job_count() >= 1:
			avg1, avg5, avg15 = getloadavg()
			if avg1 >= max_load:

	def _poll(self, timeout=None):
		"""
		All poll() calls pass through here. The poll events
		are added directly to self._poll_event_queue.
		In order to avoid endless blocking, this raises
		StopIteration if timeout is None and there are
		no file descriptors to poll.
		"""
		if not self._poll_event_handlers:
		if timeout is None and \
			not self._poll_event_handlers:
			raise StopIteration(
				"timeout is None and there are no poll() event handlers")

		# The following error is known to occur with Linux kernel versions
		#
		#   select.error: (4, 'Interrupted system call')
		#
		# This error has been observed after a SIGSTOP, followed by SIGCONT.
		# Treat it similar to EAGAIN if timeout is None, otherwise just return
		# without any events.
			self._poll_event_queue.extend(self._poll_obj.poll(timeout))
		except select.error, e:
			writemsg_level("\n!!! select error: %s\n" % (e,),
				level=logging.ERROR, noiselevel=-1)
			if timeout is not None:

	def _next_poll_event(self, timeout=None):
		"""
		Since the _schedule_wait() loop is called by event
		handlers from _poll_loop(), maintain a central event
		queue for both of them to share events from a single
		poll() call. In order to avoid endless blocking, this
		raises StopIteration if timeout is None and there are
		no file descriptors to poll.
		"""
		if not self._poll_event_queue:
		return self._poll_event_queue.pop()

	def _poll_loop(self):
		event_handlers = self._poll_event_handlers
		event_handled = False

		# Dispatch events until every handler has unregistered itself.
		while event_handlers:
			f, event = self._next_poll_event()
			handler, reg_id = event_handlers[f]
			event_handled = True
		except StopIteration:
			event_handled = True

		if not event_handled:
			raise AssertionError("tight loop")

	def _schedule_yield(self):
		"""
		Schedule for a short period of time chosen by the scheduler based
		on internal state. Synchronous tasks should call this periodically
		in order to allow the scheduler to service pending poll events. The
		scheduler will call poll() exactly once, without blocking, and any
		resulting poll events will be serviced.
		"""
		event_handlers = self._poll_event_handlers
		if not event_handlers:
			return bool(events_handled)

		if not self._poll_event_queue:

		# Drain only the already-queued events; no blocking poll here.
		while event_handlers and self._poll_event_queue:
			f, event = self._next_poll_event()
			handler, reg_id = event_handlers[f]
		except StopIteration:

		return bool(events_handled)

	def _register(self, f, eventmask, handler):
		"""
		@return: A unique registration id, for use in schedule() or
		"""
		if f in self._poll_event_handlers:
			raise AssertionError("fd %d is already registered" % f)
		self._event_handler_id += 1
		reg_id = self._event_handler_id
		self._poll_event_handler_ids[reg_id] = f
		self._poll_event_handlers[f] = (handler, reg_id)
		self._poll_obj.register(f, eventmask)

	def _unregister(self, reg_id):
		# Reverse of _register(): drop the fd from the poll object and
		# from both bookkeeping maps.
		f = self._poll_event_handler_ids[reg_id]
		self._poll_obj.unregister(f)
		del self._poll_event_handlers[f]
		del self._poll_event_handler_ids[reg_id]

	def _schedule_wait(self, wait_ids):
		"""
		Schedule until wait_id is not longer registered
		@param wait_id: a task id to wait for
		"""
		event_handlers = self._poll_event_handlers
		handler_ids = self._poll_event_handler_ids
		event_handled = False

		# Accept either a single registration id or an iterable of them.
		if isinstance(wait_ids, int):
			wait_ids = frozenset([wait_ids])

		# Keep servicing events while any awaited id is still registered.
		while wait_ids.intersection(handler_ids):
			f, event = self._next_poll_event()
			handler, reg_id = event_handlers[f]
			event_handled = True
		except StopIteration:
			event_handled = True

		return event_handled
class QueueScheduler(PollScheduler):
	"""
	Add instances of SequentialTaskQueue and then call run(). The
	run() method returns when no tasks remain.
	"""
	# NOTE(review): fragment -- several lines (including some def lines)
	# are missing below.

	def __init__(self, max_jobs=None, max_load=None):
		PollScheduler.__init__(self)

		if max_jobs is None:
		self._max_jobs = max_jobs
		self._max_load = max_load
		# Expose the minimal scheduler interface used by tasks.
		self.sched_iface = self._sched_iface_class(
			register=self._register,
			schedule=self._schedule_wait,
			unregister=self._unregister)
		self._schedule_listeners = []

	# NOTE(review): missing def line here (presumably add(self, q)).
		self._queues.append(q)

	def remove(self, q):
		self._queues.remove(q)

	# NOTE(review): missing def line here (presumably run()); loops until
	# no queue can schedule more work and nothing is left running.
		while self._schedule():
		while self._running_job_count():

	def _schedule_tasks(self):
		"""
		@returns: True if there may be remaining tasks to schedule,
		"""
		while self._can_add_job():
			# Capacity left under the --jobs limit.
			n = self._max_jobs - self._running_job_count()
			if not self._start_next_job(n):
		for q in self._queues:

	def _running_job_count(self):
		# Recount from the queues and cache the total in self._jobs.
		for q in self._queues:
			job_count += len(q.running_tasks)
		self._jobs = job_count

	def _start_next_job(self, n=1):
		# Try to start up to n jobs, walking the queues in order; the
		# count of newly started tasks is inferred from the growth of
		# each queue's running_tasks set.
		for q in self._queues:
			initial_job_count = len(q.running_tasks)
			final_job_count = len(q.running_tasks)
			if final_job_count > initial_job_count:
				started_count += (final_job_count - initial_job_count)
			if started_count >= n:
		return started_count
class TaskScheduler(object):
	"""
	A simple way to handle scheduling of AsynchronousTask instances. Simply
	add tasks and call run(). The run() method returns when no tasks remain.
	"""

	def __init__(self, max_jobs=None, max_load=None):
		# One FIFO task queue, driven by a dedicated QueueScheduler.
		queue = SequentialTaskQueue(max_jobs=max_jobs)
		scheduler = QueueScheduler(
			max_jobs=max_jobs, max_load=max_load)
		self._queue = queue
		self._scheduler = scheduler
		# Re-export the scheduler's task interface and its run() loop so
		# callers can treat this object as the scheduler itself.
		self.sched_iface = scheduler.sched_iface
		self.run = scheduler.run
		scheduler.add(queue)

	def add(self, task):
		self._queue.add(task)
class JobStatusDisplay(object):
	# Renders the one-line "Jobs: N of M complete ... Load avg: ..." status
	# on a tty, falling back to plain line output otherwise.
	# NOTE(review): fragment -- the dict literals and several method bodies
	# below are missing lines (including some def lines).

	_bound_properties = ("curval", "failed", "running")
	# Width reserved for the "Jobs:" column before "Load avg:" starts.
	_jobs_column_width = 48

	# Don't update the display unless at least this much
	# time has passed, in units of seconds.
	_min_display_latency = 2

	_default_term_codes = {

	_termcap_name_map = {
		'carriage_return' : 'cr',

	def __init__(self, out=sys.stdout, quiet=False):
		# object.__setattr__ is used because __setattr__ is overridden to
		# trigger _property_change() on the bound properties.
		object.__setattr__(self, "out", out)
		object.__setattr__(self, "quiet", quiet)
		object.__setattr__(self, "maxval", 0)
		object.__setattr__(self, "merges", 0)
		object.__setattr__(self, "_changed", False)
		object.__setattr__(self, "_displayed", False)
		object.__setattr__(self, "_last_display_time", 0)
		object.__setattr__(self, "width", 80)

		isatty = hasattr(out, "isatty") and out.isatty()
		object.__setattr__(self, "_isatty", isatty)
		# Fall back to the hard-coded term codes when not on a tty or when
		# terminfo initialization fails.
		if not isatty or not self._init_term():
			for k, capname in self._termcap_name_map.iteritems():
				term_codes[k] = self._default_term_codes[capname]
			object.__setattr__(self, "_term_codes", term_codes)
		encoding = sys.getdefaultencoding()
		for k, v in self._term_codes.items():
			if not isinstance(v, basestring):
				self._term_codes[k] = v.decode(encoding, 'replace')

	def _init_term(self):
		"""
		Initialize term control codes.
		@returns: True if term codes were successfully initialized,
		"""
		term_type = os.environ.get("TERM", "vt100")
			curses.setupterm(term_type, self.out.fileno())
			tigetstr = curses.tigetstr
		except curses.error:
		if tigetstr is None:

		for k, capname in self._termcap_name_map.iteritems():
			code = tigetstr(capname)
			# Fall back to the default for capabilities terminfo lacks.
			code = self._default_term_codes[capname]
			term_codes[k] = code
		object.__setattr__(self, "_term_codes", term_codes)

	def _format_msg(self, msg):
		return ">>> %s" % msg

	# NOTE(review): missing def line here -- these lines erase the current
	# status line (carriage return + clear-to-end-of-line).
		self._term_codes['carriage_return'] + \
			self._term_codes['clr_eol'])
		self._displayed = False

	def _display(self, line):
		self.out.write(line)
		self._displayed = True

	def _update(self, msg):
		# Non-tty output degrades to plain line-at-a-time printing.
		if not self._isatty:
			out.write(self._format_msg(msg) + self._term_codes['newline'])
			self._displayed = True
		self._display(self._format_msg(msg))

	def displayMessage(self, msg):
		was_displayed = self._displayed
		# On a tty, the current status line is cleared before the message.
		if self._isatty and self._displayed:
		self.out.write(self._format_msg(msg) + self._term_codes['newline'])
		self._displayed = False
		self._changed = True

	# NOTE(review): missing def line here (presumably reset()); zeroes the
	# bound properties and finishes the status line.
		for name in self._bound_properties:
			object.__setattr__(self, name, 0)
		self.out.write(self._term_codes['newline'])
		self._displayed = False

	def __setattr__(self, name, value):
		old_value = getattr(self, name)
		if value == old_value:
		object.__setattr__(self, name, value)
		if name in self._bound_properties:
			self._property_change(name, old_value, value)

	def _property_change(self, name, old_value, new_value):
		# Mark the display dirty so the next display() call redraws.
		self._changed = True

	def _load_avg_str(self):
		return ", ".join(("%%.%df" % digits ) % x for x in avg)

	# NOTE(review): missing def line here (presumably display()).
		"""
		Display status on stdout, but only if something has
		changed since the last call.
		"""
		current_time = time.time()
		time_delta = current_time - self._last_display_time
		# Rate-limit tty redraws to _min_display_latency seconds.
		if self._displayed and \
			if not self._isatty:
			if time_delta < self._min_display_latency:

		self._last_display_time = current_time
		self._changed = False
		self._display_status()

	def _display_status(self):
		# Don't use len(self._completed_tasks) here since that also
		# can include uninstall tasks.
		curval_str = str(self.curval)
		maxval_str = str(self.maxval)
		running_str = str(self.running)
		failed_str = str(self.failed)
		load_avg_str = self._load_avg_str()

		# Render styled output plus a parallel plain-text copy so the
		# plain version can be measured and truncated.
		color_output = StringIO()
		plain_output = StringIO()
		style_file = portage.output.ConsoleStyleFile(color_output)
		style_file.write_listener = plain_output
		style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
		style_writer.style_listener = style_file.new_styles
		f = formatter.AbstractFormatter(style_writer)

		number_style = "INFORM"
		f.add_literal_data("Jobs: ")
		f.push_style(number_style)
		f.add_literal_data(curval_str)
		f.add_literal_data(" of ")
		f.push_style(number_style)
		f.add_literal_data(maxval_str)
		f.add_literal_data(" complete")
		f.add_literal_data(", ")
		f.push_style(number_style)
		f.add_literal_data(running_str)
		f.add_literal_data(" running")
		f.add_literal_data(", ")
		f.push_style(number_style)
		f.add_literal_data(failed_str)
		f.add_literal_data(" failed")

		# Pad the jobs column out to its fixed width.
		padding = self._jobs_column_width - len(plain_output.getvalue())
		f.add_literal_data(padding * " ")
		f.add_literal_data("Load avg: ")
		f.add_literal_data(load_avg_str)

		# Truncate to fit width, to avoid making the terminal scroll if the
		# line overflows (happens when the load average is large).
		plain_output = plain_output.getvalue()
		if self._isatty and len(plain_output) > self.width:
			# Use plain_output here since it's easier to truncate
			# properly than the color output which contains console
			self._update(plain_output[:self.width])
		self._update(color_output.getvalue())
		xtermTitle(" ".join(plain_output.split()))
class Scheduler(PollScheduler):
	# Drives the actual merge list: builds, fetches, and merges packages
	# in parallel subject to --jobs / --load-average limits.

	# Option sets that modify scheduler behavior; membership is tested
	# against self.myopts. NOTE(review): semantics of each set should be
	# confirmed against the emerge option documentation.
	_opts_ignore_blockers = \
		frozenset(["--buildpkgonly",
		"--fetchonly", "--fetch-all-uri",
		"--nodeps", "--pretend"])

	_opts_no_background = \
		frozenset(["--pretend",
		"--fetchonly", "--fetch-all-uri"])

	_opts_no_restart = frozenset(["--buildpkgonly",
		"--fetchonly", "--fetch-all-uri", "--pretend"])

	# Options that should not be carried over into a --resume run.
	_bad_resume_opts = set(["--ask", "--changelog",
		"--resume", "--skipfirst"])

	# Shared log file for all parallel-fetch output.
	_fetch_log = "/var/log/emerge-fetch.log"
	# NOTE(review): fragment -- the _iface_class __slots__ tuple below is
	# missing its closing lines.
	class _iface_class(SlotObject):
		# Interface object handed to merge tasks: dblink hooks, fetch
		# interface, poll registration, and scheduling helpers.
		__slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
			"dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
			"scheduleSetup", "scheduleUnpack", "scheduleYield",

	class _fetch_iface_class(SlotObject):
		# Minimal interface given to fetch tasks.
		__slots__ = ("log_file", "schedule")

	# Dict-like container with one attribute per named task queue; each
	# is populated with a SequentialTaskQueue in __init__.
	_task_queues_class = slot_dict_class(
		("merge", "jobs", "fetch", "unpack"), prefix="")

	class _build_opts_class(SlotObject):
		# Boolean mirror of the corresponding "--" command-line options.
		__slots__ = ("buildpkg", "buildpkgonly",
			"fetch_all_uri", "fetchonly", "pretend")

	class _binpkg_opts_class(SlotObject):
		# Boolean mirror of the corresponding "--" command-line options.
		__slots__ = ("fetchonly", "getbinpkg", "pretend")

	class _pkg_count_class(SlotObject):
		# curval/maxval drive the "N of M complete" status display.
		__slots__ = ("curval", "maxval")
10024 class _emerge_log_class(SlotObject):
10025 __slots__ = ("xterm_titles",)
10027 def log(self, *pargs, **kwargs):
10028 if not self.xterm_titles:
10029 # Avoid interference with the scheduler's status display.
10030 kwargs.pop("short_msg", None)
10031 emergelog(self.xterm_titles, *pargs, **kwargs)
10033 class _failed_pkg(SlotObject):
10034 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
10036 class _ConfigPool(object):
10037 """Interface for a task to temporarily allocate a config
10038 instance from a pool. This allows a task to be constructed
10039 long before the config instance actually becomes needed, like
10040 when prefetchers are constructed for the whole merge list."""
10041 __slots__ = ("_root", "_allocate", "_deallocate")
10042 def __init__(self, root, allocate, deallocate):
10044 self._allocate = allocate
10045 self._deallocate = deallocate
10046 def allocate(self):
10047 return self._allocate(self._root)
10048 def deallocate(self, settings):
10049 self._deallocate(settings)
	class _unknown_internal_error(portage.exception.PortageException):
		"""
		Used internally to terminate scheduling. The specific reason for
		the failure should have been dumped to stderr.
		"""
		def __init__(self, value=""):
			portage.exception.PortageException.__init__(self, value)
	def __init__(self, settings, trees, mtimedb, myopts,
		spinner, mergelist, favorites, digraph):
		# NOTE(review): fragment -- several lines of this constructor are
		# missing below (loop headers, try framing, and some assignments).
		PollScheduler.__init__(self)
		self.settings = settings
		self.target_root = settings["ROOT"]
		self.myopts = myopts
		self._spinner = spinner
		self._mtimedb = mtimedb
		self._mergelist = mergelist
		self._favorites = favorites
		self._args_set = InternalPackageSet(favorites)
		# Mirror "--foo-bar" style options into boolean slots.
		self._build_opts = self._build_opts_class()
		for k in self._build_opts.__slots__:
			setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
		self._binpkg_opts = self._binpkg_opts_class()
		for k in self._binpkg_opts.__slots__:
			setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)

		self._logger = self._emerge_log_class()
		# One SequentialTaskQueue per named queue (merge/jobs/fetch/unpack).
		self._task_queues = self._task_queues_class()
		for k in self._task_queues.allowed_keys:
			setattr(self._task_queues, k,
				SequentialTaskQueue())

		# Holds merges that will wait to be executed when no builds are
		# executing. This is useful for system packages since dependencies
		# on system packages are frequently unspecified.
		self._merge_wait_queue = []
		# Holds merges that have been transfered from the merge_wait_queue to
		# the actual merge queue. They are removed from this list upon
		# completion. Other packages can start building only when this list is
		self._merge_wait_scheduled = []

		# Holds system packages and their deep runtime dependencies. Before
		# being merged, these packages go to merge_wait_queue, to be merged
		# when no other packages are building.
		self._deep_system_deps = set()

		# Holds packages to merge which will satisfy currently unsatisfied
		# deep runtime dependencies of system packages. If this is not empty
		# then no parallel builds will be spawned until it is empty. This
		# minimizes the possibility that a build will fail due to the system
		# being in a fragile state. For example, see bug #259954.
		self._unsatisfied_system_deps = set()

		self._status_display = JobStatusDisplay()
		self._max_load = myopts.get("--load-average")
		max_jobs = myopts.get("--jobs")
		if max_jobs is None:
		self._set_max_jobs(max_jobs)

		# The root where the currently running
		# portage instance is installed.
		self._running_root = trees["/"]["root_config"]
		if settings.get("PORTAGE_DEBUG", "") == "1":
		self.pkgsettings = {}
		self._config_pool = {}
		self._blocker_db = {}
		# NOTE(review): the per-root loop header is missing here.
			self._config_pool[root] = []
			self._blocker_db[root] = BlockerDB(trees[root]["root_config"])

		fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
			schedule=self._schedule_fetch)
		self._sched_iface = self._iface_class(
			dblinkEbuildPhase=self._dblink_ebuild_phase,
			dblinkDisplayMerge=self._dblink_display_merge,
			dblinkElog=self._dblink_elog,
			dblinkEmergeLog=self._dblink_emerge_log,
			fetch=fetch_iface, register=self._register,
			schedule=self._schedule_wait,
			scheduleSetup=self._schedule_setup,
			scheduleUnpack=self._schedule_unpack,
			scheduleYield=self._schedule_yield,
			unregister=self._unregister)

		# Weak values so finished prefetchers can be garbage collected.
		self._prefetchers = weakref.WeakValueDictionary()
		self._pkg_queue = []
		self._completed_tasks = set()

		self._failed_pkgs = []
		self._failed_pkgs_all = []
		self._failed_pkgs_die_msgs = []
		self._post_mod_echo_msgs = []
		self._parallel_fetch = False
		# Count only real merge operations for the "N of M" display.
		merge_count = len([x for x in mergelist \
			if isinstance(x, Package) and x.operation == "merge"])
		self._pkg_count = self._pkg_count_class(
			curval=0, maxval=merge_count)
		self._status_display.maxval = self._pkg_count.maxval

		# The load average takes some time to respond when new
		# jobs are added, so we need to limit the rate of adding
		self._job_delay_max = 10
		self._job_delay_factor = 1.0
		self._job_delay_exp = 1.5
		self._previous_job_start_time = None

		self._set_digraph(digraph)

		# This is used to memoize the _choose_pkg() result when
		# no packages can be chosen until one of the existing
		self._choose_pkg_return_early = False

		features = self.settings.features
		# parallel-fetch requires distlocks; warn and leave it disabled
		# otherwise.
		if "parallel-fetch" in features and \
			not ("--pretend" in self.myopts or \
			"--fetch-all-uri" in self.myopts or \
			"--fetchonly" in self.myopts):
			if "distlocks" not in features:
				portage.writemsg(red("!!!")+"\n", noiselevel=-1)
				portage.writemsg(red("!!!")+" parallel-fetching " + \
					"requires the distlocks feature enabled"+"\n",
				portage.writemsg(red("!!!")+" you have it disabled, " + \
					"thus parallel-fetching is being disabled"+"\n",
				portage.writemsg(red("!!!")+"\n", noiselevel=-1)
			elif len(mergelist) > 1:
				self._parallel_fetch = True

		if self._parallel_fetch:
			# clear out existing fetch log if it exists
				open(self._fetch_log, 'w')
			except EnvironmentError:

		# Identify the installed portage package, if any, so merges of
		# portage itself can be special-cased.
		self._running_portage = None
		portage_match = self._running_root.trees["vartree"].dbapi.match(
			portage.const.PORTAGE_PACKAGE_ATOM)
			cpv = portage_match.pop()
			self._running_portage = self._pkg(cpv, "installed",
				self._running_root, installed=True)
	def _poll(self, timeout=None):
		# NOTE(review): fragment -- a line is missing between the def and
		# the delegation below (presumably a self._schedule() call before
		# polling; confirm before relying on this).
		PollScheduler._poll(self, timeout=timeout)
10207 def _set_max_jobs(self, max_jobs):
10208 self._max_jobs = max_jobs
10209 self._task_queues.jobs.max_jobs = max_jobs
	def _background_mode(self):
		"""
		Check if background mode is enabled and adjust states as necessary.
		@returns: True if background mode is enabled, False otherwise.
		"""
		# NOTE(review): fragment -- some lines are missing below.
		# Background mode applies with parallel jobs or --quiet, unless a
		# foreground-only option (_opts_no_background) is present.
		background = (self._max_jobs is True or \
			self._max_jobs > 1 or "--quiet" in self.myopts) and \
			not bool(self._opts_no_background.intersection(self.myopts))

		interactive_tasks = self._get_interactive_tasks()
		if interactive_tasks:
			# Interactive packages need stdio, so force --jobs=1 below.
			writemsg_level(">>> Sending package output to stdio due " + \
				"to interactive package(s):\n",
				level=logging.INFO, noiselevel=-1)
			for pkg in interactive_tasks:
				pkg_str = " " + colorize("INFORM", str(pkg.cpv))
				if pkg.root != "/":
					pkg_str += " for " + pkg.root
				msg.append(pkg_str)
			writemsg_level("".join("%s\n" % (l,) for l in msg),
				level=logging.INFO, noiselevel=-1)
			if self._max_jobs is True or self._max_jobs > 1:
				self._set_max_jobs(1)
				writemsg_level(">>> Setting --jobs=1 due " + \
					"to the above interactive package(s)\n",
					level=logging.INFO, noiselevel=-1)

		# The status display is silenced in foreground mode and when
		# --quiet was requested without --verbose.
		self._status_display.quiet = \
			not background or \
			("--quiet" in self.myopts and \
			"--verbose" not in self.myopts)

		self._logger.xterm_titles = \
			"notitles" not in self.settings.features and \
			self._status_display.quiet
# Return the merge-operation Package tasks whose PROPERTIES metadata
# contains "interactive". An invalid PROPERTIES depstring is reported via
# show_invalid_depstring_notice and escalated as an internal error.
10255 def _get_interactive_tasks(self):
10256 from portage import flatten
10257 from portage.dep import use_reduce, paren_reduce
10258 interactive_tasks = []
10259 for task in self._mergelist:
10260 if not (isinstance(task, Package) and \
10261 task.operation == "merge"):
# NOTE(review): the 'continue' after this guard and the 'try:' opening the
# block below (lines 10262-10263) are omitted from this extract.
10264 properties = flatten(use_reduce(paren_reduce(
10265 task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10266 except portage.exception.InvalidDependString, e:
10267 show_invalid_depstring_notice(task,
10268 task.metadata["PROPERTIES"], str(e))
10269 raise self._unknown_internal_error()
10270 if "interactive" in properties:
10271 interactive_tasks.append(task)
10272 return interactive_tasks
# Install the dependency graph used for parallel scheduling. With --nodeps
# or a single job the graph is not needed, so it is discarded; otherwise it
# is stored and post-processed (system deps, pruning, builddir collisions).
10274 def _set_digraph(self, digraph):
10275 if "--nodeps" in self.myopts or \
10276 (self._max_jobs is not True and self._max_jobs < 2):
# NOTE(review): line(s) between 10276 and 10278 are omitted (presumably a
# comment such as "discard the graph"); the early 'return' after setting
# None (line ~10279) is also missing from this extract.
10278 self._digraph = None
10281 self._digraph = digraph
10282 self._find_system_deps()
10283 self._prune_digraph()
10284 self._prevent_builddir_collisions()
# Populate self._deep_system_deps with merge-operation packages that are
# system packages or their deep runtime dependencies; these get serialized
# via merge_wait_queue so nothing builds while they merge.
10286 def _find_system_deps(self):
10288 Find system packages and their deep runtime dependencies. Before being
10289 merged, these packages go to merge_wait_queue, to be merged when no
10290 other packages are building.
10292 deep_system_deps = self._deep_system_deps
10293 deep_system_deps.clear()
10294 deep_system_deps.update(
10295 _find_deep_system_runtime_deps(self._digraph))
# Keep only packages actually being merged (drop uninstalls/nomerges).
10296 deep_system_deps.difference_update([pkg for pkg in \
10297 deep_system_deps if pkg.operation != "merge"])
# Repeatedly strip irrelevant root nodes (non-Package entries, installed
# nomerge nodes, already-completed tasks) from the scheduling digraph.
10299 def _prune_digraph(self):
10301 Prune any root nodes that are irrelevant.
10304 graph = self._digraph
10305 completed_tasks = self._completed_tasks
10306 removed_nodes = set()
# NOTE(review): the loop header enclosing the block below (line 10307,
# presumably 'while True:') is omitted from this extract.
10308 for node in graph.root_nodes():
10309 if not isinstance(node, Package) or \
10310 (node.installed and node.operation == "nomerge") or \
10312 node in completed_tasks:
10313 removed_nodes.add(node)
# Removing roots can expose new irrelevant roots, hence the outer loop;
# the loop exits once a pass removes nothing.
10315 graph.difference_update(removed_nodes)
10316 if not removed_nodes:
# NOTE(review): the 'break' between these two lines (line 10317) is
# omitted from this extract.
10318 removed_nodes.clear()
# When the identical cpv is merged to multiple $ROOTs (stage building),
# add buildtime edges between the duplicates so they never occupy the
# same builddir simultaneously.
10320 def _prevent_builddir_collisions(self):
10322 When building stages, sometimes the same exact cpv needs to be merged
10323 to both $ROOTs. Add edges to the digraph in order to avoid collisions
10324 in the builddir. Currently, normal file locks would be inappropriate
10325 for this purpose since emerge holds all of it's build dir locks from
# NOTE(review): the docstring continuation/close and the initialization of
# 'cpv_map' (around lines 10326-10328, presumably cpv_map = {}) are omitted.
10329 for pkg in self._mergelist:
10330 if not isinstance(pkg, Package):
10331 # a satisfied blocker
# NOTE(review): the 'continue' after this comment (line ~10332) is omitted.
10335 if pkg.cpv not in cpv_map:
10336 cpv_map[pkg.cpv] = [pkg]
# Later duplicates depend (buildtime) on every earlier instance of the
# same cpv, forcing serialization of their builddir usage.
10338 for earlier_pkg in cpv_map[pkg.cpv]:
10339 self._digraph.add(earlier_pkg, pkg,
10340 priority=DepPriority(buildtime=True))
10341 cpv_map[pkg.cpv].append(pkg)
# Exception raised by unmerge() when an uninstallation fails; carries the
# failing exit status as .status when positional args are supplied.
10343 class _pkg_failure(portage.exception.PortageException):
10345 An instance of this class is raised by unmerge() when
10346 an uninstallation fails.
10349 def __init__(self, *pargs):
10350 portage.exception.PortageException.__init__(self, pargs)
# NOTE(review): a guard line (10351, presumably 'if pargs:') is omitted
# from this extract; as shown, an empty pargs would raise IndexError.
10352 self.status = pargs[0]
# Queue a fetcher at the front of the fetch queue; a single queue
# serializes all writers of the shared fetch log.
10354 def _schedule_fetch(self, fetcher):
10356 Schedule a fetcher on the fetch queue, in order to
10357 serialize access to the fetch log.
10359 self._task_queues.fetch.addFront(fetcher)
# Queue a setup phase at the front of the merge queue; setup runs
# unsandboxed, so the merge queue serializes live-filesystem access.
10361 def _schedule_setup(self, setup_phase):
10363 Schedule a setup phase on the merge queue, in order to
10364 serialize unsandboxed access to the live filesystem.
10366 self._task_queues.merge.addFront(setup_phase)
# Queue an unpack phase on the unpack queue; serializes $DISTDIR access
# for live ebuilds (appended at the back, unlike setup/fetch).
10369 def _schedule_unpack(self, unpack_phase):
10371 Schedule an unpack phase on the unpack queue, in order
10372 to serialize $DISTDIR access for live ebuilds.
10374 self._task_queues.unpack.add(unpack_phase)
# Return a zero-argument closure that computes blockers for new_pkg.
# The closure defers the lookup until the caller holds the vdb lock
# (hence acquire_lock=0 in the delegated call).
10376 def _find_blockers(self, new_pkg):
10378 Returns a callable which should be called only when
10379 the vdb lock has been acquired.
10381 def get_blockers():
10382 return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10383 return get_blockers
# Build the list of dblink objects for installed packages that block
# new_pkg. Skipped entirely when a blocker-ignoring option is active.
10385 def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
10386 if self._opts_ignore_blockers.intersection(self.myopts):
# NOTE(review): the early 'return None' (line ~10387) is omitted here.
10389 # Call gc.collect() here to avoid heap overflow that
10390 # triggers 'Cannot allocate memory' errors (reported
10391 # with python-2.5).
# NOTE(review): the gc import/collect lines (10392-10394) are omitted.
10395 blocker_db = self._blocker_db[new_pkg.root]
10397 blocker_dblinks = []
10398 for blocking_pkg in blocker_db.findInstalledBlockers(
10399 new_pkg, acquire_lock=acquire_lock):
# Packages in the same slot (or the same cpv) are replaced rather than
# blocked; the 'continue' lines after each guard are omitted from this
# extract (lines 10401 and 10403).
10400 if new_pkg.slot_atom == blocking_pkg.slot_atom:
10402 if new_pkg.cpv == blocking_pkg.cpv:
10404 blocker_dblinks.append(portage.dblink(
10405 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
10406 self.pkgsettings[blocking_pkg.root], treetype="vartree",
10407 vartree=self.trees[blocking_pkg.root]["vartree"]))
10411 return blocker_dblinks
# Translate a dblink object back into this scheduler's Package instance,
# mapping its treetype to a package type via RootConfig.tree_pkg_map.
10413 def _dblink_pkg(self, pkg_dblink):
10414 cpv = pkg_dblink.mycpv
10415 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
10416 root_config = self.trees[pkg_dblink.myroot]["root_config"]
10417 installed = type_name == "installed"
10418 return self._pkg(cpv, type_name, root_config, installed=installed)
# Append msg to the log file at log_path.
# NOTE(review): the write/close (or try/finally) lines following the open
# (10422-10426) are omitted from this extract; as shown the handle is
# opened but never explicitly written or closed.
10420 def _append_to_log_path(self, log_path, msg):
10421 f = open(log_path, 'a')
# elog callback for dblink: route an elog message through func(), sending
# output to the package log file when running in background mode.
10427 def _dblink_elog(self, pkg_dblink, phase, func, msgs):
10429 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
# NOTE(review): initialization of 'out'/'log_file' (lines 10430-10433) is
# omitted from this extract.
10432 background = self._background
10434 if background and log_path is not None:
10435 log_file = open(log_path, 'a')
# NOTE(review): lines assigning 'out' from log_file and opening the
# try block / message loop (10436-10439) are omitted; 'msg' below
# presumably iterates over 'msgs'.
10440 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
10442 if log_file is not None:
# NOTE(review): the log_file.close() call (line ~10443) is omitted.
# emerge-log callback for dblink: forward the message to the shared logger.
10445 def _dblink_emerge_log(self, msg):
10446 self._logger.log(msg)
# Display callback for dblink merge output: without a log file, print to
# the terminal (suppressing sub-WARN messages in background mode); with a
# log file, print and also append to the log.
10448 def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
10449 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10450 background = self._background
10452 if log_path is None:
10453 if not (background and level < logging.WARN):
10454 portage.util.writemsg_level(msg,
10455 level=level, noiselevel=noiselevel)
# NOTE(review): the branch separator lines (10456-10457, presumably a
# 'return'/'else:' and a background guard) are omitted from this extract.
10458 portage.util.writemsg_level(msg,
10459 level=level, noiselevel=noiselevel)
10460 self._append_to_log_path(log_path, msg)
# Phase callback for dblink merges: run the named ebuild phase through an
# EbuildPhase task on the scheduler so output handling and concurrency
# stay under scheduler control; returns the phase's exit code.
10462 def _dblink_ebuild_phase(self,
10463 pkg_dblink, pkg_dbapi, ebuild_path, phase):
10465 Using this callback for merge phases allows the scheduler
10466 to run while these phases execute asynchronously, and allows
10467 the scheduler control output handling.
10470 scheduler = self._sched_iface
10471 settings = pkg_dblink.settings
10472 pkg = self._dblink_pkg(pkg_dblink)
10473 background = self._background
10474 log_path = settings.get("PORTAGE_LOG_FILE")
10476 ebuild_phase = EbuildPhase(background=background,
10477 pkg=pkg, phase=phase, scheduler=scheduler,
10478 settings=settings, tree=pkg_dblink.treetype)
# Synchronous from the caller's perspective: start, then block on wait().
10479 ebuild_phase.start()
10480 ebuild_phase.wait()
10482 return ebuild_phase.returncode
# Pre-generate Manifest digests for every ebuild on the merge list when
# --digest or FEATURES=digest requires it, before parallel execution can
# make concurrent digestgen calls interfere.
10484 def _generate_digests(self):
10486 Generate digests if necessary for --digests or FEATURES=digest.
10487 In order to avoid interference, this must done before parallel
10491 if '--fetchonly' in self.myopts:
# NOTE(review): the early return for --fetchonly (line ~10492) is omitted.
10494 digest = '--digest' in self.myopts
10496 for pkgsettings in self.pkgsettings.itervalues():
10497 if 'digest' in pkgsettings.features:
# NOTE(review): lines 10498-10503 (setting digest=True / break / the
# early return when digest stays False) are omitted from this extract.
10504 for x in self._mergelist:
10505 if not isinstance(x, Package) or \
10506 x.type_name != 'ebuild' or \
10507 x.operation != 'merge':
# Only ebuild merges need digests; 'continue' lines after the guards
# (10508, 10512) are omitted from this extract.
10509 pkgsettings = self.pkgsettings[x.root]
10510 if '--digest' not in self.myopts and \
10511 'digest' not in pkgsettings.features:
10513 portdb = x.root_config.trees['porttree'].dbapi
10514 ebuild_path = portdb.findname(x.cpv)
10515 if not ebuild_path:
# NOTE(review): the writemsg_level( opener for this error message
# (line 10516) and the subsequent 'return' (line ~10519) are omitted.
10517 "!!! Could not locate ebuild for '%s'.\n" \
10518 % x.cpv, level=logging.ERROR, noiselevel=-1)
10520 pkgsettings['O'] = os.path.dirname(ebuild_path)
10521 if not portage.digestgen([], pkgsettings, myportdb=portdb):
# NOTE(review): writemsg_level( opener (10522) and the failure/success
# returns (10525-10528, presumably returning 1 / os.EX_OK) are omitted.
10523 "!!! Unable to generate manifest for '%s'.\n" \
10524 % x.cpv, level=logging.ERROR, noiselevel=-1)
# Verify Manifest digests for all ebuilds on the merge list up front so
# failures surface before any merging starts. Skipped unless
# FEATURES=strict, and skipped for fetch-only runs.
10529 def _check_manifests(self):
10530 # Verify all the manifests now so that the user is notified of failure
10531 # as soon as possible.
10532 if "strict" not in self.settings.features or \
10533 "--fetchonly" in self.myopts or \
10534 "--fetch-all-uri" in self.myopts:
# NOTE(review): the early 'return os.EX_OK' (line ~10535) is omitted.
10537 shown_verifying_msg = False
# Per-root cloned configs with PORTAGE_QUIET=1 keep digestcheck output
# from flooding the display.
10538 quiet_settings = {}
10539 for myroot, pkgsettings in self.pkgsettings.iteritems():
10540 quiet_config = portage.config(clone=pkgsettings)
10541 quiet_config["PORTAGE_QUIET"] = "1"
10542 quiet_config.backup_changes("PORTAGE_QUIET")
10543 quiet_settings[myroot] = quiet_config
10546 for x in self._mergelist:
10547 if not isinstance(x, Package) or \
10548 x.type_name != "ebuild":
# NOTE(review): the 'continue' after this guard (line ~10549) is omitted.
10551 if not shown_verifying_msg:
10552 shown_verifying_msg = True
10553 self._status_msg("Verifying ebuild manifests")
10555 root_config = x.root_config
10556 portdb = root_config.trees["porttree"].dbapi
10557 quiet_config = quiet_settings[root_config.root]
10558 quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
10559 if not portage.digestcheck([], quiet_config, strict=True):
# NOTE(review): the failure return (line ~10560, presumably 'return 1')
# and the final 'return os.EX_OK' (~10562) are omitted from this extract.
# Spawn background prefetchers for every merge-list entry except the
# first, queuing them on the fetch queue and recording them so the main
# fetch step can reuse an already-running prefetch.
10564 def _add_prefetchers(self):
10566 if not self._parallel_fetch:
# NOTE(review): the early 'return' after this guard (line ~10567) is
# omitted; the second identical check below is then redundant as shown.
10569 if self._parallel_fetch:
10570 self._status_msg("Starting parallel fetch")
10572 prefetchers = self._prefetchers
10573 getbinpkg = "--getbinpkg" in self.myopts
10575 # In order to avoid "waiting for lock" messages
10576 # at the beginning, which annoy users, never
10577 # spawn a prefetcher for the first package.
10578 for pkg in self._mergelist[1:]:
10579 prefetcher = self._create_prefetcher(pkg)
10580 if prefetcher is not None:
10581 self._task_queues.fetch.add(prefetcher)
10582 prefetchers[pkg] = prefetcher
# Build a background prefetch task for pkg: an EbuildFetcher for ebuilds,
# a BinpkgPrefetcher for remote binary packages with --getbinpkg, or
# nothing for other entries.
10584 def _create_prefetcher(self, pkg):
10586 @return: a prefetcher, or None if not applicable
# NOTE(review): the initialization 'prefetcher = None' (line ~10588) is
# omitted from this extract.
10590 if not isinstance(pkg, Package):
10593 elif pkg.type_name == "ebuild":
# The ConfigPool indirection hands each fetcher a pooled config clone so
# parallel tasks do not share mutable settings.
10595 prefetcher = EbuildFetcher(background=True,
10596 config_pool=self._ConfigPool(pkg.root,
10597 self._allocate_config, self._deallocate_config),
10598 fetchonly=1, logfile=self._fetch_log,
10599 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
10601 elif pkg.type_name == "binary" and \
10602 "--getbinpkg" in self.myopts and \
10603 pkg.root_config.trees["bintree"].isremote(pkg.cpv):
10605 prefetcher = BinpkgPrefetcher(background=True,
10606 pkg=pkg, scheduler=self._sched_iface)
# NOTE(review): the final 'return prefetcher' (line ~10608) is omitted.
# True when the merge list upgrades the running portage instance with
# more packages still to merge afterwards, i.e. emerge will re-exec
# itself mid-run (unless a no-restart option suppresses that).
10610 def _is_restart_scheduled(self):
10612 Check if the merge list contains a replacement
10613 for the current running instance, that will result
10614 in restart after merge.
10616 @returns: True if a restart is scheduled, False otherwise.
10618 if self._opts_no_restart.intersection(self.myopts):
# NOTE(review): the 'return False' after this guard (line ~10619) is
# omitted from this extract.
10621 mergelist = self._mergelist
10623 for i, pkg in enumerate(mergelist):
# A portage upgrade anywhere except the final position forces a restart.
10624 if self._is_restart_necessary(pkg) and \
10625 i != len(mergelist) - 1:
# NOTE(review): the 'return True' inside the loop (~10626) and the
# trailing 'return False' (~10628) are omitted from this extract.
# True when merging pkg replaces the running portage itself (same root,
# matches PORTAGE_PACKAGE_ATOM, and differs from the running version).
10630 def _is_restart_necessary(self, pkg):
10632 @return: True if merging the given package
10633 requires restart, False otherwise.
10636 # Figure out if we need a restart.
10637 if pkg.root == self._running_root.root and \
10638 portage.match_from_list(
10639 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10640 if self._running_portage:
10641 return pkg.cpv != self._running_portage.cpv
# NOTE(review): the fallback returns (lines ~10642-10643, presumably
# 'return True' here and 'return False' at the end) are omitted.
# After merging pkg, re-exec emerge via os.execv() when pkg was a portage
# upgrade and packages remain; the resume list persists progress so the
# new process continues with --resume.
10645 def _restart_if_necessary(self, pkg):
10647 Use execv() to restart emerge. This happens
10648 if portage upgrades itself and there are
10649 remaining packages in the list.
10652 if self._opts_no_restart.intersection(self.myopts):
# NOTE(review): 'return' lines after the three guards (10653, 10656,
# 10659) are omitted from this extract.
10655 if not self._is_restart_necessary(pkg):
10658 if pkg == self._mergelist[-1]:
10661 self._main_loop_cleanup()
10663 logger = self._logger
10664 pkg_count = self._pkg_count
10665 mtimedb = self._mtimedb
10666 bad_resume_opts = self._bad_resume_opts
10668 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
10669 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
10671 logger.log(" *** RESTARTING " + \
10672 "emerge via exec() after change of " + \
10673 "portage version.")
# Drop the just-merged package from the persisted resume list and flush
# it to disk before replacing the process image.
10675 mtimedb["resume"]["mergelist"].remove(list(pkg))
# NOTE(review): the mtimedb.commit() call (line ~10676) is omitted here.
10677 portage.run_exitfuncs()
10678 mynewargv = [sys.argv[0], "--resume"]
10679 resume_opts = self.myopts.copy()
10680 # For automatic resume, we need to prevent
10681 # any of bad_resume_opts from leaking in
10682 # via EMERGE_DEFAULT_OPTS.
10683 resume_opts["--ignore-default-opts"] = True
10684 for myopt, myarg in resume_opts.iteritems():
10685 if myopt not in bad_resume_opts:
# NOTE(review): the 'if myarg is True:'/'else:' lines (10686, 10688)
# distinguishing flag options from valued options are omitted.
10687 mynewargv.append(myopt)
10689 mynewargv.append(myopt +"="+ str(myarg))
10690 # priority only needs to be adjusted on the first run
10691 os.environ["PORTAGE_NICENESS"] = "0"
10692 os.execv(mynewargv[0], mynewargv)
# Body of the Scheduler's top-level merge entry point (its 'def' line,
# around source line 10694, is absent from this extract): announces
# resume, validates PORTAGE_TMPDIR per root, generates/checks digests,
# runs the merge loop with --keep-going retries, then reports failures.
10696 if "--resume" in self.myopts:
10698 portage.writemsg_stdout(
10699 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10700 self._logger.log(" *** Resuming merge...")
10702 self._save_resume_list()
# NOTE(review): the try: opener around _background_mode (line ~10704)
# is omitted; the except clause below pairs with it.
10705 self._background = self._background_mode()
10706 except self._unknown_internal_error:
# NOTE(review): the handler body (~10707, presumably 'return 1') is
# omitted from this extract.
10709 for root in self.trees:
10710 root_config = self.trees[root]["root_config"]
10712 # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10713 # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10714 # for ensuring sane $PWD (bug #239560) and storing elog messages.
10715 tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10716 if not tmpdir or not os.path.isdir(tmpdir):
10717 msg = "The directory specified in your " + \
10718 "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10719 "does not exist. Please create this " + \
10720 "directory or correct your PORTAGE_TMPDIR setting."
10721 msg = textwrap.wrap(msg, 70)
10722 out = portage.output.EOutput()
# NOTE(review): the loop printing 'msg' through out.eerror and the
# 'return 1' (lines 10723-10725) are omitted from this extract.
# In background mode, mark PORTAGE_BACKGROUND=1 in each root's settings
# so spawned phases know to log rather than write to the terminal.
10727 if self._background:
10728 root_config.settings.unlock()
10729 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10730 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10731 root_config.settings.lock()
10733 self.pkgsettings[root] = portage.config(
10734 clone=root_config.settings)
10736 rval = self._generate_digests()
10737 if rval != os.EX_OK:
# NOTE(review): 'return rval' lines after these two checks (10738,
# 10742) are omitted from this extract.
10740 rval = self._check_manifests()
10741 if rval != os.EX_OK:
10744 keep_going = "--keep-going" in self.myopts
10745 fetchonly = self._build_opts.fetchonly
10746 mtimedb = self._mtimedb
10747 failed_pkgs = self._failed_pkgs
# NOTE(review): the 'while True:' retry-loop opener (~10749) is omitted;
# the --keep-going logic below re-runs _merge after pruning failures.
10750 rval = self._merge()
10751 if rval == os.EX_OK or fetchonly or not keep_going:
# NOTE(review): 'break' lines after loop-exit conditions (10752,
# 10754, 10757, 10760, 10772, 10776-10777) are omitted throughout.
10753 if "resume" not in mtimedb:
10755 mergelist = self._mtimedb["resume"].get("mergelist")
10759 if not failed_pkgs:
10762 for failed_pkg in failed_pkgs:
10763 mergelist.remove(list(failed_pkg.pkg))
10765 self._failed_pkgs_all.extend(failed_pkgs)
# NOTE(review): 'del failed_pkgs[:]' (~10766) is omitted here.
10771 if not self._calc_resume_list():
10774 clear_caches(self.trees)
10775 if not self._mergelist:
10778 self._save_resume_list()
10779 self._pkg_count.curval = 0
10780 self._pkg_count.maxval = len([x for x in self._mergelist \
10781 if isinstance(x, Package) and x.operation == "merge"])
10782 self._status_display.maxval = self._pkg_count.maxval
10784 self._logger.log(" *** Finished. Cleaning up...")
10787 self._failed_pkgs_all.extend(failed_pkgs)
# NOTE(review): another 'del failed_pkgs[:]' (~10788) is omitted here.
10790 background = self._background
10791 failure_log_shown = False
10792 if background and len(self._failed_pkgs_all) == 1:
10793 # If only one package failed then just show it's
10794 # whole log for easy viewing.
10795 failed_pkg = self._failed_pkgs_all[-1]
10796 build_dir = failed_pkg.build_dir
# NOTE(review): 'log_file = None' initialization (~10797) is omitted.
10799 log_paths = [failed_pkg.build_log]
10801 log_path = self._locate_failure_log(failed_pkg)
10802 if log_path is not None:
# NOTE(review): the try/except IOError guard around open() (lines
# 10803, 10805-10806) is omitted from this extract.
10804 log_file = open(log_path)
10808 if log_file is not None:
# NOTE(review): the try/finally with log_file.close() (10809,
# 10812-10813) is omitted from this extract.
10810 for line in log_file:
10811 writemsg_level(line, noiselevel=-1)
10814 failure_log_shown = True
10816 # Dump mod_echo output now since it tends to flood the terminal.
10817 # This allows us to avoid having more important output, generated
10818 # later, from being swept away by the mod_echo output.
10819 mod_echo_output = _flush_elog_mod_echo()
10821 if background and not failure_log_shown and \
10822 self._failed_pkgs_all and \
10823 self._failed_pkgs_die_msgs and \
10824 not mod_echo_output:
10826 printer = portage.output.EOutput()
10827 for mysettings, key, logentries in self._failed_pkgs_die_msgs:
# NOTE(review): 'root_msg = ""' default (~10828) is omitted here.
10829 if mysettings["ROOT"] != "/":
10830 root_msg = " merged to %s" % mysettings["ROOT"]
10832 printer.einfo("Error messages for package %s%s:" % \
10833 (colorize("INFORM", key), root_msg))
10835 for phase in portage.const.EBUILD_PHASES:
10836 if phase not in logentries:
# NOTE(review): the 'continue' after this guard (~10837) is omitted.
10838 for msgtype, msgcontent in logentries[phase]:
10839 if isinstance(msgcontent, basestring):
10840 msgcontent = [msgcontent]
10841 for line in msgcontent:
10842 printer.eerror(line.strip("\n"))
10844 if self._post_mod_echo_msgs:
10845 for msg in self._post_mod_echo_msgs:
# NOTE(review): the 'msg()' invocation (~10846) is omitted here.
10848 if len(self._failed_pkgs_all) > 1 or \
10849 (self._failed_pkgs_all and "--keep-going" in self.myopts):
10850 if len(self._failed_pkgs_all) > 1:
10851 msg = "The following %d packages have " % \
10852 len(self._failed_pkgs_all) + \
10853 "failed to build or install:"
# NOTE(review): the 'else:' between the two msg branches (~10854) is
# omitted from this extract.
10855 msg = "The following package has " + \
10856 "failed to build or install:"
10857 prefix = bad(" * ")
10858 writemsg(prefix + "\n", noiselevel=-1)
10859 from textwrap import wrap
10860 for line in wrap(msg, 72):
10861 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10862 writemsg(prefix + "\n", noiselevel=-1)
10863 for failed_pkg in self._failed_pkgs_all:
10864 writemsg("%s\t%s\n" % (prefix,
10865 colorize("INFORM", str(failed_pkg.pkg))),
# NOTE(review): the 'noiselevel=-1)' continuation (~10866) and the
# final 'return rval' of this method (~10868+) are omitted.
10867 writemsg(prefix + "\n", noiselevel=-1)
# elog listener installed during the merge loop: record ERROR-level
# entries so die messages can be replayed after the run.
10871 def _elog_listener(self, mysettings, key, logentries, fulltext):
10872 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
# NOTE(review): the 'if errors:' guard (line ~10873) is omitted from
# this extract.
10874 self._failed_pkgs_die_msgs.append(
10875 (mysettings, key, errors))
# Return the path of a non-empty build log for a failed package, or
# (per the callers' usage) None when no usable log exists.
10877 def _locate_failure_log(self, failed_pkg):
10879 build_dir = failed_pkg.build_dir
10882 log_paths = [failed_pkg.build_log]
10884 for log_path in log_paths:
# NOTE(review): the guards between these lines (existence checks around
# 10885-10888, the try/except around os.stat, and the size check /
# return statements at 10890+) are omitted from this extract.
10889 log_size = os.stat(log_path).st_size
# Seed the pending-package queue from the merge list; Blocker entries are
# recognized but handled by the branch whose body is omitted here.
10900 def _add_packages(self):
10901 pkg_queue = self._pkg_queue
10902 for pkg in self._mergelist:
10903 if isinstance(pkg, Package):
10904 pkg_queue.append(pkg)
10905 elif isinstance(pkg, Blocker):
# NOTE(review): the Blocker-branch body (line ~10906, presumably 'pass')
# is omitted from this extract.
# Start-listener for merges of deep system packages: record their direct,
# unsatisfied runtime dependencies so job scheduling can hold back until
# those deps are merged.
10908 def _system_merge_started(self, merge):
10910 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
10912 graph = self._digraph
# NOTE(review): the 'if graph is None: return' guard (~10913) is omitted.
10915 pkg = merge.merge.pkg
10917 # Skip this if $ROOT != / since it shouldn't matter if there
10918 # are unsatisfied system runtime deps in this case.
10919 if pkg.root != '/':
# NOTE(review): the 'return' after this guard (~10920) is omitted.
10922 completed_tasks = self._completed_tasks
10923 unsatisfied = self._unsatisfied_system_deps
10925 def ignore_non_runtime_or_satisfied(priority):
10927 Ignore non-runtime and satisfied runtime priorities.
10929 if isinstance(priority, DepPriority) and \
10930 not priority.satisfied and \
10931 (priority.runtime or priority.runtime_post):
# NOTE(review): the 'return False' / 'return True' pair of this
# predicate (~10932-10933) is omitted from this extract.
10935 # When checking for unsatisfied runtime deps, only check
10936 # direct deps since indirect deps are checked when the
10937 # corresponding parent is merged.
10938 for child in graph.child_nodes(pkg,
10939 ignore_priority=ignore_non_runtime_or_satisfied):
10940 if not isinstance(child, Package) or \
10941 child.operation == 'uninstall':
# NOTE(review): 'continue' lines after the guards (~10942, ~10944,
# presumably including a 'child is pkg' self-check) are omitted.
10945 if child.operation == 'merge' and \
10946 child not in completed_tasks:
10947 unsatisfied.add(child)
# Exit listener for serialized (merge-wait) merges: drop the task from
# the scheduled set, then run the common merge-exit bookkeeping.
10949 def _merge_wait_exit_handler(self, task):
10950 self._merge_wait_scheduled.remove(task)
10951 self._merge_exit(task)
# Exit listener for PackageMerge tasks: do failure/success bookkeeping,
# return the pooled config, and advance the status display counters.
10953 def _merge_exit(self, merge):
10954 self._do_merge_exit(merge)
10955 self._deallocate_config(merge.merge.settings)
# Only count successful merges of packages that were not already
# installed toward the progress counter.
10956 if merge.returncode == os.EX_OK and \
10957 not merge.merge.pkg.installed:
10958 self._status_display.curval += 1
10959 self._status_display.merges = len(self._task_queues.merge)
# NOTE(review): a trailing call (line ~10960, presumably
# self._schedule()) is omitted from this extract.
# Core merge-exit bookkeeping: on failure, record a _failed_pkg entry and
# report it; on success, mark the task (and any replaced package's
# uninstall task) complete, handle portage self-upgrade restart, and keep
# the on-disk resume list in sync.
10962 def _do_merge_exit(self, merge):
10963 pkg = merge.merge.pkg
10964 if merge.returncode != os.EX_OK:
10965 settings = merge.merge.settings
10966 build_dir = settings.get("PORTAGE_BUILDDIR")
10967 build_log = settings.get("PORTAGE_LOG_FILE")
10969 self._failed_pkgs.append(self._failed_pkg(
10970 build_dir=build_dir, build_log=build_log,
# NOTE(review): the 'pkg=pkg,' argument line (~10971) is omitted here.
10972 returncode=merge.returncode))
10973 self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
10975 self._status_display.failed = len(self._failed_pkgs)
# NOTE(review): the 'return' ending the failure branch (~10976) is
# omitted from this extract.
10978 self._task_complete(pkg)
10979 pkg_to_replace = merge.merge.pkg_to_replace
10980 if pkg_to_replace is not None:
10981 # When a package is replaced, mark it's uninstall
10982 # task complete (if any).
10983 uninst_hash_key = \
10984 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
10985 self._task_complete(uninst_hash_key)
# NOTE(review): lines 10986-10989 (presumably restoring world-file
# state or guarding on pkg.installed) are omitted from this extract.
10990 self._restart_if_necessary(pkg)
10992 # Call mtimedb.commit() after each merge so that
10993 # --resume still works after being interrupted
10994 # by reboot, sigkill or similar.
10995 mtimedb = self._mtimedb
10996 mtimedb["resume"]["mergelist"].remove(list(pkg))
10997 if not mtimedb["resume"]["mergelist"]:
10998 del mtimedb["resume"]
# NOTE(review): the mtimedb.commit() call itself (~10999) is omitted.
# Exit listener for build tasks: on success, wrap the build in a
# PackageMerge — routed through the merge-wait queue for deep system
# packages, otherwise onto the merge queue; on failure, record the
# failed package. Always updates job counters afterwards.
11001 def _build_exit(self, build):
11002 if build.returncode == os.EX_OK:
# NOTE(review): a counter decrement (line ~11003, presumably
# self._jobs -= 1) is omitted from this extract.
11004 merge = PackageMerge(merge=build)
11005 if not build.build_opts.buildpkgonly and \
11006 build.pkg in self._deep_system_deps:
11007 # Since dependencies on system packages are frequently
11008 # unspecified, merge them only when no builds are executing.
11009 self._merge_wait_queue.append(merge)
11010 merge.addStartListener(self._system_merge_started)
# NOTE(review): the 'else:' separating the two queueing strategies
# (line ~11011) is omitted from this extract.
11012 merge.addExitListener(self._merge_exit)
11013 self._task_queues.merge.add(merge)
11014 self._status_display.merges = len(self._task_queues.merge)
# NOTE(review): the 'else:' opening the failure branch (~11015) is
# omitted from this extract.
11016 settings = build.settings
11017 build_dir = settings.get("PORTAGE_BUILDDIR")
11018 build_log = settings.get("PORTAGE_LOG_FILE")
11020 self._failed_pkgs.append(self._failed_pkg(
11021 build_dir=build_dir, build_log=build_log,
# NOTE(review): the 'pkg=build.pkg,' argument line (~11022) is omitted.
11023 returncode=build.returncode))
11024 self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
11026 self._status_display.failed = len(self._failed_pkgs)
11027 self._deallocate_config(build.settings)
# NOTE(review): a jobs decrement (~11028) appears omitted before this
# status update; a trailing self._schedule() (~11030) likewise.
11029 self._status_display.running = self._jobs
# Exit listener for binary-package extraction: identical handling to a
# regular build exit.
11032 def _extract_exit(self, build):
11033 self._build_exit(build)
# Mark a task finished: record completion, clear it from the unsatisfied
# system-dep set, and re-enable package choosing in the scheduler loop.
11035 def _task_complete(self, pkg):
11036 self._completed_tasks.add(pkg)
11037 self._unsatisfied_system_deps.discard(pkg)
11038 self._choose_pkg_return_early = False
# Body of the Scheduler's _merge() driver (its 'def' line, around source
# line 11040, is absent from this extract): install elog/locks hooks,
# run the main loop, then restore global state and derive the exit code.
11042 self._add_prefetchers()
11043 self._add_packages()
11044 pkg_queue = self._pkg_queue
11045 failed_pkgs = self._failed_pkgs
# Globally silence lock messages and capture elog errors while the
# parallel loop runs; both are restored below.
11046 portage.locks._quiet = self._background
11047 portage.elog._emerge_elog_listener = self._elog_listener
# NOTE(review): lines 11048-11052 (presumably rval initialization and a
# try: around self._main_loop()) are omitted from this extract.
11053 self._main_loop_cleanup()
11054 portage.locks._quiet = False
11055 portage.elog._emerge_elog_listener = None
# NOTE(review): the 'if failed_pkgs:' guard (~11056) and the final
# 'return rval' (~11058+) are omitted from this extract.
11057 rval = failed_pkgs[-1].returncode
# Reset all per-run scheduler state after the main loop finishes:
# empty the queue and bookkeeping sets, reset the display, drop the
# digraph, and cancel any still-queued prefetch tasks.
11061 def _main_loop_cleanup(self):
11062 del self._pkg_queue[:]
11063 self._completed_tasks.clear()
11064 self._deep_system_deps.clear()
11065 self._unsatisfied_system_deps.clear()
11066 self._choose_pkg_return_early = False
11067 self._status_display.reset()
11068 self._digraph = None
11069 self._task_queues.fetch.clear()
# Pick the next package whose dependencies allow it to start now.
# Without a digraph, packages are taken FIFO (serialized unless --nodeps
# with parallel jobs). With a digraph, scan the queue for the first entry
# not blocked by a scheduled merge; when none qualifies, set a flag so
# the scan is not repeated until some job completes.
11071 def _choose_pkg(self):
11073 Choose a task that has all it's dependencies satisfied.
11076 if self._choose_pkg_return_early:
# NOTE(review): the 'return None' after this guard (~11077) is omitted.
11079 if self._digraph is None:
11080 if (self._jobs or self._task_queues.merge) and \
11081 not ("--nodeps" in self.myopts and \
11082 (self._max_jobs is True or self._max_jobs > 1)):
11083 self._choose_pkg_return_early = True
# NOTE(review): the 'return None' inside this branch (~11084) is
# omitted from this extract.
11085 return self._pkg_queue.pop(0)
11087 if not (self._jobs or self._task_queues.merge):
11088 return self._pkg_queue.pop(0)
11090 self._prune_digraph()
# NOTE(review): 'chosen_pkg = None' initialization (~11092) is omitted.
# 'later' holds packages queued after the candidate; deps on them are
# ignored since they merge later anyway.
11093 later = set(self._pkg_queue)
11094 for pkg in self._pkg_queue:
# NOTE(review): a 'later.remove(pkg)' (~11095) and the loop body lines
# assigning chosen_pkg / break (~11097-11098) are omitted here.
11096 if not self._dependent_on_scheduled_merges(pkg, later):
11100 if chosen_pkg is not None:
11101 self._pkg_queue.remove(chosen_pkg)
11103 if chosen_pkg is None:
11104 # There's no point in searching for a package to
11105 # choose until at least one of the existing jobs
11107 self._choose_pkg_return_early = True
# NOTE(review): the final 'return chosen_pkg' (~11109) is omitted.
# Depth-first walk of pkg's dependency subgraph to determine whether pkg
# transitively depends on any merge that is scheduled but not complete.
11111 def _dependent_on_scheduled_merges(self, pkg, later):
11113 Traverse the subgraph of the given packages deep dependencies
11114 to see if it contains any scheduled merges.
11115 @param pkg: a package to check dependencies for
11117 @param later: packages for which dependence should be ignored
11118 since they will be merged later than pkg anyway and therefore
11119 delaying the merge of pkg will not result in a more optimal
11123 @returns: True if the package is dependent, False otherwise.
11126 graph = self._digraph
11127 completed_tasks = self._completed_tasks
# NOTE(review): 'dependent = False' initialization (~11129) is omitted.
11130 traversed_nodes = set([pkg])
11131 direct_deps = graph.child_nodes(pkg)
11132 node_stack = direct_deps
11133 direct_deps = frozenset(direct_deps)
# NOTE(review): the 'while node_stack:' loop opener (~11134) is omitted.
11135 node = node_stack.pop()
11136 if node in traversed_nodes:
# NOTE(review): the 'continue' after this guard (~11137) is omitted.
11138 traversed_nodes.add(node)
# A node only counts as a blocking dependency if it is a real pending
# merge: installed-nomerge nodes, non-direct uninstalls, completed
# tasks, and 'later' members are all exempt.
11139 if not ((node.installed and node.operation == "nomerge") or \
11140 (node.operation == "uninstall" and \
11141 node not in direct_deps) or \
11142 node in completed_tasks or \
# NOTE(review): the 'node in later' clause, 'dependent = True' and
# 'break' (~11143-11145) are omitted from this extract.
11146 node_stack.extend(graph.child_nodes(node))
# NOTE(review): the final 'return dependent' (~11148) is omitted.
# Hand out a config instance for a parallel task, reusing a pooled clone
# for the given root when available and cloning a fresh one otherwise.
11150 def _allocate_config(self, root):
11152 Allocate a unique config instance for a task in order
11153 to prevent interference between parallel tasks.
11155 if self._config_pool[root]:
11156 temp_settings = self._config_pool[root].pop()
# NOTE(review): the 'else:' between pool-hit and clone paths (~11157)
# is omitted from this extract.
11158 temp_settings = portage.config(clone=self.pkgsettings[root])
11159 # Since config.setcpv() isn't guaranteed to call config.reset() due to
11160 # performance reasons, call it here to make sure all settings from the
11161 # previous package get flushed out (such as PORTAGE_LOG_FILE).
11162 temp_settings.reload()
11163 temp_settings.reset()
11164 return temp_settings
# Return a task's config instance to the per-root pool for reuse by
# _allocate_config.
11166 def _deallocate_config(self, settings):
11167 self._config_pool[settings["ROOT"]].append(settings)
# The scheduler's main event loop: keep scheduling tasks and servicing
# poll events until no work remains; forced to --jobs=1 when a restart
# is pending or a no-background option is set.
11169 def _main_loop(self):
11171 # Only allow 1 job max if a restart is scheduled
11172 # due to portage update.
11173 if self._is_restart_scheduled() or \
11174 self._opts_no_background.intersection(self.myopts):
11175 self._set_max_jobs(1)
11177 merge_queue = self._task_queues.merge
11179 while self._schedule():
11180 if self._poll_event_handlers:
# NOTE(review): the handler-servicing calls inside both branches
# (~11181-11184, presumably self._poll_loop()/self._schedule()) are
# omitted from this extract.
11185 if not (self._jobs or merge_queue):
# NOTE(review): the 'break' / drain loop around these lines
# (~11186, 11188-11189) is omitted from this extract.
11187 if self._poll_event_handlers:
# True while packages remain queued and no failure has occurred (unless
# fetch-only, where failures don't stop scheduling).
11190 def _keep_scheduling(self):
11191 return bool(self._pkg_queue and \
11192 not (self._failed_pkgs and not self._build_opts.fetchonly))
# One scheduling step: flush deferred system-package merges once the job
# count hits zero, schedule new work, refresh the display, and cancel
# leftover prefetchers when a failure means they are the only work left.
11194 def _schedule_tasks(self):
11196 # When the number of jobs drops to zero, process all waiting merges.
11197 if not self._jobs and self._merge_wait_queue:
11198 for task in self._merge_wait_queue:
11199 task.addExitListener(self._merge_wait_exit_handler)
11200 self._task_queues.merge.add(task)
11201 self._status_display.merges = len(self._task_queues.merge)
11202 self._merge_wait_scheduled.extend(self._merge_wait_queue)
11203 del self._merge_wait_queue[:]
11205 self._schedule_tasks_imp()
11206 self._status_display.display()
# NOTE(review): lines 11207-11212 (presumably state_change tracking and
# a schedule() sweep over the queues) are omitted from this extract.
11209 for q in self._task_queues.values():
11213 # Cancel prefetchers if they're the only reason
11214 # the main poll loop is still running.
11215 if self._failed_pkgs and not self._build_opts.fetchonly and \
11216 not (self._jobs or self._task_queues.merge) and \
11217 self._task_queues.fetch:
11218 self._task_queues.fetch.clear()
# NOTE(review): lines 11219-11221 (presumably a state_change branch
# re-running the scheduling pass) are omitted from this extract.
11222 self._schedule_tasks_imp()
11223 self._status_display.display()
11225 return self._keep_scheduling()
# Load-throttling: when --load-average is in effect, impose a delay that
# grows with the current job count before starting the next job.
11227 def _job_delay(self):
11230 @returns: True if job scheduling should be delayed, False otherwise.
11233 if self._jobs and self._max_load is not None:
11235 current_time = time.time()
# Delay grows polynomially with the job count, capped at the maximum.
11237 delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
11238 if delay > self._job_delay_max:
11239 delay = self._job_delay_max
11240 if (current_time - self._previous_job_start_time) < delay:
# NOTE(review): the 'return True' inside this branch (~11241) and the
# trailing 'return False' (~11243) are omitted from this extract.
# Inner scheduling pass: repeatedly choose a startable package and launch
# its task, stopping when no job slot, no candidate, or a blocking
# condition (waiting merges, unsatisfied system deps, load delay) exists.
11245 def _schedule_tasks_imp(self):
11248 @returns: True if state changed, False otherwise.
# NOTE(review): 'state_change = 0' initialization and the 'while True:'
# opener (~11250-11253) are omitted from this extract.
11255 if not self._keep_scheduling():
11256 return bool(state_change)
11258 if self._choose_pkg_return_early or \
11259 self._merge_wait_scheduled or \
11260 (self._jobs and self._unsatisfied_system_deps) or \
11261 not self._can_add_job() or \
# NOTE(review): the final 'self._job_delay()' clause of this condition
# (~11262) is omitted from this extract.
11263 return bool(state_change)
11265 pkg = self._choose_pkg()
# NOTE(review): the 'if pkg is None:' guard (~11266) and the
# state_change increment (~11269) are omitted from this extract.
11267 return bool(state_change)
11271 if not pkg.installed:
11272 self._pkg_count.curval += 1
11274 task = self._task(pkg)
# Installed packages get a bare PackageMerge (uninstall); new packages
# become build/extract jobs counted against the jobs queue.
# NOTE(review): the 'if pkg.installed:' branch opener (~11275-11276) is
# omitted from this extract.
11277 merge = PackageMerge(merge=task)
11278 merge.addExitListener(self._merge_exit)
11279 self._task_queues.merge.add(merge)
# NOTE(review): the 'elif pkg.built:' opener and jobs increment
# (~11281-11282) are omitted from this extract.
11283 self._previous_job_start_time = time.time()
11284 self._status_display.running = self._jobs
11285 task.addExitListener(self._extract_exit)
11286 self._task_queues.jobs.add(task)
# NOTE(review): the 'else:' opener and jobs increment (~11288-11289)
# are omitted from this extract.
11290 self._previous_job_start_time = time.time()
11291 self._status_display.running = self._jobs
11292 task.addExitListener(self._build_exit)
11293 self._task_queues.jobs.add(task)
11295 return bool(state_change)
# Construct the MergeListItem task for pkg, resolving the currently
# installed package in the same slot (if any) as pkg_to_replace.
11297 def _task(self, pkg):
11299 pkg_to_replace = None
11300 if pkg.operation != "uninstall":
11301 vardb = pkg.root_config.trees["vartree"].dbapi
11302 previous_cpv = vardb.match(pkg.slot_atom)
# NOTE(review): the 'if previous_cpv:' guard (~11303) is omitted from
# this extract.
11304 previous_cpv = previous_cpv.pop()
11305 pkg_to_replace = self._pkg(previous_cpv,
11306 "installed", pkg.root_config, installed=True)
11308 task = MergeListItem(args_set=self._args_set,
11309 background=self._background, binpkg_opts=self._binpkg_opts,
11310 build_opts=self._build_opts,
11311 config_pool=self._ConfigPool(pkg.root,
11312 self._allocate_config, self._deallocate_config),
11313 emerge_opts=self.myopts,
11314 find_blockers=self._find_blockers(pkg), logger=self._logger,
11315 mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
11316 pkg_to_replace=pkg_to_replace,
11317 prefetcher=self._prefetchers.get(pkg),
11318 scheduler=self._sched_iface,
11319 settings=self._allocate_config(pkg.root),
11320 statusMessage=self._status_msg,
11321 world_atom=self._world_atom)
# NOTE(review): the final 'return task' (~11323) is omitted from this
# extract.
# Report a failed package through the status display, including its root
# (when not "/") and the located log file path when one exists.
11325 def _failed_pkg_msg(self, failed_pkg, action, preposition):
11326 pkg = failed_pkg.pkg
11327 msg = "%s to %s %s" % \
11328 (bad("Failed"), action, colorize("INFORM", pkg.cpv))
11329 if pkg.root != "/":
11330 msg += " %s %s" % (preposition, pkg.root)
11332 log_path = self._locate_failure_log(failed_pkg)
11333 if log_path is not None:
11334 msg += ", Log file:"
11335 self._status_msg(msg)
# The path is emitted as a separate status line after the summary.
11337 if log_path is not None:
11338 self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
# Emit a single-line status message via the status display; in
# foreground mode a newline is written first so the message is not
# appended to partial build output on the current line.
11340 def _status_msg(self, msg):
11342 Display a brief status message (no newlines) in the status display.
11343 This is called by tasks to provide feedback to the user. This
11344 delegates the resposibility of generating \r and \n control characters,
11345 to guarantee that lines are created or erased when necessary and
11349 @param msg: a brief status message (no newlines allowed)
11351 if not self._background:
11352 writemsg_level("\n")
11353 self._status_display.displayMessage(msg)
# Persist the pending merge operations into mtimedb["resume"] so an
# interrupted run can continue with --resume; saved early so a later
# Manifest failure can still be skipped with --skipfirst.
11355 def _save_resume_list(self):
11357 Do this before verifying the ebuild Manifests since it might
11358 be possible for the user to use --resume --skipfirst get past
11359 a non-essential package with a broken digest.
11361 mtimedb = self._mtimedb
# Only merge operations are resumable; each entry is serialized via
# list(pkg) into the plain-list form mtimedb stores.
11362 mtimedb["resume"]["mergelist"] = [list(x) \
11363 for x in self._mergelist \
11364 if isinstance(x, Package) and x.operation == "merge"]
# NOTE(review): the mtimedb.commit() call (~11366) is omitted from this
# extract.
11368 def _calc_resume_list(self):
11370 Use the current resume list to calculate a new one,
11371 dropping any packages with unsatisfied deps.
11373 @returns: True if successful, False otherwise.
11375 print colorize("GOOD", "*** Resuming merge...")
11377 if self._show_list():
11378 if "--tree" in self.myopts:
11379 portage.writemsg_stdout("\n" + \
11380 darkgreen("These are the packages that " + \
11381 "would be merged, in reverse order:\n\n"))
11384 portage.writemsg_stdout("\n" + \
11385 darkgreen("These are the packages that " + \
11386 "would be merged, in order:\n\n"))
11388 show_spinner = "--quiet" not in self.myopts and \
11389 "--nodeps" not in self.myopts
11392 print "Calculating dependencies ",
11394 myparams = create_depgraph_params(self.myopts, None)
11398 success, mydepgraph, dropped_tasks = resume_depgraph(
11399 self.settings, self.trees, self._mtimedb, self.myopts,
11400 myparams, self._spinner)
11401 except depgraph.UnsatisfiedResumeDep, exc:
11402 # rename variable to avoid python-3.0 error:
11403 # SyntaxError: can not delete variable 'e' referenced in nested
11406 mydepgraph = e.depgraph
11407 dropped_tasks = set()
11410 print "\b\b... done!"
11413 def unsatisfied_resume_dep_msg():
11414 mydepgraph.display_problems()
11415 out = portage.output.EOutput()
11416 out.eerror("One or more packages are either masked or " + \
11417 "have missing dependencies:")
11420 show_parents = set()
11421 for dep in e.value:
11422 if dep.parent in show_parents:
11424 show_parents.add(dep.parent)
11425 if dep.atom is None:
11426 out.eerror(indent + "Masked package:")
11427 out.eerror(2 * indent + str(dep.parent))
11430 out.eerror(indent + str(dep.atom) + " pulled in by:")
11431 out.eerror(2 * indent + str(dep.parent))
11433 msg = "The resume list contains packages " + \
11434 "that are either masked or have " + \
11435 "unsatisfied dependencies. " + \
11436 "Please restart/continue " + \
11437 "the operation manually, or use --skipfirst " + \
11438 "to skip the first package in the list and " + \
11439 "any other packages that may be " + \
11440 "masked or have missing dependencies."
11441 for line in textwrap.wrap(msg, 72):
11443 self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
11446 if success and self._show_list():
11447 mylist = mydepgraph.altlist()
11449 if "--tree" in self.myopts:
11451 mydepgraph.display(mylist, favorites=self._favorites)
11454 self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11456 mydepgraph.display_problems()
11458 mylist = mydepgraph.altlist()
11459 mydepgraph.break_refs(mylist)
11460 mydepgraph.break_refs(dropped_tasks)
11461 self._mergelist = mylist
11462 self._set_digraph(mydepgraph.schedulerGraph())
11465 for task in dropped_tasks:
11466 if not (isinstance(task, Package) and task.operation == "merge"):
11469 msg = "emerge --keep-going:" + \
11471 if pkg.root != "/":
11472 msg += " for %s" % (pkg.root,)
11473 msg += " dropped due to unsatisfied dependency."
11474 for line in textwrap.wrap(msg, msg_width):
11475 eerror(line, phase="other", key=pkg.cpv)
11476 settings = self.pkgsettings[pkg.root]
11477 # Ensure that log collection from $T is disabled inside
11478 # elog_process(), since any logs that might exist are
11480 settings.pop("T", None)
11481 portage.elog.elog_process(pkg.cpv, settings)
11482 self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
11486 def _show_list(self):
11487 myopts = self.myopts
11488 if "--quiet" not in myopts and \
11489 ("--ask" in myopts or "--tree" in myopts or \
11490 "--verbose" in myopts):
11494 def _world_atom(self, pkg):
11496 Add the package to the world file, but only if
11497 it's supposed to be added. Otherwise, do nothing.
11500 if set(("--buildpkgonly", "--fetchonly",
11502 "--oneshot", "--onlydeps",
11503 "--pretend")).intersection(self.myopts):
11506 if pkg.root != self.target_root:
11509 args_set = self._args_set
11510 if not args_set.findAtomForPackage(pkg):
11513 logger = self._logger
11514 pkg_count = self._pkg_count
11515 root_config = pkg.root_config
11516 world_set = root_config.sets["world"]
11517 world_locked = False
11518 if hasattr(world_set, "lock"):
11520 world_locked = True
11523 if hasattr(world_set, "load"):
11524 world_set.load() # maybe it's changed on disk
11526 atom = create_world_atom(pkg, args_set, root_config)
11528 if hasattr(world_set, "add"):
11529 self._status_msg(('Recording %s in "world" ' + \
11530 'favorites file...') % atom)
11531 logger.log(" === (%s of %s) Updating world file (%s)" % \
11532 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11533 world_set.add(atom)
11535 writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11536 (atom,), level=logging.WARN, noiselevel=-1)
11541 def _pkg(self, cpv, type_name, root_config, installed=False):
11543 Get a package instance from the cache, or create a new
11544 one if necessary. Raises KeyError from aux_get if it
11545 failures for some reason (package does not exist or is
11548 operation = "merge"
11550 operation = "nomerge"
11552 if self._digraph is not None:
11553 # Reuse existing instance when available.
11554 pkg = self._digraph.get(
11555 (type_name, root_config.root, cpv, operation))
11556 if pkg is not None:
11559 tree_type = depgraph.pkg_tree_map[type_name]
11560 db = root_config.trees[tree_type].dbapi
11561 db_keys = list(self.trees[root_config.root][
11562 tree_type].dbapi._aux_cache_keys)
11563 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11564 pkg = Package(cpv=cpv, metadata=metadata,
11565 root_config=root_config, installed=installed)
11566 if type_name == "ebuild":
11567 settings = self.pkgsettings[root_config.root]
11568 settings.setcpv(pkg)
11569 pkg.metadata["USE"] = settings["PORTAGE_USE"]
11570 pkg.metadata['CHOST'] = settings.get('CHOST', '')
11574 class MetadataRegen(PollScheduler):
11576 def __init__(self, portdb, cp_iter=None, consumer=None,
11577 max_jobs=None, max_load=None):
11578 PollScheduler.__init__(self)
11579 self._portdb = portdb
11580 self._global_cleanse = False
11581 if cp_iter is None:
11582 cp_iter = self._iter_every_cp()
11583 # We can globally cleanse stale cache only if we
11584 # iterate over every single cp.
11585 self._global_cleanse = True
11586 self._cp_iter = cp_iter
11587 self._consumer = consumer
11589 if max_jobs is None:
11592 self._max_jobs = max_jobs
11593 self._max_load = max_load
11594 self._sched_iface = self._sched_iface_class(
11595 register=self._register,
11596 schedule=self._schedule_wait,
11597 unregister=self._unregister)
11599 self._valid_pkgs = set()
11600 self._cp_set = set()
11601 self._process_iter = self._iter_metadata_processes()
11602 self.returncode = os.EX_OK
11603 self._error_count = 0
11605 def _iter_every_cp(self):
11606 every_cp = self._portdb.cp_all()
11607 every_cp.sort(reverse=True)
11610 yield every_cp.pop()
11614 def _iter_metadata_processes(self):
11615 portdb = self._portdb
11616 valid_pkgs = self._valid_pkgs
11617 cp_set = self._cp_set
11618 consumer = self._consumer
11620 for cp in self._cp_iter:
11622 portage.writemsg_stdout("Processing %s\n" % cp)
11623 cpv_list = portdb.cp_list(cp)
11624 for cpv in cpv_list:
11625 valid_pkgs.add(cpv)
11626 ebuild_path, repo_path = portdb.findname2(cpv)
11627 metadata, st, emtime = portdb._pull_valid_cache(
11628 cpv, ebuild_path, repo_path)
11629 if metadata is not None:
11630 if consumer is not None:
11631 consumer(cpv, ebuild_path,
11632 repo_path, metadata)
11635 yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
11636 ebuild_mtime=emtime,
11637 metadata_callback=portdb._metadata_callback,
11638 portdb=portdb, repo_path=repo_path,
11639 settings=portdb.doebuild_settings)
11643 portdb = self._portdb
11644 from portage.cache.cache_errors import CacheError
11647 while self._schedule():
11653 if self._global_cleanse:
11654 for mytree in portdb.porttrees:
11656 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11657 except CacheError, e:
11658 portage.writemsg("Error listing cache entries for " + \
11659 "'%s': %s, continuing...\n" % (mytree, e),
11665 cp_set = self._cp_set
11666 cpv_getkey = portage.cpv_getkey
11667 for mytree in portdb.porttrees:
11669 dead_nodes[mytree] = set(cpv for cpv in \
11670 portdb.auxdb[mytree].iterkeys() \
11671 if cpv_getkey(cpv) in cp_set)
11672 except CacheError, e:
11673 portage.writemsg("Error listing cache entries for " + \
11674 "'%s': %s, continuing...\n" % (mytree, e),
11681 for y in self._valid_pkgs:
11682 for mytree in portdb.porttrees:
11683 if portdb.findname2(y, mytree=mytree)[0]:
11684 dead_nodes[mytree].discard(y)
11686 for mytree, nodes in dead_nodes.iteritems():
11687 auxdb = portdb.auxdb[mytree]
11691 except (KeyError, CacheError):
11694 def _schedule_tasks(self):
11697 @returns: True if there may be remaining tasks to schedule,
11700 while self._can_add_job():
11702 metadata_process = self._process_iter.next()
11703 except StopIteration:
11707 metadata_process.scheduler = self._sched_iface
11708 metadata_process.addExitListener(self._metadata_exit)
11709 metadata_process.start()
11712 def _metadata_exit(self, metadata_process):
11714 if metadata_process.returncode != os.EX_OK:
11715 self.returncode = 1
11716 self._error_count += 1
11717 self._valid_pkgs.discard(metadata_process.cpv)
11718 portage.writemsg("Error processing %s, continuing...\n" % \
11719 (metadata_process.cpv,), noiselevel=-1)
11721 if self._consumer is not None:
11722 # On failure, still notify the consumer (in this case the metadata
11723 # argument is None).
11724 self._consumer(metadata_process.cpv,
11725 metadata_process.ebuild_path,
11726 metadata_process.repo_path,
11727 metadata_process.metadata)
11731 class UninstallFailure(portage.exception.PortageException):
11733 An instance of this class is raised by unmerge() when
11734 an uninstallation fails.
11737 def __init__(self, *pargs):
11738 portage.exception.PortageException.__init__(self, pargs)
11740 self.status = pargs[0]
11742 def unmerge(root_config, myopts, unmerge_action,
11743 unmerge_files, ldpath_mtimes, autoclean=0,
11744 clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11745 scheduler=None, writemsg_level=portage.util.writemsg_level):
11747 quiet = "--quiet" in myopts
11748 settings = root_config.settings
11749 sets = root_config.sets
11750 vartree = root_config.trees["vartree"]
11751 candidate_catpkgs=[]
11753 xterm_titles = "notitles" not in settings.features
11754 out = portage.output.EOutput()
11756 db_keys = list(vartree.dbapi._aux_cache_keys)
11759 pkg = pkg_cache.get(cpv)
11761 pkg = Package(cpv=cpv, installed=True,
11762 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11763 root_config=root_config,
11764 type_name="installed")
11765 pkg_cache[cpv] = pkg
11768 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11770 # At least the parent needs to exist for the lock file.
11771 portage.util.ensure_dirs(vdb_path)
11772 except portage.exception.PortageException:
11776 if os.access(vdb_path, os.W_OK):
11777 vdb_lock = portage.locks.lockdir(vdb_path)
11778 realsyslist = sets["system"].getAtoms()
11780 for x in realsyslist:
11781 mycp = portage.dep_getkey(x)
11782 if mycp in settings.getvirtuals():
11784 for provider in settings.getvirtuals()[mycp]:
11785 if vartree.dbapi.match(provider):
11786 providers.append(provider)
11787 if len(providers) == 1:
11788 syslist.extend(providers)
11790 syslist.append(mycp)
11792 mysettings = portage.config(clone=settings)
11794 if not unmerge_files:
11795 if unmerge_action == "unmerge":
11797 print bold("emerge unmerge") + " can only be used with specific package names"
11803 localtree = vartree
11804 # process all arguments and add all
11805 # valid db entries to candidate_catpkgs
11807 if not unmerge_files:
11808 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11810 #we've got command-line arguments
11811 if not unmerge_files:
11812 print "\nNo packages to unmerge have been provided.\n"
11814 for x in unmerge_files:
11815 arg_parts = x.split('/')
11816 if x[0] not in [".","/"] and \
11817 arg_parts[-1][-7:] != ".ebuild":
11818 #possible cat/pkg or dep; treat as such
11819 candidate_catpkgs.append(x)
11820 elif unmerge_action in ["prune","clean"]:
11821 print "\n!!! Prune and clean do not accept individual" + \
11822 " ebuilds as arguments;\n skipping.\n"
11825 # it appears that the user is specifying an installed
11826 # ebuild and we're in "unmerge" mode, so it's ok.
11827 if not os.path.exists(x):
11828 print "\n!!! The path '"+x+"' doesn't exist.\n"
11831 absx = os.path.abspath(x)
11832 sp_absx = absx.split("/")
11833 if sp_absx[-1][-7:] == ".ebuild":
11835 absx = "/".join(sp_absx)
11837 sp_absx_len = len(sp_absx)
11839 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11840 vdb_len = len(vdb_path)
11842 sp_vdb = vdb_path.split("/")
11843 sp_vdb_len = len(sp_vdb)
11845 if not os.path.exists(absx+"/CONTENTS"):
11846 print "!!! Not a valid db dir: "+str(absx)
11849 if sp_absx_len <= sp_vdb_len:
11850 # The Path is shorter... so it can't be inside the vdb.
11853 print "\n!!!",x,"cannot be inside "+ \
11854 vdb_path+"; aborting.\n"
11857 for idx in range(0,sp_vdb_len):
11858 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11861 print "\n!!!", x, "is not inside "+\
11862 vdb_path+"; aborting.\n"
11865 print "="+"/".join(sp_absx[sp_vdb_len:])
11866 candidate_catpkgs.append(
11867 "="+"/".join(sp_absx[sp_vdb_len:]))
11870 if (not "--quiet" in myopts):
11872 if settings["ROOT"] != "/":
11873 writemsg_level(darkgreen(newline+ \
11874 ">>> Using system located in ROOT tree %s\n" % \
11877 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11878 not ("--quiet" in myopts):
11879 writemsg_level(darkgreen(newline+\
11880 ">>> These are the packages that would be unmerged:\n"))
11882 # Preservation of order is required for --depclean and --prune so
11883 # that dependencies are respected. Use all_selected to eliminate
11884 # duplicate packages since the same package may be selected by
11887 all_selected = set()
11888 for x in candidate_catpkgs:
11889 # cycle through all our candidate deps and determine
11890 # what will and will not get unmerged
11892 mymatch = vartree.dbapi.match(x)
11893 except portage.exception.AmbiguousPackageName, errpkgs:
11894 print "\n\n!!! The short ebuild name \"" + \
11895 x + "\" is ambiguous. Please specify"
11896 print "!!! one of the following fully-qualified " + \
11897 "ebuild names instead:\n"
11898 for i in errpkgs[0]:
11899 print " " + green(i)
11903 if not mymatch and x[0] not in "<>=~":
11904 mymatch = localtree.dep_match(x)
11906 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11907 (x, unmerge_action), noiselevel=-1)
11911 {"protected": set(), "selected": set(), "omitted": set()})
11912 mykey = len(pkgmap) - 1
11913 if unmerge_action=="unmerge":
11915 if y not in all_selected:
11916 pkgmap[mykey]["selected"].add(y)
11917 all_selected.add(y)
11918 elif unmerge_action == "prune":
11919 if len(mymatch) == 1:
11921 best_version = mymatch[0]
11922 best_slot = vartree.getslot(best_version)
11923 best_counter = vartree.dbapi.cpv_counter(best_version)
11924 for mypkg in mymatch[1:]:
11925 myslot = vartree.getslot(mypkg)
11926 mycounter = vartree.dbapi.cpv_counter(mypkg)
11927 if (myslot == best_slot and mycounter > best_counter) or \
11928 mypkg == portage.best([mypkg, best_version]):
11929 if myslot == best_slot:
11930 if mycounter < best_counter:
11931 # On slot collision, keep the one with the
11932 # highest counter since it is the most
11933 # recently installed.
11935 best_version = mypkg
11937 best_counter = mycounter
11938 pkgmap[mykey]["protected"].add(best_version)
11939 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11940 if mypkg != best_version and mypkg not in all_selected)
11941 all_selected.update(pkgmap[mykey]["selected"])
11943 # unmerge_action == "clean"
11945 for mypkg in mymatch:
11946 if unmerge_action == "clean":
11947 myslot = localtree.getslot(mypkg)
11949 # since we're pruning, we don't care about slots
11950 # and put all the pkgs in together
11952 if myslot not in slotmap:
11953 slotmap[myslot] = {}
11954 slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11956 for mypkg in vartree.dbapi.cp_list(
11957 portage.dep_getkey(mymatch[0])):
11958 myslot = vartree.getslot(mypkg)
11959 if myslot not in slotmap:
11960 slotmap[myslot] = {}
11961 slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
11963 for myslot in slotmap:
11964 counterkeys = slotmap[myslot].keys()
11965 if not counterkeys:
11968 pkgmap[mykey]["protected"].add(
11969 slotmap[myslot][counterkeys[-1]])
11970 del counterkeys[-1]
11972 for counter in counterkeys[:]:
11973 mypkg = slotmap[myslot][counter]
11974 if mypkg not in mymatch:
11975 counterkeys.remove(counter)
11976 pkgmap[mykey]["protected"].add(
11977 slotmap[myslot][counter])
11979 #be pretty and get them in order of merge:
11980 for ckey in counterkeys:
11981 mypkg = slotmap[myslot][ckey]
11982 if mypkg not in all_selected:
11983 pkgmap[mykey]["selected"].add(mypkg)
11984 all_selected.add(mypkg)
11985 # ok, now the last-merged package
11986 # is protected, and the rest are selected
11987 numselected = len(all_selected)
11988 if global_unmerge and not numselected:
11989 portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11992 if not numselected:
11993 portage.writemsg_stdout(
11994 "\n>>> No packages selected for removal by " + \
11995 unmerge_action + "\n")
11999 vartree.dbapi.flush_cache()
12000 portage.locks.unlockdir(vdb_lock)
12002 from portage.sets.base import EditablePackageSet
12004 # generate a list of package sets that are directly or indirectly listed in "world",
12005 # as there is no persistent list of "installed" sets
12006 installed_sets = ["world"]
12011 pos = len(installed_sets)
12012 for s in installed_sets[pos - 1:]:
12015 candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
12018 installed_sets += candidates
12019 installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
12022 # we don't want to unmerge packages that are still listed in user-editable package sets
12023 # listed in "world" as they would be remerged on the next update of "world" or the
12024 # relevant package sets.
12025 unknown_sets = set()
12026 for cp in xrange(len(pkgmap)):
12027 for cpv in pkgmap[cp]["selected"].copy():
12031 # It could have been uninstalled
12032 # by a concurrent process.
12035 if unmerge_action != "clean" and \
12036 root_config.root == "/" and \
12037 portage.match_from_list(
12038 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
12039 msg = ("Not unmerging package %s since there is no valid " + \
12040 "reason for portage to unmerge itself.") % (pkg.cpv,)
12041 for line in textwrap.wrap(msg, 75):
12043 # adjust pkgmap so the display output is correct
12044 pkgmap[cp]["selected"].remove(cpv)
12045 all_selected.remove(cpv)
12046 pkgmap[cp]["protected"].add(cpv)
12050 for s in installed_sets:
12051 # skip sets that the user requested to unmerge, and skip world
12052 # unless we're unmerging a package set (as the package would be
12053 # removed from "world" later on)
12054 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
12058 if s in unknown_sets:
12060 unknown_sets.add(s)
12061 out = portage.output.EOutput()
12062 out.eerror(("Unknown set '@%s' in " + \
12063 "%svar/lib/portage/world_sets") % \
12064 (s, root_config.root))
12067 # only check instances of EditablePackageSet as other classes are generally used for
12068 # special purposes and can be ignored here (and are usually generated dynamically, so the
12069 # user can't do much about them anyway)
12070 if isinstance(sets[s], EditablePackageSet):
12072 # This is derived from a snippet of code in the
12073 # depgraph._iter_atoms_for_pkg() method.
12074 for atom in sets[s].iterAtomsForPackage(pkg):
12075 inst_matches = vartree.dbapi.match(atom)
12076 inst_matches.reverse() # descending order
12078 for inst_cpv in inst_matches:
12080 inst_pkg = _pkg(inst_cpv)
12082 # It could have been uninstalled
12083 # by a concurrent process.
12086 if inst_pkg.cp != atom.cp:
12088 if pkg >= inst_pkg:
12089 # This is descending order, and we're not
12090 # interested in any versions <= pkg given.
12092 if pkg.slot_atom != inst_pkg.slot_atom:
12093 higher_slot = inst_pkg
12095 if higher_slot is None:
12099 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
12100 #print colorize("WARN", "but still listed in the following package sets:")
12101 #print " %s\n" % ", ".join(parents)
12102 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
12103 print colorize("WARN", "still referenced by the following package sets:")
12104 print " %s\n" % ", ".join(parents)
12105 # adjust pkgmap so the display output is correct
12106 pkgmap[cp]["selected"].remove(cpv)
12107 all_selected.remove(cpv)
12108 pkgmap[cp]["protected"].add(cpv)
12112 numselected = len(all_selected)
12113 if not numselected:
12115 "\n>>> No packages selected for removal by " + \
12116 unmerge_action + "\n")
12119 # Unmerge order only matters in some cases
12123 selected = d["selected"]
12126 cp = portage.cpv_getkey(iter(selected).next())
12127 cp_dict = unordered.get(cp)
12128 if cp_dict is None:
12130 unordered[cp] = cp_dict
12133 for k, v in d.iteritems():
12134 cp_dict[k].update(v)
12135 pkgmap = [unordered[cp] for cp in sorted(unordered)]
12137 for x in xrange(len(pkgmap)):
12138 selected = pkgmap[x]["selected"]
12141 for mytype, mylist in pkgmap[x].iteritems():
12142 if mytype == "selected":
12144 mylist.difference_update(all_selected)
12145 cp = portage.cpv_getkey(iter(selected).next())
12146 for y in localtree.dep_match(cp):
12147 if y not in pkgmap[x]["omitted"] and \
12148 y not in pkgmap[x]["selected"] and \
12149 y not in pkgmap[x]["protected"] and \
12150 y not in all_selected:
12151 pkgmap[x]["omitted"].add(y)
12152 if global_unmerge and not pkgmap[x]["selected"]:
12153 #avoid cluttering the preview printout with stuff that isn't getting unmerged
12155 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
12156 writemsg_level(colorize("BAD","\a\n\n!!! " + \
12157 "'%s' is part of your system profile.\n" % cp),
12158 level=logging.WARNING, noiselevel=-1)
12159 writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
12160 "be damaging to your system.\n\n"),
12161 level=logging.WARNING, noiselevel=-1)
12162 if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
12163 countdown(int(settings["EMERGE_WARNING_DELAY"]),
12164 colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
12166 writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
12168 writemsg_level(bold(cp) + ": ", noiselevel=-1)
12169 for mytype in ["selected","protected","omitted"]:
12171 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
12172 if pkgmap[x][mytype]:
12173 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
12174 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12175 for pn, ver, rev in sorted_pkgs:
12179 myversion = ver + "-" + rev
12180 if mytype == "selected":
12182 colorize("UNMERGE_WARN", myversion + " "),
12186 colorize("GOOD", myversion + " "), noiselevel=-1)
12188 writemsg_level("none ", noiselevel=-1)
12190 writemsg_level("\n", noiselevel=-1)
12192 writemsg_level("\n", noiselevel=-1)
12194 writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12195 " packages are slated for removal.\n")
12196 writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12197 " and " + colorize("GOOD", "'omitted'") + \
12198 " packages will not be removed.\n\n")
12200 if "--pretend" in myopts:
12201 #we're done... return
12203 if "--ask" in myopts:
12204 if userquery("Would you like to unmerge these packages?")=="No":
12205 # enter pretend mode for correct formatting of results
12206 myopts["--pretend"] = True
12211 #the real unmerging begins, after a short delay....
12212 if clean_delay and not autoclean:
12213 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12215 for x in xrange(len(pkgmap)):
12216 for y in pkgmap[x]["selected"]:
12217 writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12218 emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12219 mysplit = y.split("/")
12221 retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12222 mysettings, unmerge_action not in ["clean","prune"],
12223 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12224 scheduler=scheduler)
12226 if retval != os.EX_OK:
12227 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12229 raise UninstallFailure(retval)
12232 if clean_world and hasattr(sets["world"], "cleanPackage"):
12233 sets["world"].cleanPackage(vartree.dbapi, y)
12234 emergelog(xterm_titles, " >>> unmerge success: "+y)
12235 if clean_world and hasattr(sets["world"], "remove"):
12236 for s in root_config.setconfig.active:
12237 sets["world"].remove(SETPREFIX+s)
12240 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12242 if os.path.exists("/usr/bin/install-info"):
12243 out = portage.output.EOutput()
12248 inforoot=normpath(root+z)
12249 if os.path.isdir(inforoot):
12250 infomtime = long(os.stat(inforoot).st_mtime)
12251 if inforoot not in prev_mtimes or \
12252 prev_mtimes[inforoot] != infomtime:
12253 regen_infodirs.append(inforoot)
12255 if not regen_infodirs:
12256 portage.writemsg_stdout("\n")
12257 out.einfo("GNU info directory index is up-to-date.")
12259 portage.writemsg_stdout("\n")
12260 out.einfo("Regenerating GNU info directory index...")
12262 dir_extensions = ("", ".gz", ".bz2")
12266 for inforoot in regen_infodirs:
12270 if not os.path.isdir(inforoot) or \
12271 not os.access(inforoot, os.W_OK):
12274 file_list = os.listdir(inforoot)
12276 dir_file = os.path.join(inforoot, "dir")
12277 moved_old_dir = False
12278 processed_count = 0
12279 for x in file_list:
12280 if x.startswith(".") or \
12281 os.path.isdir(os.path.join(inforoot, x)):
12283 if x.startswith("dir"):
12285 for ext in dir_extensions:
12286 if x == "dir" + ext or \
12287 x == "dir" + ext + ".old":
12292 if processed_count == 0:
12293 for ext in dir_extensions:
12295 os.rename(dir_file + ext, dir_file + ext + ".old")
12296 moved_old_dir = True
12297 except EnvironmentError, e:
12298 if e.errno != errno.ENOENT:
12301 processed_count += 1
12302 myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12303 existsstr="already exists, for file `"
12305 if re.search(existsstr,myso):
12306 # Already exists... Don't increment the count for this.
12308 elif myso[:44]=="install-info: warning: no info dir entry in ":
12309 # This info file doesn't contain a DIR-header: install-info produces this
12310 # (harmless) warning (the --quiet switch doesn't seem to work).
12311 # Don't increment the count for this.
12314 badcount=badcount+1
12315 errmsg += myso + "\n"
12318 if moved_old_dir and not os.path.exists(dir_file):
12319 # We didn't generate a new dir file, so put the old file
12320 # back where it was originally found.
12321 for ext in dir_extensions:
12323 os.rename(dir_file + ext + ".old", dir_file + ext)
12324 except EnvironmentError, e:
12325 if e.errno != errno.ENOENT:
12329 # Clean dir.old cruft so that they don't prevent
12330 # unmerge of otherwise empty directories.
12331 for ext in dir_extensions:
12333 os.unlink(dir_file + ext + ".old")
12334 except EnvironmentError, e:
12335 if e.errno != errno.ENOENT:
12339 #update mtime so we can potentially avoid regenerating.
12340 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12343 out.eerror("Processed %d info files; %d errors." % \
12344 (icount, badcount))
12345 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12348 out.einfo("Processed %d info files." % (icount,))
12351 def display_news_notification(root_config, myopts):
12352 target_root = root_config.root
12353 trees = root_config.trees
12354 settings = trees["vartree"].settings
12355 portdb = trees["porttree"].dbapi
12356 vardb = trees["vartree"].dbapi
12357 NEWS_PATH = os.path.join("metadata", "news")
12358 UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12359 newsReaderDisplay = False
12360 update = "--pretend" not in myopts
12362 for repo in portdb.getRepositories():
12363 unreadItems = checkUpdatedNewsItems(
12364 portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12366 if not newsReaderDisplay:
12367 newsReaderDisplay = True
12369 print colorize("WARN", " * IMPORTANT:"),
12370 print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12373 if newsReaderDisplay:
12374 print colorize("WARN", " *"),
12375 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
12378 def display_preserved_libs(vardbapi):
12381 # Ensure the registry is consistent with existing files.
12382 vardbapi.plib_registry.pruneNonExisting()
12384 if vardbapi.plib_registry.hasEntries():
12386 print colorize("WARN", "!!!") + " existing preserved libs:"
12387 plibdata = vardbapi.plib_registry.getPreservedLibs()
12388 linkmap = vardbapi.linkmap
12391 linkmap_broken = False
12395 except portage.exception.CommandNotFound, e:
12396 writemsg_level("!!! Command Not Found: %s\n" % (e,),
12397 level=logging.ERROR, noiselevel=-1)
12399 linkmap_broken = True
12401 search_for_owners = set()
12402 for cpv in plibdata:
12403 internal_plib_keys = set(linkmap._obj_key(f) \
12404 for f in plibdata[cpv])
12405 for f in plibdata[cpv]:
12406 if f in consumer_map:
12409 for c in linkmap.findConsumers(f):
12410 # Filter out any consumers that are also preserved libs
12411 # belonging to the same package as the provider.
12412 if linkmap._obj_key(c) not in internal_plib_keys:
12413 consumers.append(c)
12415 consumer_map[f] = consumers
12416 search_for_owners.update(consumers[:MAX_DISPLAY+1])
12418 owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12420 for cpv in plibdata:
12421 print colorize("WARN", ">>>") + " package: %s" % cpv
12423 for f in plibdata[cpv]:
12424 obj_key = linkmap._obj_key(f)
12425 alt_paths = samefile_map.get(obj_key)
12426 if alt_paths is None:
12428 samefile_map[obj_key] = alt_paths
12431 for alt_paths in samefile_map.itervalues():
12432 alt_paths = sorted(alt_paths)
12433 for p in alt_paths:
12434 print colorize("WARN", " * ") + " - %s" % (p,)
12436 consumers = consumer_map.get(f, [])
12437 for c in consumers[:MAX_DISPLAY]:
12438 print colorize("WARN", " * ") + " used by %s (%s)" % \
12439 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12440 if len(consumers) == MAX_DISPLAY + 1:
12441 print colorize("WARN", " * ") + " used by %s (%s)" % \
12442 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12443 for x in owners.get(consumers[MAX_DISPLAY], [])))
12444 elif len(consumers) > MAX_DISPLAY:
12445 print colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY)
12446 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12449 def _flush_elog_mod_echo():
12451 Dump the mod_echo output now so that our other
12452 notifications are shown last.
12454 @returns: True if messages were shown, False otherwise.
12456 messages_shown = False
12458 from portage.elog import mod_echo
12459 except ImportError:
12460 pass # happens during downgrade to a version without the module
12462 messages_shown = bool(mod_echo._items)
12463 mod_echo.finalize()
12464 return messages_shown
# NOTE(review): this is a numbered listing with gaps -- the embedded line
# numbers jump, so some statements (e.g. the docstring delimiters and a few
# branches) are elided.  Code below is kept byte-identical.
# post_emerge: end-of-merge-session housekeeping -- regenerates settings,
# logs the exit status, flushes elog, refreshes GNU info dirs, and shows
# config-file / news / preserved-libs notices.
12466 def post_emerge(root_config, myopts, mtimedb, retval):
12468 Misc. things to run at the end of a merge session.
12471 Update Config Files
12474 Display preserved libs warnings
12477 @param trees: A dictionary mapping each ROOT to it's package databases
12479 @param mtimedb: The mtimeDB to store data needed across merge invocations
12480 @type mtimedb: MtimeDB class instance
12481 @param retval: Emerge's return value
12485 1. Calls sys.exit(retval)
# Derive the target ROOT's vartree dbapi, settings and info-file mtimes.
12488 target_root = root_config.root
12489 trees = { target_root : root_config.trees }
12490 vardbapi = trees[target_root]["vartree"].dbapi
12491 settings = vardbapi.settings
12492 info_mtimes = mtimedb["info"]
12494 # Load the most current variables from ${ROOT}/etc/profile.env
12497 settings.regenerate()
12500 config_protect = settings.get("CONFIG_PROTECT","").split()
12501 infodirs = settings.get("INFOPATH","").split(":") + \
12502 settings.get("INFODIR","").split(":")
# Log success/failure to the emerge log (xterm titles suppressed by the
# "notitles" FEATURES flag).
12506 if retval == os.EX_OK:
12507 exit_msg = " *** exiting successfully."
12509 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12510 emergelog("notitles" not in settings.features, exit_msg)
12512 _flush_elog_mod_echo()
# If this was --pretend, or the vdb counter hash is unchanged (nothing was
# merged), only show the news notification -- the early-exit line that
# presumably follows is elided from this listing.
12514 counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12515 if "--pretend" in myopts or (counter_hash is not None and \
12516 counter_hash == vardbapi._counter_hash()):
12517 display_news_notification(root_config, myopts)
12518 # If vdb state has not changed then there's nothing else to do.
# Hold the vdb lock while checking GNU info files (skipped when the vdb
# is not writable or under --pretend, and when FEATURES=noinfo).
12521 vdb_path = os.path.join(target_root, portage.VDB_PATH)
12522 portage.util.ensure_dirs(vdb_path)
12524 if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
12525 vdb_lock = portage.locks.lockdir(vdb_path)
12529 if "noinfo" not in settings.features:
12530 chk_updated_info_files(target_root,
12531 infodirs, info_mtimes, retval)
12535 portage.locks.unlockdir(vdb_lock)
# Final user-facing notices: pending ._cfg updates, news, preserved libs.
12537 chk_updated_cfg_files(target_root, config_protect)
12539 display_news_notification(root_config, myopts)
12540 if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
12541 display_preserved_libs(vardbapi)
# NOTE(review): gappy numbered listing -- several branches (else:, try:,
# continue) are elided.  Code kept byte-identical.
# chk_updated_cfg_files: scan each CONFIG_PROTECT path under target_root
# for pending ._cfg????_* update files (via find(1)) and print a warning
# telling the user how many config files need updating.
12546 def chk_updated_cfg_files(target_root, config_protect):
12548 #number of directories with some protect files in them
12550 for x in config_protect:
# Anchor the (possibly relative) protect path under target_root.
12551 x = os.path.join(target_root, x.lstrip(os.path.sep))
12552 if not os.access(x, os.W_OK):
12553 # Avoid Permission denied errors generated
12557 mymode = os.lstat(x).st_mode
12560 if stat.S_ISLNK(mymode):
12561 # We want to treat it like a directory if it
12562 # is a symlink to an existing directory.
12564 real_mode = os.stat(x).st_mode
12565 if stat.S_ISDIR(real_mode):
# Directory: recurse (pruning hidden dirs); otherwise (elided else branch)
# look only for updates to this single file in its parent directory.
12569 if stat.S_ISDIR(mymode):
12570 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12572 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12573 os.path.split(x.rstrip(os.path.sep))
12574 mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12575 a = commands.getstatusoutput(mycommand)
# Non-zero find status (check elided): report the error, re-running the
# command with stdout discarded so only stderr reaches the user.
12577 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12579 # Show the error message alone, sending stdout to /dev/null.
12580 os.system(mycommand + " 1>/dev/null")
# -print0 output is NUL-separated; drop the trailing empty element.
12582 files = a[1].split('\0')
12583 # split always produces an empty string as the last element
12584 if files and not files[-1]:
12588 print "\n"+colorize("WARN", " * IMPORTANT:"),
12589 if stat.S_ISDIR(mymode):
12590 print "%d config files in '%s' need updating." % \
12593 print "config file '%s' needs updating." % x
# Closing hint (guard condition elided) pointing at the emerge man page.
12596 print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12597 " section of the " + bold("emerge")
12598 print " "+yellow("*")+" man page to learn how to update config files."
# NOTE(review): numbered listing with gaps -- the second signature line
# (presumably "update=False):", given the update=update call below) and
# parts of the epydoc block are elided.  Code kept byte-identical.
# Thin wrapper around portage.news.NewsManager.getUnreadItems().
12600 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12603 Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
12604 Returns the number of unread (yet relevent) items.
12606 @param portdb: a portage tree database
12607 @type portdb: pordbapi
12608 @param vardb: an installed package database
12609 @type vardb: vardbapi
12612 @param UNREAD_PATH:
12618 1. The number of unread but relevant news items.
# Import locally to avoid the cost at module load time.
12621 from portage.news import NewsManager
12622 manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12623 return manager.getUnreadItems( repo_id, update=update )
def insert_category_into_atom(atom, category):
	"""Return a copy of atom with category grafted in before the first
	word character, preserving any operator/version prefix (for example
	">=foo-1.0" with category "sys-apps" becomes ">=sys-apps/foo-1.0").

	@param atom: a package atom without a category part
	@param category: the category name to insert
	@rtype: str or None
	@returns: the categorized atom, or None when atom contains no word
		character at all (there is no package name to attach to)
	"""
	alphanum = re.search(r'\w', atom)
	if alphanum is None:
		# Guard against atoms with no word character: without this,
		# alphanum.start() below would raise AttributeError on None.
		return None
	return atom[:alphanum.start()] + "%s/" % category + \
		atom[alphanum.start():]
def is_valid_package_atom(x):
	"""Return the result of portage.isvalidatom() for x, tolerating
	atoms given without a category.  Since isvalidatom() requires a
	category part, a dummy "cat/" is grafted onto category-less atoms
	before validation.

	@param x: a package atom, possibly lacking a category
	@rtype: bool (whatever portage.isvalidatom returns)
	"""
	if "/" not in x:
		# Only graft the dummy category when none is present;
		# unconditional insertion would corrupt atoms that already
		# carry a category (e.g. "dev-libs/foo" -> "cat/dev-libs/foo").
		alphanum = re.search(r'\w', x)
		if alphanum:
			# Insert after any operator/version prefix such as ">=".
			x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
	return portage.isvalidatom(x)
# NOTE(review): gappy numbered listing -- the elided lines (12642, 12645)
# are presumably bare "print" statements for spacing.  Kept byte-identical.
# Print a pointer to the Gentoo Handbook section on blocked packages.
12641 def show_blocker_docs_link():
12643 print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12644 print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12646 print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
# Print a pointer to the MASKED PACKAGES documentation.  (Numbered listing;
# any trailing elided line is not visible here.)
12649 def show_mask_docs():
12650 print "For more information, see the MASKED PACKAGES section in the emerge"
12651 print "man page or refer to the Gentoo Handbook."
# NOTE(review): this very large function is a gappy numbered listing --
# many guards, else/try/except branches, return statements and blank lines
# are elided (the embedded line numbers jump).  Code kept byte-identical;
# comments below describe only what the visible lines show.
# action_sync: implement `emerge --sync` / `emerge --metadata`.  Chooses a
# transport (git / rsync / cvs) from the portdir contents and SYNC URI,
# performs the sync, then reloads config, runs updates, refreshes the
# metadata cache, and warns if portage itself needs updating.
12653 def action_sync(settings, trees, mtimedb, myopts, myaction):
12654 xterm_titles = "notitles" not in settings.features
12655 emergelog(xterm_titles, " === sync")
# Resolve PORTDIR; the error path below fires when it is unset (guard
# elided) and strips a single trailing slash otherwise.
12656 myportdir = settings.get("PORTDIR", None)
12657 out = portage.output.EOutput()
12659 sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
12661 if myportdir[-1]=="/":
12662 myportdir=myportdir[:-1]
# Stat PORTDIR, creating it (mode 0755) when missing.
12664 st = os.stat(myportdir)
12668 print ">>>",myportdir,"not found, creating it."
12669 os.makedirs(myportdir,0755)
12670 st = os.stat(myportdir)
# FEATURES=usersync: when running privileged and PORTDIR is owned by
# another user/group with owner/group write bits, drop privileges for the
# spawned sync commands to match the existing uid/gid, and widen the umask
# if the tree is not group-writable.
12673 spawn_kwargs["env"] = settings.environ()
12674 if 'usersync' in settings.features and \
12675 portage.data.secpass >= 2 and \
12676 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12677 st.st_gid != os.getgid() and st.st_mode & 0070):
12679 homedir = pwd.getpwuid(st.st_uid).pw_dir
12683 # Drop privileges when syncing, in order to match
12684 # existing uid/gid settings.
12685 spawn_kwargs["uid"] = st.st_uid
12686 spawn_kwargs["gid"] = st.st_gid
12687 spawn_kwargs["groups"] = [st.st_gid]
12688 spawn_kwargs["env"]["HOME"] = homedir
12690 if not st.st_mode & 0020:
12691 umask = umask | 0020
12692 spawn_kwargs["umask"] = umask
# SYNC URI is required (error branch's surrounding guard elided).
12694 syncuri = settings.get("SYNC", "").strip()
12696 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12697 noiselevel=-1, level=logging.ERROR)
# Detect whether PORTDIR is already under revision control.
12700 vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12701 vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12704 dosyncuri = syncuri
12705 updatecache_flg = False
# --- Transport selection ---------------------------------------------
# "metadata" action: no sync, just regenerate the cache later.
12706 if myaction == "metadata":
12707 print "skipping sync"
12708 updatecache_flg = True
# git checkout present: `git pull` in place, ignoring SYNC.
12709 elif ".git" in vcs_dirs:
12710 # Update existing git repository, and ignore the syncuri. We are
12711 # going to trust the user and assume that the user is in the branch
12712 # that he/she wants updated. We'll let the user manage branches with
12714 if portage.process.find_binary("git") is None:
12715 msg = ["Command not found: git",
12716 "Type \"emerge dev-util/git\" to enable git support."]
12718 writemsg_level("!!! %s\n" % l,
12719 level=logging.ERROR, noiselevel=-1)
12721 msg = ">>> Starting git pull in %s..." % myportdir
12722 emergelog(xterm_titles, msg )
12723 writemsg_level(msg + "\n")
12724 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12725 (portage._shell_quote(myportdir),), **spawn_kwargs)
12726 if exitcode != os.EX_OK:
12727 msg = "!!! git pull error in %s." % myportdir
12728 emergelog(xterm_titles, msg)
12729 writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12731 msg = ">>> Git pull in %s successful" % myportdir
12732 emergelog(xterm_titles, msg)
12733 writemsg_level(msg + "\n")
# Repair mtimes lost by git so the metadata cache stays valid.
12734 exitcode = git_sync_timestamps(settings, myportdir)
12735 if exitcode == os.EX_OK:
12736 updatecache_flg = True
# --- rsync transport --------------------------------------------------
12737 elif syncuri[:8]=="rsync://":
# Refuse to rsync over a tree that is under revision control.
12738 for vcs_dir in vcs_dirs:
12739 writemsg_level(("!!! %s appears to be under revision " + \
12740 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12741 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12743 if not os.path.exists("/usr/bin/rsync"):
12744 print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12745 print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
# Build the rsync option list: hardcoded defaults when the user set
# nothing, otherwise PORTAGE_RSYNC_OPTS with required options re-added.
12750 if settings["PORTAGE_RSYNC_OPTS"] == "":
12751 portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12752 rsync_opts.extend([
12753 "--recursive", # Recurse directories
12754 "--links", # Consider symlinks
12755 "--safe-links", # Ignore links outside of tree
12756 "--perms", # Preserve permissions
12757 "--times", # Preserive mod times
12758 "--compress", # Compress the data transmitted
12759 "--force", # Force deletion on non-empty dirs
12760 "--whole-file", # Don't do block transfers, only entire files
12761 "--delete", # Delete files that aren't in the master tree
12762 "--stats", # Show final statistics about what was transfered
12763 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12764 "--exclude=/distfiles", # Exclude distfiles from consideration
12765 "--exclude=/local", # Exclude local from consideration
12766 "--exclude=/packages", # Exclude packages from consideration
12770 # The below validation is not needed when using the above hardcoded
12773 portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12775 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12776 for opt in ("--recursive", "--times"):
12777 if opt not in rsync_opts:
12778 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12779 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12780 rsync_opts.append(opt)
12782 for exclude in ("distfiles", "local", "packages"):
12783 opt = "--exclude=/%s" % exclude
12784 if opt not in rsync_opts:
12785 portage.writemsg(yellow("WARNING:") + \
12786 " adding required option %s not included in " % opt + \
12787 "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12788 rsync_opts.append(opt)
# Official gentoo.org mirrors additionally force a timeout and the
# compress/whole-file options.
12790 if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12791 def rsync_opt_startswith(opt_prefix):
12792 for x in rsync_opts:
12793 if x.startswith(opt_prefix):
12797 if not rsync_opt_startswith("--timeout="):
12798 rsync_opts.append("--timeout=%d" % mytimeout)
12800 for opt in ("--compress", "--whole-file"):
12801 if opt not in rsync_opts:
12802 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12803 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12804 rsync_opts.append(opt)
# Verbosity knobs mapped from emerge options.
12806 if "--quiet" in myopts:
12807 rsync_opts.append("--quiet") # Shut up a lot
12809 rsync_opts.append("--verbose") # Print filelist
12811 if "--verbose" in myopts:
12812 rsync_opts.append("--progress") # Progress meter for each file
12814 if "--debug" in myopts:
12815 rsync_opts.append("--checksum") # Force checksum on all files
# Parse the local tree timestamp (metadata/timestamp.chk) for the
# server-timestamp comparison done inside the retry loop.
12817 # Real local timestamp file.
12818 servertimestampfile = os.path.join(
12819 myportdir, "metadata", "timestamp.chk")
12821 content = portage.util.grabfile(servertimestampfile)
12825 mytimestamp = time.mktime(time.strptime(content[0],
12826 "%a, %d %b %Y %H:%M:%S +0000"))
12827 except (OverflowError, ValueError):
12832 rsync_initial_timeout = \
12833 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12835 rsync_initial_timeout = 15
12838 maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12839 except SystemExit, e:
12840 raise # Needed else can't exit
12842 maxretries=3 #default number of retries
# Split SYNC into user@, host and :port for address resolution below.
12845 user_name, hostname, port = re.split(
12846 "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12849 if user_name is None:
12851 updatecache_flg=True
# Choose the address family from -4/--ipv4 or -6/--ipv6 in the combined
# option set (PORTAGE_RSYNC_EXTRA_OPTS included).
12852 all_rsync_opts = set(rsync_opts)
12853 extra_rsync_opts = shlex.split(
12854 settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12855 all_rsync_opts.update(extra_rsync_opts)
12856 family = socket.AF_INET
12857 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12858 family = socket.AF_INET
12859 elif socket.has_ipv6 and \
12860 ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12861 family = socket.AF_INET6
# Sentinel exit codes used by the retry loop below.
12863 SERVER_OUT_OF_DATE = -1
12864 EXCEEDED_MAX_RETRIES = -2
# Resolve the mirror hostname to a shuffled IP list so retries rotate
# through mirrors; failures here are non-fatal ("Notice").
12870 for addrinfo in socket.getaddrinfo(
12871 hostname, None, family, socket.SOCK_STREAM):
12872 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12873 # IPv6 addresses need to be enclosed in square brackets
12874 ips.append("[%s]" % addrinfo[4][0])
12876 ips.append(addrinfo[4][0])
12877 from random import shuffle
12879 except SystemExit, e:
12880 raise # Needed else can't exit
12881 except Exception, e:
12882 print "Notice:",str(e)
# Substitute the first resolved IP into the sync URI.
12887 dosyncuri = syncuri.replace(
12888 "//" + user_name + hostname + port + "/",
12889 "//" + user_name + ips[0] + port + "/", 1)
12890 except SystemExit, e:
12891 raise # Needed else can't exit
12892 except Exception, e:
12893 print "Notice:",str(e)
# --ask confirmation before the first attempt.
12897 if "--ask" in myopts:
12898 if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
# First attempt vs. retry logging (branch structure partially elided).
12903 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12904 if "--quiet" not in myopts:
12905 print ">>> Starting rsync with "+dosyncuri+"..."
12907 emergelog(xterm_titles,
12908 ">>> Starting retry %d of %d with %s" % \
12909 (retries,maxretries,dosyncuri))
12910 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12912 if mytimestamp != 0 and "--quiet" not in myopts:
12913 print ">>> Checking server timestamp ..."
12915 rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12917 if "--debug" in myopts:
12920 exitcode = os.EX_OK
12921 servertimestamp = 0
12922 # Even if there's no timestamp available locally, fetch the
12923 # timestamp anyway as an initial probe to verify that the server is
12924 # responsive. This protects us from hanging indefinitely on a
12925 # connection attempt to an unresponsive server which rsync's
12926 # --timeout option does not prevent.
12928 # Temporary file for remote server timestamp comparison.
12929 from tempfile import mkstemp
12930 fd, tmpservertimestampfile = mkstemp()
12932 mycommand = rsynccommand[:]
12933 mycommand.append(dosyncuri.rstrip("/") + \
12934 "/metadata/timestamp.chk")
12935 mycommand.append(tmpservertimestampfile)
# SIGALRM-based timeout around the initial timestamp probe, since
# rsync's own --timeout doesn't cover the connection attempt.
12939 def timeout_handler(signum, frame):
12940 raise portage.exception.PortageException("timed out")
12941 signal.signal(signal.SIGALRM, timeout_handler)
12942 # Timeout here in case the server is unresponsive. The
12943 # --timeout rsync option doesn't apply to the initial
12944 # connection attempt.
12945 if rsync_initial_timeout:
12946 signal.alarm(rsync_initial_timeout)
12948 mypids.extend(portage.process.spawn(
12949 mycommand, env=settings.environ(), returnpid=True))
12950 exitcode = os.waitpid(mypids[0], 0)[1]
12951 content = portage.grabfile(tmpservertimestampfile)
12953 if rsync_initial_timeout:
12956 os.unlink(tmpservertimestampfile)
# On timeout, kill the probe child if it is still running; the elided
# code then maps this to rsync's timeout exit status.
12959 except portage.exception.PortageException, e:
12963 if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
12964 os.kill(mypids[0], signal.SIGTERM)
12965 os.waitpid(mypids[0], 0)
12966 # This is the same code rsync uses for timeout.
# Normalize the waitpid status into a shell-style exit code.
12969 if exitcode != os.EX_OK:
12970 if exitcode & 0xff:
12971 exitcode = (exitcode & 0xff) << 8
12973 exitcode = exitcode >> 8
12975 portage.process.spawned_pids.remove(mypids[0])
12978 servertimestamp = time.mktime(time.strptime(
12979 content[0], "%a, %d %b %Y %H:%M:%S +0000"))
12980 except (OverflowError, ValueError):
12982 del mycommand, mypids, content
# Compare server vs. local timestamps: equal -> already current;
# older -> server out of date; newer/unknown -> do the real rsync.
12983 if exitcode == os.EX_OK:
12984 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
12985 emergelog(xterm_titles,
12986 ">>> Cancelling sync -- Already current.")
12989 print ">>> Timestamps on the server and in the local repository are the same."
12990 print ">>> Cancelling all further sync action. You are already up to date."
12992 print ">>> In order to force sync, remove '%s'." % servertimestampfile
12996 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
12997 emergelog(xterm_titles,
12998 ">>> Server out of date: %s" % dosyncuri)
13001 print ">>> SERVER OUT OF DATE: %s" % dosyncuri
13003 print ">>> In order to force sync, remove '%s'." % servertimestampfile
13006 exitcode = SERVER_OUT_OF_DATE
13007 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
13009 mycommand = rsynccommand + [dosyncuri+"/", myportdir]
13010 exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
# rsync exit codes 1,3,4,11,14,20,21 are treated as retryable here
# (branch structure partially elided; see rsync(1) EXIT VALUES).
13011 if exitcode in [0,1,3,4,11,14,20,21]:
13013 elif exitcode in [1,3,4,11,14,20,21]:
13016 # Code 2 indicates protocol incompatibility, which is expected
13017 # for servers with protocol < 29 that don't support
13018 # --prune-empty-directories. Retry for a server that supports
13019 # at least rsync protocol version 29 (>=rsync-2.6.4).
13024 if retries<=maxretries:
13025 print ">>> Retrying..."
13030 updatecache_flg=False
13031 exitcode = EXCEEDED_MAX_RETRIES
# Post-loop reporting keyed on the final exit code.
13035 emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
13036 elif exitcode == SERVER_OUT_OF_DATE:
13038 elif exitcode == EXCEEDED_MAX_RETRIES:
13040 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
# Error explanations (the guards selecting each message are elided).
13045 msg.append("Rsync has reported that there is a syntax error. Please ensure")
13046 msg.append("that your SYNC statement is proper.")
13047 msg.append("SYNC=" + settings["SYNC"])
13049 msg.append("Rsync has reported that there is a File IO error. Normally")
13050 msg.append("this means your disk is full, but can be caused by corruption")
13051 msg.append("on the filesystem that contains PORTDIR. Please investigate")
13052 msg.append("and try again after the problem has been fixed.")
13053 msg.append("PORTDIR=" + settings["PORTDIR"])
13055 msg.append("Rsync was killed before it finished.")
13057 msg.append("Rsync has not successfully finished. It is recommended that you keep")
13058 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
13059 msg.append("to use rsync due to firewall or other restrictions. This should be a")
13060 msg.append("temporary problem unless complications exist with your network")
13061 msg.append("(and possibly your system's filesystem) configuration.")
# --- cvs transport ----------------------------------------------------
13065 elif syncuri[:6]=="cvs://":
13066 if not os.path.exists("/usr/bin/cvs"):
13067 print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
13068 print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
13070 cvsroot=syncuri[6:]
13071 cvsdir=os.path.dirname(myportdir)
# Initial checkout when no CVS dir exists yet; refuses to clobber an
# existing gentoo-x86 checkout or a non-empty portdir.
13072 if not os.path.exists(myportdir+"/CVS"):
13074 print ">>> Starting initial cvs checkout with "+syncuri+"..."
13075 if os.path.exists(cvsdir+"/gentoo-x86"):
13076 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
13079 os.rmdir(myportdir)
13081 if e.errno != errno.ENOENT:
13083 "!!! existing '%s' directory; exiting.\n" % myportdir)
13086 if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
13087 print "!!! cvs checkout error; exiting."
13089 os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
# Existing checkout: plain cvs update.
13092 print ">>> Starting cvs update with "+syncuri+"..."
13093 retval = portage.process.spawn_bash(
13094 "cd %s; cvs -z0 -q update -dP" % \
13095 (portage._shell_quote(myportdir),), **spawn_kwargs)
13096 if retval != os.EX_OK:
13098 dosyncuri = syncuri
# Unknown SYNC scheme.
13100 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
13101 noiselevel=-1, level=logging.ERROR)
# --- post-sync steps --------------------------------------------------
# Skip cache transfer when FEATURES lacks metadata-transfer.
13104 if updatecache_flg and \
13105 myaction != "metadata" and \
13106 "metadata-transfer" not in settings.features:
13107 updatecache_flg = False
13109 # Reload the whole config from scratch.
13110 settings, trees, mtimedb = load_emerge_config(trees=trees)
13111 root_config = trees[settings["ROOT"]]["root_config"]
13112 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13114 if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
13115 action_metadata(settings, portdb, myopts)
# Apply tree-wide package moves/updates, then reload config again.
13117 if portage._global_updates(trees, mtimedb["updates"]):
13119 # Reload the whole config from scratch.
13120 settings, trees, mtimedb = load_emerge_config(trees=trees)
13121 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13122 root_config = trees[settings["ROOT"]]["root_config"]
# Compare the best visible portage version against the installed one to
# decide whether to nag about updating portage itself.
13124 mybestpv = portdb.xmatch("bestmatch-visible",
13125 portage.const.PORTAGE_PACKAGE_ATOM)
13126 mypvs = portage.best(
13127 trees[settings["ROOT"]]["vartree"].dbapi.match(
13128 portage.const.PORTAGE_PACKAGE_ATOM))
13130 chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
# Run the user's post_sync hook if present and executable.
13132 if myaction != "metadata":
13133 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
13134 retval = portage.process.spawn(
13135 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
13136 dosyncuri], env=settings.environ())
13137 if retval != os.EX_OK:
13138 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
13140 if(mybestpv != mypvs) and not "--quiet" in myopts:
13142 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
13143 print red(" * ")+"that you update portage now, before any other packages are updated."
13145 print red(" * ")+"To update portage, run 'emerge portage' now."
13148 display_news_notification(root_config, myopts)
# NOTE(review): gappy numbered listing -- return/continue statements and
# some guards are elided.  Code kept byte-identical.
# git_sync_timestamps: after a git pull, restore ebuild/eclass mtimes from
# the metadata cache so the cache remains valid (git does not preserve
# timestamps).  Files reported modified by `git diff-index` are skipped.
13151 def git_sync_timestamps(settings, portdir):
13153 Since git doesn't preserve timestamps, synchronize timestamps between
13154 entries and ebuilds/eclasses. Assume the cache has the correct timestamp
13155 for a given file as long as the file in the working tree is not modified
13156 (relative to HEAD).
13158 cache_dir = os.path.join(portdir, "metadata", "cache")
13159 if not os.path.isdir(cache_dir):
13161 writemsg_level(">>> Synchronizing timestamps...\n")
# Open the metadata cache read-only via the configured cache module.
13163 from portage.cache.cache_errors import CacheError
13165 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
13166 portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13167 except CacheError, e:
13168 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
13169 level=logging.ERROR, noiselevel=-1)
# Collect the set of eclass names (filenames minus ".eclass").
13172 ec_dir = os.path.join(portdir, "eclass")
13174 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13175 if f.endswith(".eclass"))
13177 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13178 level=logging.ERROR, noiselevel=-1)
# Ask git which tracked files are modified relative to HEAD; those files
# must keep their working-tree mtimes.
13181 args = [portage.const.BASH_BINARY, "-c",
13182 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13183 portage._shell_quote(portdir)]
13185 proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13186 modified_files = set(l.rstrip("\n") for l in proc.stdout)
13188 if rval != os.EX_OK:
13191 modified_eclasses = set(ec for ec in ec_names \
13192 if os.path.join("eclass", ec + ".eclass") in modified_files)
13194 updated_ec_mtimes = {}
# Walk every cache entry; each error path below logs and (elided) skips
# the entry.
13196 for cpv in cache_db:
13197 cpv_split = portage.catpkgsplit(cpv)
13198 if cpv_split is None:
13199 writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13200 level=logging.ERROR, noiselevel=-1)
13203 cat, pn, ver, rev = cpv_split
13204 cat, pf = portage.catsplit(cpv)
13205 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
# Locally-modified ebuilds are left alone.
13206 if relative_eb_path in modified_files:
13210 cache_entry = cache_db[cpv]
13211 eb_mtime = cache_entry.get("_mtime_")
13212 ec_mtimes = cache_entry.get("_eclasses_")
13214 writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13215 level=logging.ERROR, noiselevel=-1)
13217 except CacheError, e:
13218 writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13219 (cpv, e), level=logging.ERROR, noiselevel=-1)
# Validate the recorded ebuild mtime.
13222 if eb_mtime is None:
13223 writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13224 level=logging.ERROR, noiselevel=-1)
13228 eb_mtime = long(eb_mtime)
13230 writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13231 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
# Validate the recorded eclass mtimes; skip entries that depend on a
# locally-modified or non-existent eclass.
13234 if ec_mtimes is None:
13235 writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13236 level=logging.ERROR, noiselevel=-1)
13239 if modified_eclasses.intersection(ec_mtimes):
13242 missing_eclasses = set(ec_mtimes).difference(ec_names)
13243 if missing_eclasses:
13244 writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13245 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13249 eb_path = os.path.join(portdir, relative_eb_path)
13251 current_eb_mtime = os.stat(eb_path)
13253 writemsg_level("!!! Missing ebuild: %s\n" % \
13254 (cpv,), level=logging.ERROR, noiselevel=-1)
# Cross-check that no eclass was already set to a different mtime by an
# earlier entry; inconsistent entries are reported and (elided) skipped.
13257 inconsistent = False
13258 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13259 updated_mtime = updated_ec_mtimes.get(ec)
13260 if updated_mtime is not None and updated_mtime != ec_mtime:
13261 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13262 (cpv, ec), level=logging.ERROR, noiselevel=-1)
13263 inconsistent = True
# Apply the cached mtimes to the ebuild and any not-yet-updated eclasses.
13269 if current_eb_mtime != eb_mtime:
13270 os.utime(eb_path, (eb_mtime, eb_mtime))
13272 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13273 if ec in updated_ec_mtimes:
13275 ec_path = os.path.join(ec_dir, ec + ".eclass")
13276 current_mtime = long(os.stat(ec_path).st_mtime)
13277 if current_mtime != ec_mtime:
13278 os.utime(ec_path, (ec_mtime, ec_mtime))
13279 updated_ec_mtimes[ec] = ec_mtime
# NOTE(review): gappy numbered listing -- sys.exit calls, else branches and
# some method bodies of the nested class are elided.  Code kept byte-identical.
# action_metadata: transfer/regenerate the Portage metadata cache from
# $PORTDIR/metadata/cache into the local depcache, with a percentage
# progress display unless --quiet.
13283 def action_metadata(settings, portdb, myopts):
13284 portage.writemsg_stdout("\n>>> Updating Portage cache: ")
# Temporarily widen the umask so the shared cache is group-writable.
13285 old_umask = os.umask(0002)
13286 cachedir = os.path.normpath(settings.depcachedir)
# Refuse to operate on a cache dir pointing at a primary system directory.
13287 if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
13288 "/lib", "/opt", "/proc", "/root", "/sbin",
13289 "/sys", "/tmp", "/usr", "/var"]:
13290 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13291 "ROOT DIRECTORY ON YOUR SYSTEM."
13292 print >> sys.stderr, \
13293 "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13295 if not os.path.exists(cachedir):
# Source (metadata/cache via the configured module) and eclass cache.
13298 ec = portage.eclass_cache.cache(portdb.porttree_root)
13299 myportdir = os.path.realpath(settings["PORTDIR"])
13300 cm = settings.load_best_module("portdbapi.metadbmodule")(
13301 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13303 from portage.cache import util
# Progress reporter: iterates all cpvs while periodically printing an
# updated percentage (used when not --quiet).
13305 class percentage_noise_maker(util.quiet_mirroring):
13306 def __init__(self, dbapi):
13308 self.cp_all = dbapi.cp_all()
13309 l = len(self.cp_all)
13310 self.call_update_min = 100000000
13311 self.min_cp_all = l/100.0
13315 def __iter__(self):
13316 for x in self.cp_all:
13318 if self.count > self.min_cp_all:
13319 self.call_update_min = 0
13321 for y in self.dbapi.cp_list(x):
13323 self.call_update_mine = 0
13325 def update(self, *arg):
13327 self.pstr = int(self.pstr) + 1
# Backspace over the previous figure before printing the new one.
13330 sys.stdout.write("%s%i%%" % \
13331 ("\b" * (len(str(self.pstr))+1), self.pstr))
13333 self.call_update_min = 10000000
13335 def finish(self, *arg):
13336 sys.stdout.write("\b\b\b\b100%\n")
# --quiet: plain cpv generator with a silent reporter; otherwise the
# percentage reporter doubles as the source iterator.
13339 if "--quiet" in myopts:
13340 def quicky_cpv_generator(cp_all_list):
13341 for x in cp_all_list:
13342 for y in portdb.cp_list(x):
13344 source = quicky_cpv_generator(portdb.cp_all())
13345 noise_maker = portage.cache.util.quiet_mirroring()
13347 noise_maker = source = percentage_noise_maker(portdb)
13348 portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
13349 eclass_cache=ec, verbose_instance=noise_maker)
# Restore the caller's umask.
13352 os.umask(old_umask)
# NOTE(review): gappy numbered listing -- the try: line and remaining
# except clauses around os.close are elided.  Code kept byte-identical.
# action_regen: implement `emerge --regen` -- regenerate all metadata
# cache entries via MetadataRegen and return its exit status.
13354 def action_regen(settings, portdb, max_jobs, max_load):
13355 xterm_titles = "notitles" not in settings.features
13356 emergelog(xterm_titles, " === regen")
13357 #regenerate cache entries
13358 portage.writemsg_stdout("Regenerating cache entries...\n")
# Close stdin; the surrounding try/except (elided) re-raises SystemExit.
13360 os.close(sys.stdin.fileno())
13361 except SystemExit, e:
13362 raise # Needed else can't exit
13367 regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13370 portage.writemsg_stdout("done!\n")
13371 return regen.returncode
# NOTE(review): gappy numbered listing -- sys.exit calls, else branches and
# parts of the selection loop are elided.  Code kept byte-identical.
# action_config: implement `emerge --config <atom>` -- pick exactly one
# installed package matching the atom (interactively with --ask when the
# match is ambiguous) and run its ebuild "config" phase, then "clean".
13373 def action_config(settings, trees, myopts, myfiles):
13374 if len(myfiles) != 1:
13375 print red("!!! config can only take a single package atom at this time\n")
13377 if not is_valid_package_atom(myfiles[0]):
13378 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13380 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13381 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
# Match the atom against installed packages (vartree).
13385 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13386 except portage.exception.AmbiguousPackageName, e:
13387 # Multiple matches thrown from cpv_expand
13390 print "No packages found.\n"
# Multiple matches: with --ask, present a numbered menu (plus "X" to
# abort); without --ask (elided else), just list them and bail.
13392 elif len(pkgs) > 1:
13393 if "--ask" in myopts:
13395 print "Please select a package to configure:"
13399 options.append(str(idx))
13400 print options[-1]+") "+pkg
13402 options.append("X")
13403 idx = userquery("Selection?", options)
13406 pkg = pkgs[int(idx)-1]
13408 print "The following packages available:"
13411 print "\nPlease use a specific atom or the --ask option."
# Final confirmation before running the config phase.
13417 if "--ask" in myopts:
13418 if userquery("Ready to configure "+pkg+"?") == "No":
13421 print "Configuring pkg..."
13423 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13424 mysettings = portage.config(clone=settings)
13425 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13426 debug = mysettings.get("PORTAGE_DEBUG") == "1"
# Run pkg_config(); note the debug kwarg here compares a string to the
# int 1 ("" == 1 is always False) -- presumably a latent bug, since the
# local `debug` computed above compares against "1".  TODO confirm.
13427 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13429 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
13430 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13431 if retval == os.EX_OK:
13432 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13433 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
# NOTE(review): gappy numbered listing -- loop headers, guards and several
# assignments are elided.  Code kept byte-identical.
# action_info: implement `emerge --info` -- print system settings (uname,
# tree timestamp, toolchain package versions, make.conf variables, USE
# flags) and, for any packages given on the command line, their recorded
# build-time settings plus the ebuild pkg_info() phase.
13436 def action_info(settings, trees, myopts, myfiles):
13437 print getportageversion(settings["PORTDIR"], settings["ROOT"],
13438 settings.profile_path, settings["CHOST"],
13439 trees[settings["ROOT"]]["vartree"].dbapi)
# Banner ("=" rules with a centered title; header_width set in an elided
# line).
13441 header_title = "System Settings"
13443 print header_width * "="
13444 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13445 print header_width * "="
13446 print "System uname: "+platform.platform(aliased=1)
13448 lastSync = portage.grabfile(os.path.join(
13449 settings["PORTDIR"], "metadata", "timestamp.chk"))
13450 print "Timestamp of tree:",
# Report distcc / ccache versions and whether each FEATURE is enabled.
13456 output=commands.getstatusoutput("distcc --version")
13458 print str(output[1].split("\n",1)[0]),
13459 if "distcc" in settings.features:
13464 output=commands.getstatusoutput("ccache -V")
13466 print str(output[1].split("\n",1)[0]),
13467 if "ccache" in settings.features:
# Installed versions of key toolchain packages plus any atoms listed in
# profiles/info_pkgs.
13472 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13473 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
13474 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13475 myvars = portage.util.unique_array(myvars)
13479 if portage.isvalidatom(x):
13480 pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13481 pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13482 pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13484 for pn, ver, rev in pkg_matches:
13486 pkgs.append(ver + "-" + rev)
13490 pkgs = ", ".join(pkgs)
13491 print "%-20s %s" % (x+":", pkgs)
13493 print "%-20s %s" % (x+":", "[NOT VALID]")
13495 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
# Variable dump: all settings with --verbose, otherwise a fixed list plus
# profiles/info_vars.
13497 if "--verbose" in myopts:
13498 myvars=settings.keys()
13500 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13501 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13502 'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13503 'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13505 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13507 myvars = portage.util.unique_array(myvars)
13513 print '%s="%s"' % (x, settings[x])
# USE is printed with USE_EXPAND flags factored out into their own
# variables (the removal of prefixed flags from `use` is elided).
13515 use = set(settings["USE"].split())
13516 use_expand = settings["USE_EXPAND"].split()
13518 for varname in use_expand:
13519 flag_prefix = varname.lower() + "_"
13520 for f in list(use):
13521 if f.startswith(flag_prefix):
13525 print 'USE="%s"' % " ".join(use),
13526 for varname in use_expand:
13527 myval = settings.get(varname)
13529 print '%s="%s"' % (varname, myval),
13532 unset_vars.append(x)
13534 print "Unset: "+", ".join(unset_vars)
# --debug: dump cvs_id_string of every portage submodule.
13537 if "--debug" in myopts:
13538 for x in dir(portage):
13539 module = getattr(portage, x)
13540 if "cvs_id_string" in dir(module):
13541 print "%s: %s" % (str(x), str(module.cvs_id_string))
13543 # See if we can find any packages installed matching the strings
13544 # passed on the command line
13546 vardb = trees[settings["ROOT"]]["vartree"].dbapi
13547 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13549 mypkgs.extend(vardb.match(x))
13551 # If some packages were found...
13553 # Get our global settings (we only print stuff if it varies from
13554 # the current config)
13555 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13556 auxkeys = mydesiredvars + [ "USE", "IUSE"]
13558 pkgsettings = portage.config(clone=settings)
13560 for myvar in mydesiredvars:
13561 global_vals[myvar] = set(settings.get(myvar, "").split())
13563 # Loop through each package
13564 # Only print settings if they differ from global settings
13565 header_title = "Package Settings"
13566 print header_width * "="
13567 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13568 print header_width * "="
13569 from portage.output import EOutput
# Per-package loop (header elided): compare recorded build-time values
# against the current globals and print only the differences.
13572 # Get all package specific variables
13573 auxvalues = vardb.aux_get(pkg, auxkeys)
13575 for i in xrange(len(auxkeys)):
13576 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
13578 for myvar in mydesiredvars:
13579 # If the package variable doesn't match the
13580 # current global variable, something has changed
13581 # so set diff_found so we know to print
13582 if valuesmap[myvar] != global_vals[myvar]:
13583 diff_values[myvar] = valuesmap[myvar]
# Normalize USE against IUSE before comparing.
13584 valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
13585 valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
13586 pkgsettings.reset()
13587 # If a matching ebuild is no longer available in the tree, maybe it
13588 # would make sense to compare against the flags for the best
13589 # available version with the same slot?
13591 if portdb.cpv_exists(pkg):
13593 pkgsettings.setcpv(pkg, mydb=mydb)
13594 if valuesmap["IUSE"].intersection(
13595 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
13596 diff_values["USE"] = valuesmap["USE"]
13597 # If a difference was found, print the info for
13600 # Print package info
13601 print "%s was built with the following:" % pkg
13602 for myvar in mydesiredvars + ["USE"]:
13603 if myvar in diff_values:
13604 mylist = list(diff_values[myvar])
13606 print "%s=\"%s\"" % (myvar, " ".join(mylist))
# Run the ebuild "info" phase for the package, if its ebuild still exists.
13608 print ">>> Attempting to run pkg_info() for '%s'" % pkg
13609 ebuildpath = vardb.findname(pkg)
13610 if not ebuildpath or not os.path.exists(ebuildpath):
13611 out.ewarn("No ebuild found for '%s'" % pkg)
13613 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13614 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
13615 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
# NOTE(review): this block comes from a damaged extraction — the original
# file's line numbers are fused into each line, indentation is lost, and
# interior lines are missing (the embedded numbering jumps 13618 -> 13620,
# 13626 -> 13628, etc.). Comments below describe only what the visible
# lines establish; the absent lines (presumably an "if not myfiles" guard,
# a "try:" around execute(), and an error exit) must be confirmed against
# the upstream source before any behavioral change.
#
# Purpose (from visible code): run the emerge package search. Builds a
# `search` instance configured from command-line options, executes each
# search term from `myfiles`, and prints the results via output().
13618 def action_search(root_config, myopts, myfiles, spinner):
# Printed when no search terms were given (guard condition not visible here).
13620 print "emerge: no search terms provided."
# Search behavior is driven entirely by option flags: --searchdesc enables
# description search, --quiet disables verbose output, --usepkg/--usepkgonly
# include binary-package trees.
13622 searchinstance = search(root_config,
13623 spinner, "--searchdesc" in myopts,
13624 "--quiet" not in myopts, "--usepkg" in myopts,
13625 "--usepkgonly" in myopts)
13626 for mysearch in myfiles:
# execute() may raise re.error for a malformed user-supplied pattern;
# the enclosing "try:" line is one of the lines missing from this view.
13628 searchinstance.execute(mysearch)
13629 except re.error, comment:
13630 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13632 searchinstance.output()
# NOTE(review): damaged extraction — original-file line numbers are fused
# into every line, indentation is stripped, and many interior lines are
# absent (see the gaps in the embedded numbering, e.g. 13645 -> 13648,
# 13705 -> 13708). The tokens below are kept byte-identical; only comments
# are added. All structural claims should be re-verified against the
# upstream portage source before editing behavior.
#
# Purpose (from visible code): implement `emerge --depclean` / --prune.
# Builds a dependency graph rooted at the system and world sets (or at the
# installed-package set for prune), identifies installed packages that
# nothing in the graph requires, protects sole providers of shared
# libraries that still have consumers, topologically orders the removals,
# and finally calls unmerge().
13634 def action_depclean(settings, trees, ldpath_mtimes,
13635 myopts, action, myfiles, spinner):
13636 # Kill packages that aren't explicitly merged or are required as a
13637 # dependency of another package. World file is explicit.
13639 # Global depclean or prune operations are not very safe when there are
13640 # missing dependencies since it's unknown how badly incomplete
13641 # the dependency graph is, and we might accidentally remove packages
13642 # that should have been pulled into the graph. On the other hand, it's
13643 # relatively safe to ignore missing deps when only asked to remove
13644 # specific packages.
13645 allow_missing_deps = len(myfiles) > 0
# --- Safety-warning text shown before a global depclean. The initial
# `msg = []` line is not visible in this extraction.
13648 msg.append("Always study the list of packages to be cleaned for any obvious\n")
13649 msg.append("mistakes. Packages that are part of the world set will always\n")
13650 msg.append("be kept. They can be manually added to this set with\n")
13651 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
13652 msg.append("package.provided (see portage(5)) will be removed by\n")
13653 msg.append("depclean, even if they are part of the world set.\n")
13655 msg.append("As a safety measure, depclean will not remove any packages\n")
13656 msg.append("unless *all* required dependencies have been resolved. As a\n")
13657 msg.append("consequence, it is often necessary to run %s\n" % \
13658 good("`emerge --update"))
13659 msg.append(good("--newuse --deep @system @world`") + \
13660 " prior to depclean.\n")
# Only display the warning for a full (argument-less) depclean run.
13662 if action == "depclean" and "--quiet" not in myopts and not myfiles:
13663 portage.writemsg_stdout("\n")
13665 portage.writemsg_stdout(colorize("WARN", " * ") + x)
13667 xterm_titles = "notitles" not in settings.features
13668 myroot = settings["ROOT"]
13669 root_config = trees[myroot]["root_config"]
13670 getSetAtoms = root_config.setconfig.getSetAtoms
13671 vardb = trees[myroot]["vartree"].dbapi
# The system and world sets are the roots of the "required" graph.
13673 required_set_names = ("system", "world")
13677 for s in required_set_names:
13678 required_sets[s] = InternalPackageSet(
13679 initial_atoms=getSetAtoms(s))
13682 # When removing packages, use a temporary version of world
13683 # which excludes packages that are intended to be eligible for
13685 world_temp_set = required_sets["world"]
13686 system_set = required_sets["system"]
# Warn loudly (but do not abort) when system/world sets are empty —
# proceeding can break the installation.
13688 if not system_set or not world_temp_set:
13691 writemsg_level("!!! You have no system list.\n",
13692 level=logging.ERROR, noiselevel=-1)
13694 if not world_temp_set:
13695 writemsg_level("!!! You have no world file.\n",
13696 level=logging.WARNING, noiselevel=-1)
13698 writemsg_level("!!! Proceeding is likely to " + \
13699 "break your installation.\n",
13700 level=logging.WARNING, noiselevel=-1)
# Give the user a chance to abort unless this is a dry run.
13701 if "--pretend" not in myopts:
13702 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13704 if action == "depclean":
13705 emergelog(xterm_titles, " >>> depclean")
# --- Validate and expand command-line atoms into args_set.
13708 args_set = InternalPackageSet()
13711 if not is_valid_package_atom(x):
13712 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13713 level=logging.ERROR, noiselevel=-1)
13714 writemsg_level("!!! Please check ebuild(5) for full details.\n")
13717 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
# A short name that matches multiple categories cannot be expanded;
# list the fully-qualified candidates and bail out on this atom.
13718 except portage.exception.AmbiguousPackageName, e:
13719 msg = "The short ebuild name \"" + x + \
13720 "\" is ambiguous. Please specify " + \
13721 "one of the following " + \
13722 "fully-qualified ebuild names instead:"
13723 for line in textwrap.wrap(msg, 70):
13724 writemsg_level("!!! %s\n" % (line,),
13725 level=logging.ERROR, noiselevel=-1)
13727 writemsg_level(" %s\n" % colorize("INFORM", i),
13728 level=logging.ERROR, noiselevel=-1)
13729 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13732 matched_packages = False
13735 matched_packages = True
13737 if not matched_packages:
13738 writemsg_level(">>> No packages selected for removal by %s\n" % \
# --- Build the dependency graph with a resolver configured for removal.
13742 writemsg_level("\nCalculating dependencies ")
13743 resolver_params = create_depgraph_params(myopts, "remove")
13744 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13745 vardb = resolver.trees[myroot]["vartree"].dbapi
13747 if action == "depclean":
13750 # Pull in everything that's installed but not matched
13751 # by an argument atom since we don't want to clean any
13752 # package if something depends on it.
13754 world_temp_set.clear()
13759 if args_set.findAtomForPackage(pkg) is None:
13760 world_temp_set.add("=" + pkg.cpv)
# A broken PROVIDE depstring makes the match undecidable; report it and
# conservatively keep the package (add it to the temporary world set).
13762 except portage.exception.InvalidDependString, e:
13763 show_invalid_depstring_notice(pkg,
13764 pkg.metadata["PROVIDE"], str(e))
13766 world_temp_set.add("=" + pkg.cpv)
13769 elif action == "prune":
13771 # Pull in everything that's installed since we don't
13772 # to prune a package if something depends on it.
13773 world_temp_set.clear()
13774 world_temp_set.update(vardb.cp_all())
13778 # Try to prune everything that's slotted.
13779 for cp in vardb.cp_all():
13780 if len(vardb.cp_list(cp)) > 1:
13783 # Remove atoms from world that match installed packages
13784 # that are also matched by argument atoms, but do not remove
13785 # them if they match the highest installed version.
13788 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13789 if not pkgs_for_cp or pkg not in pkgs_for_cp:
13790 raise AssertionError("package expected in matches: " + \
13791 "cp = %s, cpv = %s matches = %s" % \
13792 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
# match_pkgs returns versions in ascending order, so the last entry is
# the highest installed version — TODO confirm against vardbapi docs.
13794 highest_version = pkgs_for_cp[-1]
13795 if pkg == highest_version:
13796 # pkg is the highest version
13797 world_temp_set.add("=" + pkg.cpv)
13800 if len(pkgs_for_cp) <= 1:
13801 raise AssertionError("more packages expected: " + \
13802 "cp = %s, cpv = %s matches = %s" % \
13803 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13806 if args_set.findAtomForPackage(pkg) is None:
13807 world_temp_set.add("=" + pkg.cpv)
13809 except portage.exception.InvalidDependString, e:
13810 show_invalid_depstring_notice(pkg,
13811 pkg.metadata["PROVIDE"], str(e))
13813 world_temp_set.add("=" + pkg.cpv)
# --- Seed the resolver with the required sets as SetArg nodes so their
# atoms are pulled into the graph as root dependencies.
13817 for s, package_set in required_sets.iteritems():
13818 set_atom = SETPREFIX + s
13819 set_arg = SetArg(arg=set_atom, set=package_set,
13820 root_config=resolver.roots[myroot])
13821 set_args[s] = set_arg
13822 for atom in set_arg.set:
13823 resolver._dep_stack.append(
13824 Dependency(atom=atom, root=myroot, parent=set_arg))
13825 resolver.digraph.add(set_arg, None)
13827 success = resolver._complete_graph()
13828 writemsg_level("\b\b... done!\n")
13830 resolver.display_problems()
13835 def unresolved_deps():
# Collect (atom, parent-cpv) pairs for hard (> SOFT priority)
# dependencies the resolver could not satisfy.
13837 unresolvable = set()
13838 for dep in resolver._initially_unsatisfied_deps:
13839 if isinstance(dep.parent, Package) and \
13840 (dep.priority > UnmergeDepPriority.SOFT):
13841 unresolvable.add((dep.atom, dep.parent.cpv))
13843 if not unresolvable:
# Unresolvable deps abort the operation unless the user named specific
# packages (allow_missing_deps set above).
13846 if unresolvable and not allow_missing_deps:
13847 prefix = bad(" * ")
13849 msg.append("Dependencies could not be completely resolved due to")
13850 msg.append("the following required packages not being installed:")
13852 for atom, parent in unresolvable:
13853 msg.append(" %s pulled in by:" % (atom,))
13854 msg.append(" %s" % (parent,))
13856 msg.append("Have you forgotten to run " + \
13857 good("`emerge --update --newuse --deep @system @world`") + " prior")
13858 msg.append(("to %s? It may be necessary to manually " + \
13859 "uninstall packages that no longer") % action)
13860 msg.append("exist in the portage tree since " + \
13861 "it may not be possible to satisfy their")
13862 msg.append("dependencies. Also, be aware of " + \
13863 "the --with-bdeps option that is documented")
13864 msg.append("in " + good("`man emerge`") + ".")
13865 if action == "prune":
13867 msg.append("If you would like to ignore " + \
13868 "dependencies then use %s." % good("--nodeps"))
13869 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13870 level=logging.ERROR, noiselevel=-1)
13874 if unresolved_deps():
# --- Count required packages in a copy of the resolved graph.
13877 graph = resolver.digraph.copy()
13878 required_pkgs_total = 0
13880 if isinstance(node, Package):
13881 required_pkgs_total += 1
13883 def show_parents(child_node):
# Print which graph nodes pull in child_node (used by --verbose paths).
13884 parent_nodes = graph.parent_nodes(child_node)
13885 if not parent_nodes:
13886 # With --prune, the highest version can be pulled in without any
13887 # real parent since all installed packages are pulled in. In that
13888 # case there's nothing to show here.
13891 for node in parent_nodes:
13892 parent_strs.append(str(getattr(node, "cpv", node)))
13895 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
13896 for parent_str in parent_strs:
13897 msg.append(" %s\n" % (parent_str,))
13899 portage.writemsg_stdout("".join(msg), noiselevel=-1)
13901 def cmp_pkg_cpv(pkg1, pkg2):
13902 """Sort Package instances by cpv."""
13903 if pkg1.cpv > pkg2.cpv:
13905 elif pkg1.cpv == pkg2.cpv:
13910 def create_cleanlist():
# Select installed packages not present in the required graph; the
# result is the removal candidate list.
13911 pkgs_to_remove = []
13913 if action == "depclean":
13916 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13919 arg_atom = args_set.findAtomForPackage(pkg)
13920 except portage.exception.InvalidDependString:
13921 # this error has already been displayed by now
13925 if pkg not in graph:
13926 pkgs_to_remove.append(pkg)
13927 elif "--verbose" in myopts:
13931 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13932 if pkg not in graph:
13933 pkgs_to_remove.append(pkg)
13934 elif "--verbose" in myopts:
13937 elif action == "prune":
13938 # Prune really uses all installed instead of world. It's not
13939 # a real reverse dependency so don't display it as such.
13940 graph.remove(set_args["world"])
13942 for atom in args_set:
13943 for pkg in vardb.match_pkgs(atom):
13944 if pkg not in graph:
13945 pkgs_to_remove.append(pkg)
13946 elif "--verbose" in myopts:
13949 if not pkgs_to_remove:
13951 ">>> No packages selected for removal by %s\n" % action)
13952 if "--verbose" not in myopts:
13954 ">>> To see reverse dependencies, use %s\n" % \
13956 if action == "prune":
13958 ">>> To ignore dependencies, use %s\n" % \
13961 return pkgs_to_remove
13963 cleanlist = create_cleanlist()
13966 clean_set = set(cleanlist)
13968 # Check if any of these package are the sole providers of libraries
13969 # with consumers that have not been selected for removal. If so, these
13970 # packages and any dependencies need to be added to the graph.
13971 real_vardb = trees[myroot]["vartree"].dbapi
13972 linkmap = real_vardb.linkmap
13973 liblist = linkmap.listLibraryObjects()
13974 consumer_cache = {}
13975 provider_cache = {}
13979 writemsg_level(">>> Checking for lib consumers...\n")
13981 for pkg in cleanlist:
13982 pkg_dblink = real_vardb._dblink(pkg.cpv)
13983 provided_libs = set()
# Determine which shared libraries this candidate package owns.
13985 for lib in liblist:
13986 if pkg_dblink.isowner(lib, myroot):
13987 provided_libs.add(lib)
13989 if not provided_libs:
# Look up consumers of each provided library, caching per-lib results
# since multiple packages may provide/consume the same objects.
13993 for lib in provided_libs:
13994 lib_consumers = consumer_cache.get(lib)
13995 if lib_consumers is None:
13996 lib_consumers = linkmap.findConsumers(lib)
13997 consumer_cache[lib] = lib_consumers
13999 consumers[lib] = lib_consumers
# A package consuming its own library is not a blocker: drop
# consumer files owned by the package itself.
14004 for lib, lib_consumers in consumers.items():
14005 for consumer_file in list(lib_consumers):
14006 if pkg_dblink.isowner(consumer_file, myroot):
14007 lib_consumers.remove(consumer_file)
14008 if not lib_consumers:
14014 for lib, lib_consumers in consumers.iteritems():
14016 soname = soname_cache.get(lib)
14018 soname = linkmap.getSoname(lib)
14019 soname_cache[lib] = soname
14021 consumer_providers = []
14022 for lib_consumer in lib_consumers:
# NOTE(review): the cache is read with key `lib` but written with key
# `lib_consumer` — findProviders() is keyed on the consumer, so the
# .get(lib) lookup looks suspicious; verify against upstream before
# relying on this cache (missing lines may change the picture).
14023 providers = provider_cache.get(lib)
14024 if providers is None:
14025 providers = linkmap.findProviders(lib_consumer)
14026 provider_cache[lib_consumer] = providers
14027 if soname not in providers:
14028 # Why does this happen?
14030 consumer_providers.append(
14031 (lib_consumer, providers[soname]))
14033 consumers[lib] = consumer_providers
14035 consumer_map[pkg] = consumers
# --- Map every consumer/provider file back to its owning package.
14039 search_files = set()
14040 for consumers in consumer_map.itervalues():
14041 for lib, consumer_providers in consumers.iteritems():
14042 for lib_consumer, providers in consumer_providers:
14043 search_files.add(lib_consumer)
14044 search_files.update(providers)
14046 writemsg_level(">>> Assigning files to packages...\n")
14047 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
14049 for pkg, consumers in consumer_map.items():
14050 for lib, consumer_providers in consumers.items():
14051 lib_consumers = set()
14053 for lib_consumer, providers in consumer_providers:
14054 owner_set = file_owners.get(lib_consumer)
14055 provider_dblinks = set()
14056 provider_pkgs = set()
14058 if len(providers) > 1:
14059 for provider in providers:
14060 provider_set = file_owners.get(provider)
14061 if provider_set is not None:
14062 provider_dblinks.update(provider_set)
# If another package outside the clean set also provides this
# soname, the consumer is safe.
14064 if len(provider_dblinks) > 1:
14065 for provider_dblink in provider_dblinks:
14066 pkg_key = ("installed", myroot,
14067 provider_dblink.mycpv, "nomerge")
14068 if pkg_key not in clean_set:
14069 provider_pkgs.add(vardb.get(pkg_key))
14074 if owner_set is not None:
14075 lib_consumers.update(owner_set)
# Consumers that are themselves being removed don't block removal.
14077 for consumer_dblink in list(lib_consumers):
14078 if ("installed", myroot, consumer_dblink.mycpv,
14079 "nomerge") in clean_set:
14080 lib_consumers.remove(consumer_dblink)
14084 consumers[lib] = lib_consumers
14088 del consumer_map[pkg]
14091 # TODO: Implement a package set for rebuilding consumer packages.
# --- Warn about packages kept back to preserve link-level deps.
14093 msg = "In order to avoid breakage of link level " + \
14094 "dependencies, one or more packages will not be removed. " + \
14095 "This can be solved by rebuilding " + \
14096 "the packages that pulled them in."
14098 prefix = bad(" * ")
14099 from textwrap import wrap
14100 writemsg_level("".join(prefix + "%s\n" % line for \
14101 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
14104 for pkg, consumers in consumer_map.iteritems():
14105 unique_consumers = set(chain(*consumers.values()))
14106 unique_consumers = sorted(consumer.mycpv \
14107 for consumer in unique_consumers)
14109 msg.append(" %s pulled in by:" % (pkg.cpv,))
14110 for consumer in unique_consumers:
14111 msg.append(" %s" % (consumer,))
14113 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
14114 level=logging.WARNING, noiselevel=-1)
14116 # Add lib providers to the graph as children of lib consumers,
14117 # and also add any dependencies pulled in by the provider.
14118 writemsg_level(">>> Adding lib providers to graph...\n")
14120 for pkg, consumers in consumer_map.iteritems():
14121 for consumer_dblink in set(chain(*consumers.values())):
14122 consumer_pkg = vardb.get(("installed", myroot,
14123 consumer_dblink.mycpv, "nomerge"))
14124 if not resolver._add_pkg(pkg,
14125 Dependency(parent=consumer_pkg,
14126 priority=UnmergeDepPriority(runtime=True),
14128 resolver.display_problems()
# --- Re-resolve after graph additions, then rebuild the clean list.
14131 writemsg_level("\nCalculating dependencies ")
14132 success = resolver._complete_graph()
14133 writemsg_level("\b\b... done!\n")
14134 resolver.display_problems()
14137 if unresolved_deps():
14140 graph = resolver.digraph.copy()
14141 required_pkgs_total = 0
14143 if isinstance(node, Package):
14144 required_pkgs_total += 1
14145 cleanlist = create_cleanlist()
14148 clean_set = set(cleanlist)
14150 # Use a topological sort to create an unmerge order such that
14151 # each package is unmerged before it's dependencies. This is
14152 # necessary to avoid breaking things that may need to run
14153 # during pkg_prerm or pkg_postrm phases.
14155 # Create a new graph to account for dependencies between the
14156 # packages being unmerged.
14160 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
14161 runtime = UnmergeDepPriority(runtime=True)
14162 runtime_post = UnmergeDepPriority(runtime_post=True)
14163 buildtime = UnmergeDepPriority(buildtime=True)
14165 "RDEPEND": runtime,
14166 "PDEPEND": runtime_post,
14167 "DEPEND": buildtime,
# Edge priorities per dependency type; the dict literal's opening line
# is one of the lines missing from this view.
14170 for node in clean_set:
14171 graph.add(node, None)
14173 node_use = node.metadata["USE"].split()
14174 for dep_type in dep_keys:
14175 depstr = node.metadata[dep_type]
# Temporarily relax strict dep checking while evaluating deps of
# packages that are being removed anyway; restored right after.
14179 portage.dep._dep_check_strict = False
14180 success, atoms = portage.dep_check(depstr, None, settings,
14181 myuse=node_use, trees=resolver._graph_trees,
14184 portage.dep._dep_check_strict = True
14186 # Ignore invalid deps of packages that will
14187 # be uninstalled anyway.
14190 priority = priority_map[dep_type]
14192 if not isinstance(atom, portage.dep.Atom):
14193 # Ignore invalid atoms returned from dep_check().
14197 matches = vardb.match_pkgs(atom)
# Only edges between two packages in the clean set matter here.
14200 for child_node in matches:
14201 if child_node in clean_set:
14202 graph.add(child_node, node, priority=priority)
14205 if len(graph.order) == len(graph.root_nodes()):
14206 # If there are no dependencies between packages
14207 # let unmerge() group them by cat/pn.
14209 cleanlist = [pkg.cpv for pkg in graph.order]
14211 # Order nodes from lowest to highest overall reference count for
14212 # optimal root node selection.
14213 node_refcounts = {}
14214 for node in graph.order:
14215 node_refcounts[node] = len(graph.parent_nodes(node))
14216 def cmp_reference_count(node1, node2):
14217 return node_refcounts[node1] - node_refcounts[node2]
14218 graph.order.sort(key=cmp_sort_key(cmp_reference_count))
# Peel root nodes off the graph, progressively ignoring higher dep
# priorities to break circular dependencies when no true root exists.
14220 ignore_priority_range = [None]
14221 ignore_priority_range.extend(
14222 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14223 while not graph.empty():
14224 for ignore_priority in ignore_priority_range:
14225 nodes = graph.root_nodes(ignore_priority=ignore_priority)
14229 raise AssertionError("no root nodes")
14230 if ignore_priority is not None:
14231 # Some deps have been dropped due to circular dependencies,
14232 # so only pop one node in order do minimize the number that
14237 cleanlist.append(node.cpv)
14239 unmerge(root_config, myopts, "unmerge", cleanlist,
14240 ldpath_mtimes, ordered=ordered)
14242 if action == "prune":
# --- Summary statistics printed at the end of the run.
14245 if not cleanlist and "--quiet" in myopts:
14248 print "Packages installed: "+str(len(vardb.cpv_all()))
14249 print "Packages in world: " + \
14250 str(len(root_config.sets["world"].getAtoms()))
14251 print "Packages in system: " + \
14252 str(len(root_config.sets["system"].getAtoms()))
14253 print "Required packages: "+str(required_pkgs_total)
14254 if "--pretend" in myopts:
14255 print "Number to remove: "+str(len(cleanlist))
14257 print "Number removed: "+str(len(cleanlist))
# NOTE(review): damaged extraction — original-file line numbers are fused
# into each line, indentation is lost, and interior lines are missing
# (e.g. the "try:" before loadResumeCommand and several loop/guard lines).
# Tokens are kept byte-identical; only comments are added.
#
# Purpose (from visible code and docstring): rebuild a depgraph from the
# saved resume list in mtimedb. When skip_unsatisfied is set, packages with
# unsatisfiable dependencies are pruned from the mergelist (along with any
# dependents whose deps would become unsatisfied) and retried in a loop.
14259 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14261 Construct a depgraph for the given resume list. This will raise
14262 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14264 @returns: (success, depgraph, dropped_tasks)
# skip_unsatisfied is unconditionally True here; the surrounding lines
# that might gate it are missing from this view — TODO confirm upstream.
14267 skip_unsatisfied = True
14268 mergelist = mtimedb["resume"]["mergelist"]
14269 dropped_tasks = set()
14271 mydepgraph = depgraph(settings, trees,
14272 myopts, myparams, spinner)
14274 success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14275 skip_masked=skip_masked)
# e.value holds the unsatisfied Dependency objects (iterated below).
14276 except depgraph.UnsatisfiedResumeDep, e:
14277 if not skip_unsatisfied:
14280 graph = mydepgraph.digraph
14281 unsatisfied_parents = dict((dep.parent, dep.parent) \
14282 for dep in e.value)
14283 traversed_nodes = set()
14284 unsatisfied_stack = list(unsatisfied_parents)
# Walk ancestors of each unsatisfied parent: dropping a package can in
# turn leave its own parents unsatisfied, so propagate upward.
14285 while unsatisfied_stack:
14286 pkg = unsatisfied_stack.pop()
14287 if pkg in traversed_nodes:
14289 traversed_nodes.add(pkg)
14291 # If this package was pulled in by a parent
14292 # package scheduled for merge, removing this
14293 # package may cause the the parent package's
14294 # dependency to become unsatisfied.
14295 for parent_node in graph.parent_nodes(pkg):
14296 if not isinstance(parent_node, Package) \
14297 or parent_node.operation not in ("merge", "nomerge"):
14300 graph.child_nodes(parent_node,
14301 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14302 if pkg in unsatisfied:
14303 unsatisfied_parents[parent_node] = parent_node
14304 unsatisfied_stack.append(parent_node)
# Rebuild the mergelist without the unsatisfied tasks. Mergelist
# entries are 4-element lists; tuple(x) is the comparable task key.
14306 pruned_mergelist = []
14307 for x in mergelist:
14308 if isinstance(x, list) and \
14309 tuple(x) not in unsatisfied_parents:
14310 pruned_mergelist.append(x)
14312 # If the mergelist doesn't shrink then this loop is infinite.
14313 if len(pruned_mergelist) == len(mergelist):
14314 # This happens if a package can't be dropped because
14315 # it's already installed, but it has unsatisfied PDEPEND.
# Mutate in place so the change is visible via mtimedb["resume"].
14317 mergelist[:] = pruned_mergelist
14319 # Exclude installed packages that have been removed from the graph due
14320 # to failure to build/install runtime dependencies after the dependent
14321 # package has already been installed.
14322 dropped_tasks.update(pkg for pkg in \
14323 unsatisfied_parents if pkg.operation != "nomerge")
# break_refs() detaches the dropped packages from the graph so they can
# be garbage-collected before the retry.
14324 mydepgraph.break_refs(unsatisfied_parents)
14326 del e, graph, traversed_nodes, \
14327 unsatisfied_parents, unsatisfied_stack
14331 return (success, mydepgraph, dropped_tasks)
14333 def action_build(settings, trees, mtimedb,
14334 myopts, myaction, myfiles, spinner):
14336 # validate the state of the resume data
14337 # so that we can make assumptions later.
14338 for k in ("resume", "resume_backup"):
14339 if k not in mtimedb:
14341 resume_data = mtimedb[k]
14342 if not isinstance(resume_data, dict):
14345 mergelist = resume_data.get("mergelist")
14346 if not isinstance(mergelist, list):
14349 for x in mergelist:
14350 if not (isinstance(x, list) and len(x) == 4):
14352 pkg_type, pkg_root, pkg_key, pkg_action = x
14353 if pkg_root not in trees:
14354 # Current $ROOT setting differs,
14355 # so the list must be stale.
14361 resume_opts = resume_data.get("myopts")
14362 if not isinstance(resume_opts, (dict, list)):
14365 favorites = resume_data.get("favorites")
14366 if not isinstance(favorites, list):
14371 if "--resume" in myopts and \
14372 ("resume" in mtimedb or
14373 "resume_backup" in mtimedb):
14375 if "resume" not in mtimedb:
14376 mtimedb["resume"] = mtimedb["resume_backup"]
14377 del mtimedb["resume_backup"]
14379 # "myopts" is a list for backward compatibility.
14380 resume_opts = mtimedb["resume"].get("myopts", [])
14381 if isinstance(resume_opts, list):
14382 resume_opts = dict((k,True) for k in resume_opts)
14383 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14384 resume_opts.pop(opt, None)
14385 myopts.update(resume_opts)
14387 if "--debug" in myopts:
14388 writemsg_level("myopts %s\n" % (myopts,))
14390 # Adjust config according to options of the command being resumed.
14391 for myroot in trees:
14392 mysettings = trees[myroot]["vartree"].settings
14393 mysettings.unlock()
14394 adjust_config(myopts, mysettings)
14396 del myroot, mysettings
14398 ldpath_mtimes = mtimedb["ldpath"]
14401 buildpkgonly = "--buildpkgonly" in myopts
14402 pretend = "--pretend" in myopts
14403 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14404 ask = "--ask" in myopts
14405 nodeps = "--nodeps" in myopts
14406 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14407 tree = "--tree" in myopts
14408 if nodeps and tree:
14410 del myopts["--tree"]
14411 portage.writemsg(colorize("WARN", " * ") + \
14412 "--tree is broken with --nodeps. Disabling...\n")
14413 debug = "--debug" in myopts
14414 verbose = "--verbose" in myopts
14415 quiet = "--quiet" in myopts
14416 if pretend or fetchonly:
14417 # make the mtimedb readonly
14418 mtimedb.filename = None
14419 if '--digest' in myopts or 'digest' in settings.features:
14420 if '--digest' in myopts:
14421 msg = "The --digest option"
14423 msg = "The FEATURES=digest setting"
14425 msg += " can prevent corruption from being" + \
14426 " noticed. The `repoman manifest` command is the preferred" + \
14427 " way to generate manifests and it is capable of doing an" + \
14428 " entire repository or category at once."
14429 prefix = bad(" * ")
14430 writemsg(prefix + "\n")
14431 from textwrap import wrap
14432 for line in wrap(msg, 72):
14433 writemsg("%s%s\n" % (prefix, line))
14434 writemsg(prefix + "\n")
14436 if "--quiet" not in myopts and \
14437 ("--pretend" in myopts or "--ask" in myopts or \
14438 "--tree" in myopts or "--verbose" in myopts):
14440 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14442 elif "--buildpkgonly" in myopts:
14446 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14448 print darkgreen("These are the packages that would be %s, in reverse order:") % action
14452 print darkgreen("These are the packages that would be %s, in order:") % action
14455 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14456 if not show_spinner:
14457 spinner.update = spinner.update_quiet
14460 favorites = mtimedb["resume"].get("favorites")
14461 if not isinstance(favorites, list):
14465 print "Calculating dependencies ",
14466 myparams = create_depgraph_params(myopts, myaction)
14468 resume_data = mtimedb["resume"]
14469 mergelist = resume_data["mergelist"]
14470 if mergelist and "--skipfirst" in myopts:
14471 for i, task in enumerate(mergelist):
14472 if isinstance(task, list) and \
14473 task and task[-1] == "merge":
14480 success, mydepgraph, dropped_tasks = resume_depgraph(
14481 settings, trees, mtimedb, myopts, myparams, spinner)
14482 except (portage.exception.PackageNotFound,
14483 depgraph.UnsatisfiedResumeDep), e:
14484 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14485 mydepgraph = e.depgraph
14488 from textwrap import wrap
14489 from portage.output import EOutput
14492 resume_data = mtimedb["resume"]
14493 mergelist = resume_data.get("mergelist")
14494 if not isinstance(mergelist, list):
14496 if mergelist and debug or (verbose and not quiet):
14497 out.eerror("Invalid resume list:")
14500 for task in mergelist:
14501 if isinstance(task, list):
14502 out.eerror(indent + str(tuple(task)))
14505 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14506 out.eerror("One or more packages are either masked or " + \
14507 "have missing dependencies:")
14510 for dep in e.value:
14511 if dep.atom is None:
14512 out.eerror(indent + "Masked package:")
14513 out.eerror(2 * indent + str(dep.parent))
14516 out.eerror(indent + str(dep.atom) + " pulled in by:")
14517 out.eerror(2 * indent + str(dep.parent))
14519 msg = "The resume list contains packages " + \
14520 "that are either masked or have " + \
14521 "unsatisfied dependencies. " + \
14522 "Please restart/continue " + \
14523 "the operation manually, or use --skipfirst " + \
14524 "to skip the first package in the list and " + \
14525 "any other packages that may be " + \
14526 "masked or have missing dependencies."
14527 for line in wrap(msg, 72):
14529 elif isinstance(e, portage.exception.PackageNotFound):
14530 out.eerror("An expected package is " + \
14531 "not available: %s" % str(e))
14533 msg = "The resume list contains one or more " + \
14534 "packages that are no longer " + \
14535 "available. Please restart/continue " + \
14536 "the operation manually."
14537 for line in wrap(msg, 72):
14541 print "\b\b... done!"
14545 portage.writemsg("!!! One or more packages have been " + \
14546 "dropped due to\n" + \
14547 "!!! masking or unsatisfied dependencies:\n\n",
14549 for task in dropped_tasks:
14550 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
14551 portage.writemsg("\n", noiselevel=-1)
14554 if mydepgraph is not None:
14555 mydepgraph.display_problems()
14556 if not (ask or pretend):
14557 # delete the current list and also the backup
14558 # since it's probably stale too.
14559 for k in ("resume", "resume_backup"):
14560 mtimedb.pop(k, None)
14565 if ("--resume" in myopts):
14566 print darkgreen("emerge: It seems we have nothing to resume...")
14569 myparams = create_depgraph_params(myopts, myaction)
14570 if "--quiet" not in myopts and "--nodeps" not in myopts:
14571 print "Calculating dependencies ",
14573 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14575 retval, favorites = mydepgraph.select_files(myfiles)
14576 except portage.exception.PackageNotFound, e:
14577 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14579 except portage.exception.PackageSetNotFound, e:
14580 root_config = trees[settings["ROOT"]]["root_config"]
14581 display_missing_pkg_set(root_config, e.value)
14584 print "\b\b... done!"
14586 mydepgraph.display_problems()
14589 if "--pretend" not in myopts and \
14590 ("--ask" in myopts or "--tree" in myopts or \
14591 "--verbose" in myopts) and \
14592 not ("--quiet" in myopts and "--ask" not in myopts):
14593 if "--resume" in myopts:
14594 mymergelist = mydepgraph.altlist()
14595 if len(mymergelist) == 0:
14596 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14598 favorites = mtimedb["resume"]["favorites"]
14599 retval = mydepgraph.display(
14600 mydepgraph.altlist(reversed=tree),
14601 favorites=favorites)
14602 mydepgraph.display_problems()
14603 if retval != os.EX_OK:
14605 prompt="Would you like to resume merging these packages?"
14607 retval = mydepgraph.display(
14608 mydepgraph.altlist(reversed=("--tree" in myopts)),
14609 favorites=favorites)
14610 mydepgraph.display_problems()
14611 if retval != os.EX_OK:
14614 for x in mydepgraph.altlist():
14615 if isinstance(x, Package) and x.operation == "merge":
14619 sets = trees[settings["ROOT"]]["root_config"].sets
14620 world_candidates = None
14621 if "--noreplace" in myopts and \
14622 not oneshot and favorites:
14623 # Sets that are not world candidates are filtered
14624 # out here since the favorites list needs to be
14625 # complete for depgraph.loadResumeCommand() to
14626 # operate correctly.
14627 world_candidates = [x for x in favorites \
14628 if not (x.startswith(SETPREFIX) and \
14629 not sets[x[1:]].world_candidate)]
14630 if "--noreplace" in myopts and \
14631 not oneshot and world_candidates:
14633 for x in world_candidates:
14634 print " %s %s" % (good("*"), x)
14635 prompt="Would you like to add these packages to your world favorites?"
14636 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14637 prompt="Nothing to merge; would you like to auto-clean packages?"
14640 print "Nothing to merge; quitting."
14643 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14644 prompt="Would you like to fetch the source files for these packages?"
14646 prompt="Would you like to merge these packages?"
14648 if "--ask" in myopts and userquery(prompt) == "No":
14653 # Don't ask again (e.g. when auto-cleaning packages after merge)
14654 myopts.pop("--ask", None)
14656 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14657 if ("--resume" in myopts):
14658 mymergelist = mydepgraph.altlist()
14659 if len(mymergelist) == 0:
14660 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14662 favorites = mtimedb["resume"]["favorites"]
14663 retval = mydepgraph.display(
14664 mydepgraph.altlist(reversed=tree),
14665 favorites=favorites)
14666 mydepgraph.display_problems()
14667 if retval != os.EX_OK:
14670 retval = mydepgraph.display(
14671 mydepgraph.altlist(reversed=("--tree" in myopts)),
14672 favorites=favorites)
14673 mydepgraph.display_problems()
14674 if retval != os.EX_OK:
14676 if "--buildpkgonly" in myopts:
14677 graph_copy = mydepgraph.digraph.clone()
14678 removed_nodes = set()
14679 for node in graph_copy:
14680 if not isinstance(node, Package) or \
14681 node.operation == "nomerge":
14682 removed_nodes.add(node)
14683 graph_copy.difference_update(removed_nodes)
14684 if not graph_copy.hasallzeros(ignore_priority = \
14685 DepPrioritySatisfiedRange.ignore_medium):
14686 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14687 print "!!! You have to merge the dependencies before you can build this package.\n"
14690 if "--buildpkgonly" in myopts:
14691 graph_copy = mydepgraph.digraph.clone()
14692 removed_nodes = set()
14693 for node in graph_copy:
14694 if not isinstance(node, Package) or \
14695 node.operation == "nomerge":
14696 removed_nodes.add(node)
14697 graph_copy.difference_update(removed_nodes)
14698 if not graph_copy.hasallzeros(ignore_priority = \
14699 DepPrioritySatisfiedRange.ignore_medium):
14700 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14701 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
14704 if ("--resume" in myopts):
14705 favorites=mtimedb["resume"]["favorites"]
14706 mymergelist = mydepgraph.altlist()
14707 mydepgraph.break_refs(mymergelist)
14708 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14709 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14710 del mydepgraph, mymergelist
14711 clear_caches(trees)
14713 retval = mergetask.merge()
14714 merge_count = mergetask.curval
14716 if "resume" in mtimedb and \
14717 "mergelist" in mtimedb["resume"] and \
14718 len(mtimedb["resume"]["mergelist"]) > 1:
14719 mtimedb["resume_backup"] = mtimedb["resume"]
14720 del mtimedb["resume"]
14722 mtimedb["resume"]={}
14723 # Stored as a dict starting with portage-2.1.6_rc1, and supported
14724 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14725 # a list type for options.
14726 mtimedb["resume"]["myopts"] = myopts.copy()
14728 # Convert Atom instances to plain str.
14729 mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14731 pkglist = mydepgraph.altlist()
14732 mydepgraph.saveNomergeFavorites()
14733 mydepgraph.break_refs(pkglist)
14734 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14735 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14736 del mydepgraph, pkglist
14737 clear_caches(trees)
14739 retval = mergetask.merge()
14740 merge_count = mergetask.curval
14742 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14743 if "yes" == settings.get("AUTOCLEAN"):
14744 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14745 unmerge(trees[settings["ROOT"]]["root_config"],
14746 myopts, "clean", [],
14747 ldpath_mtimes, autoclean=1)
14749 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14750 + " AUTOCLEAN is disabled. This can cause serious"
14751 + " problems due to overlapping packages.\n")
14752 trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
def multiple_actions(action1, action2):
	"""Report that two mutually exclusive emerge actions were requested.

	Writes an error naming both conflicting actions to stderr.
	(The caller is expected to abort afterwards.)
	"""
	err = sys.stderr.write
	err("\n!!! Multiple actions requested... Please choose one only.\n")
	err("!!! '%s' or '%s'\n\n" % (action1, action2))
# Build a new argument list in which the optional-argument options
# (-j/--jobs and --root-deps) always carry an explicit value, since
# optparse cannot handle optional option-arguments by itself.
# NOTE(review): the original file's line numbers are embedded in each line
# and several source lines (e.g. 14762, 14775, 14777) are missing from this
# view; the surviving code is kept byte-identical below.
14761 def insert_optional_args(args):
14763 Parse optional arguments and insert a value if one has
14764 not been provided. This is done before feeding the args
14765 to the optparse parser since that parser does not support
14766 this feature natively.
# Options that may take an optional value, and the values --root-deps accepts.
14770 jobs_opts = ("-j", "--jobs")
14771 root_deps_opt = '--root-deps'
14772 root_deps_choices = ('True', 'rdeps')
# Process the arguments as a stack so that peeking at the next token is cheap.
14773 arg_stack = args[:]
14774 arg_stack.reverse()
14776 arg = arg_stack.pop()
# --root-deps: keep an explicit choice if one follows, else default to 'True'.
14778 if arg == root_deps_opt:
14779 new_args.append(arg)
14780 if arg_stack and arg_stack[-1] in root_deps_choices:
14781 new_args.append(arg_stack.pop())
14783 # insert default argument
14784 new_args.append('True')
# Detect a short-option cluster containing 'j' (e.g. "-aj" or "-j3");
# anything else passes through unchanged.
14787 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
14788 if not (short_job_opt or arg in jobs_opts):
14789 new_args.append(arg)
14792 # Insert an empty placeholder in order to
14793 # satisfy the requirements of optparse.
14795 new_args.append("--jobs")
# Split a cluster like "-j3"/"-aj": digits become the job count; any other
# clustered short options are saved and re-emitted below.
14798 if short_job_opt and len(arg) > 2:
14799 if arg[:2] == "-j":
14801 job_count = int(arg[2:])
14803 saved_opts = arg[2:]
14806 saved_opts = arg[1:].replace("j", "")
# A bare numeric token directly after -j/--jobs is consumed as the count.
14808 if job_count is None and arg_stack:
14810 job_count = int(arg_stack[-1])
14814 # Discard the job count from the stack
14815 # since we're consuming it here.
# No count given: the value "True" requests an unlimited number of jobs.
14818 if job_count is None:
14819 # unlimited number of jobs
14820 new_args.append("True")
14822 new_args.append(str(job_count))
# Re-emit the short options that were clustered together with 'j'.
14824 if saved_opts is not None:
14825 new_args.append("-" + saved_opts)
# Parse the emerge command line into (myaction, myopts, myfiles) using
# optparse.  Large parts of the argument_options dict (option names,
# "shortopt"/"action" keys) are missing from this view; surviving code is
# kept byte-identical.
14829 def parse_opts(tmpcmdline, silent=False):
# actions, options and shortmapping are module-level tables defined elsewhere
# in this file (not visible here).
14834 global actions, options, shortmapping
14836 longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
# Options that take an argument, keyed by their long option string.
14837 argument_options = {
14839 "help":"specify the location for portage configuration files",
14843 "help":"enable or disable color output",
14845 "choices":("y", "n")
14850 "help" : "Specifies the number of packages to build " + \
14856 "--load-average": {
14858 "help" :"Specifies that no new builds should be started " + \
14859 "if there are other builds running and the load average " + \
14860 "is at least LOAD (a floating-point number).",
14866 "help":"include unnecessary build time dependencies",
14868 "choices":("y", "n")
14871 "help":"specify conditions to trigger package reinstallation",
14873 "choices":["changed-use"]
14876 "help" : "specify the target root filesystem for merging packages",
# NOTE(review): "depedencies" is a typo in this user-visible help string;
# fixing it changes program output, so it is left for a code change.
14881 "help" : "modify interpretation of depedencies",
14883 "choices" :("True", "rdeps")
# Build the parser; drop optparse's builtin --help so emerge's own help
# handling is used instead.
14887 from optparse import OptionParser
14888 parser = OptionParser()
14889 if parser.has_option("--help"):
14890 parser.remove_option("--help")
# Register every action, boolean option, short option and long-option alias
# as a store_true flag whose dest is the name with '-' mapped to '_'.
14892 for action_opt in actions:
14893 parser.add_option("--" + action_opt, action="store_true",
14894 dest=action_opt.replace("-", "_"), default=False)
14895 for myopt in options:
14896 parser.add_option(myopt, action="store_true",
14897 dest=myopt.lstrip("--").replace("-", "_"), default=False)
14898 for shortopt, longopt in shortmapping.iteritems():
14899 parser.add_option("-" + shortopt, action="store_true",
14900 dest=longopt.lstrip("--").replace("-", "_"), default=False)
14901 for myalias, myopt in longopt_aliases.iteritems():
14902 parser.add_option(myalias, action="store_true",
14903 dest=myopt.lstrip("--").replace("-", "_"), default=False)
14905 for myopt, kwargs in argument_options.iteritems():
14906 parser.add_option(myopt,
14907 dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
# Normalize optional-argument options (-j, --root-deps) before optparse
# sees them, since optparse cannot do this itself.
14909 tmpcmdline = insert_optional_args(tmpcmdline)
14911 myoptions, myargs = parser.parse_args(args=tmpcmdline)
# --root-deps: the string "True" means the bare boolean form was used.
14913 if myoptions.root_deps == "True":
14914 myoptions.root_deps = True
# --jobs: "True" means unlimited, otherwise the value must parse as an int
# (the try/except and validity checks are partly missing from this view).
14918 if myoptions.jobs == "True":
14922 jobs = int(myoptions.jobs)
14926 if jobs is not True and \
14930 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
14931 (myoptions.jobs,), noiselevel=-1)
14933 myoptions.jobs = jobs
# --load-average: parse as float; non-positive values disable the limit.
14935 if myoptions.load_average:
14937 load_average = float(myoptions.load_average)
14941 if load_average <= 0.0:
14942 load_average = None
14944 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
14945 (myoptions.load_average,), noiselevel=-1)
14947 myoptions.load_average = load_average
# Copy the parsed values into the myopts dict, keyed by option string.
14949 for myopt in options:
14950 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
14952 myopts[myopt] = True
14954 for myopt in argument_options:
14955 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
# --searchdesc implies --search.
14959 if myoptions.searchdesc:
14960 myoptions.search = True
# Only one action may be requested; a second one is a fatal usage error
# reported via multiple_actions().
14962 for action_opt in actions:
14963 v = getattr(myoptions, action_opt.replace("-", "_"))
14966 multiple_actions(myaction, action_opt)
14968 myaction = action_opt
14972 return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
	"""Validate the vartree settings of every configured root.

	Iterates over all roots in *trees* and invokes validate() on each
	root's vartree settings object.
	"""
	for root_name in trees:
		trees[root_name]["vartree"].settings.validate()
def clear_caches(trees):
	"""Release memory held by per-root database caches.

	For every root in *trees*: calls melt() on the portdbapi (presumably
	the inverse of the freeze() used for xmatch memoization -- confirm),
	clears the porttree and bintree aux caches, the bintree cache and the
	vartree linkmap cache.  Finally clears portage's global dircache.
	"""
	for root_trees in trees.itervalues():
		porttree_db = root_trees["porttree"].dbapi
		bintree_db = root_trees["bintree"].dbapi
		porttree_db.melt()
		porttree_db._aux_cache.clear()
		bintree_db._aux_cache.clear()
		bintree_db._clear_cache()
		root_trees["vartree"].dbapi.linkmap._clear_cache()
	portage.dircache.clear()
# Create or refresh the global emerge configuration: the package trees for
# every root, a RootConfig per root, a settings object, and the mtimedb.
# NOTE(review): several lines are missing from this view (e.g. the kwargs
# initialization before 14991 and the kwargs assignment at 14994); the
# surviving code is kept byte-identical.
14989 def load_emerge_config(trees=None):
# Collect PORTAGE_CONFIGROOT / ROOT overrides from the environment as
# keyword arguments for create_trees().
14991 for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
14992 v = os.environ.get(envvar, None)
14993 if v and v.strip():
14995 trees = portage.create_trees(trees=trees, **kwargs)
# Attach a RootConfig (settings plus set configuration) to every root.
14997 for root, root_trees in trees.iteritems():
14998 settings = root_trees["vartree"].settings
14999 setconfig = load_default_config(settings, root_trees)
15000 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
# Pick the settings object to return; presumably the non-"/" root wins when
# one exists (intervening lines missing from this view -- confirm).
15002 settings = trees["/"]["vartree"].settings
15004 for myroot in trees:
15006 settings = trees[myroot]["vartree"].settings
# The mtimedb file always lives under the host's cache path, rooted at "/".
15009 mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
15010 mtimedb = portage.MtimeDB(mtimedbfile)
15012 return settings, trees, mtimedb
# NOTE(review): several lines (including the "try:" statements paired with
# the except clauses below) are missing from this view; surviving code is
# kept byte-identical.
15014 def adjust_config(myopts, settings):
15015 """Make emerge specific adjustments to the config."""
# Normalize selected variables to lowercase so comparisons elsewhere are
# case-insensitive.
15017 # To enhance usability, make some vars case insensitive by forcing them to
15019 for myvar in ("AUTOCLEAN", "NOCOLOR"):
15020 if myvar in settings:
15021 settings[myvar] = settings[myvar].lower()
15022 settings.backup_changes(myvar)
15025 # Kill noauto as it will break merges otherwise.
15026 if "noauto" in settings.features:
15027 settings.features.remove('noauto')
15028 settings['FEATURES'] = ' '.join(sorted(settings.features))
15029 settings.backup_changes("FEATURES")
# CLEAN_DELAY: must parse as an integer; fall back to the previous default
# on failure and record the normalized value.
15033 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
15034 except ValueError, e:
15035 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15036 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
15037 settings["CLEAN_DELAY"], noiselevel=-1)
15038 settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
15039 settings.backup_changes("CLEAN_DELAY")
# EMERGE_WARNING_DELAY: same integer-parse-with-fallback pattern.
15041 EMERGE_WARNING_DELAY = 10
15043 EMERGE_WARNING_DELAY = int(settings.get(
15044 "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
15045 except ValueError, e:
15046 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15047 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
15048 settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
15049 settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
15050 settings.backup_changes("EMERGE_WARNING_DELAY")
# Propagate command-line flags into the settings object.
15052 if "--quiet" in myopts:
15053 settings["PORTAGE_QUIET"]="1"
15054 settings.backup_changes("PORTAGE_QUIET")
15056 if "--verbose" in myopts:
15057 settings["PORTAGE_VERBOSE"] = "1"
15058 settings.backup_changes("PORTAGE_VERBOSE")
15060 # Set so that configs will be merged regardless of remembered status
15061 if ("--noconfmem" in myopts):
15062 settings["NOCONFMEM"]="1"
15063 settings.backup_changes("NOCONFMEM")
# PORTAGE_DEBUG: must be the integer 0 or 1; complain otherwise.
15065 # Set various debug markers... They should be merged somehow.
15068 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
15069 if PORTAGE_DEBUG not in (0, 1):
15070 portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
15071 PORTAGE_DEBUG, noiselevel=-1)
15072 portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
15075 except ValueError, e:
15076 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15077 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
15078 settings["PORTAGE_DEBUG"], noiselevel=-1)
# --debug on the command line forces PORTAGE_DEBUG on (the assignment of
# PORTAGE_DEBUG at line 15081 is missing from this view).
15080 if "--debug" in myopts:
15082 settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
15083 settings.backup_changes("PORTAGE_DEBUG")
# Color handling: NOCOLOR disables color unless overridden below.
15085 if settings.get("NOCOLOR") not in ("yes","true"):
15086 portage.output.havecolor = 1
15088 """The explicit --color < y | n > option overrides the NOCOLOR environment
15089 variable and stdout auto-detection."""
15090 if "--color" in myopts:
15091 if "y" == myopts["--color"]:
15092 portage.output.havecolor = 1
15093 settings["NOCOLOR"] = "false"
15095 portage.output.havecolor = 0
15096 settings["NOCOLOR"] = "true"
15097 settings.backup_changes("NOCOLOR")
# Without --color, disable color when stdout is not a terminal.
15098 elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
15099 portage.output.havecolor = 0
15100 settings["NOCOLOR"] = "true"
15101 settings.backup_changes("NOCOLOR")
15103 def apply_priorities(settings):
# Renice the current process according to PORTAGE_NICENESS (default "0").
# Failures (bad value, or OS refusal) are reported but not fatal.
# NOTE(review): the "try:" line (original 15108) pairing with the except
# clause below is missing from this view; code kept byte-identical.
15107 def nice(settings):
15109 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
15110 except (OSError, ValueError), e:
15111 out = portage.output.EOutput()
15112 out.eerror("Failed to change nice value to '%s'" % \
15113 settings["PORTAGE_NICENESS"])
15114 out.eerror("%s\n" % str(e))
# Run the user-configured PORTAGE_IONICE_COMMAND (if any) with ${PID}
# expanded to the current process id.  A missing command binary is treated
# as "kernel has no ionice support" and ignored silently; a nonzero exit
# status is reported via eerror.
# NOTE(review): several lines (e.g. the early return when no command is set,
# and the "try:" before the spawn) are missing from this view; code kept
# byte-identical.
15116 def ionice(settings):
15118 ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
# Split the configured command shell-style into an argv list.
15120 ionice_cmd = shlex.split(ionice_cmd)
# Substitute ${PID} in each argument with this process's pid.
15124 from portage.util import varexpand
15125 variables = {"PID" : str(os.getpid())}
15126 cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
15129 rval = portage.process.spawn(cmd, env=os.environ)
15130 except portage.exception.CommandNotFound:
15131 # The OS kernel probably doesn't support ionice,
15132 # so return silently.
15135 if rval != os.EX_OK:
15136 out = portage.output.EOutput()
15137 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
15138 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
# Report that a requested package set does not exist, and list the set
# names that are available in root_config.
# NOTE(review): the "msg = []" initialization and surrounding blank-message
# lines are missing from this view; code kept byte-identical.
15140 def display_missing_pkg_set(root_config, set_name):
15143 msg.append(("emerge: There are no sets to satisfy '%s'. " + \
15144 "The following sets exist:") % \
15145 colorize("INFORM", set_name))
# List the available sets, sorted for stable output.
15148 for s in sorted(root_config.sets):
15149 msg.append(" %s" % s)
15152 writemsg_level("".join("%s\n" % l for l in msg),
15153 level=logging.ERROR, noiselevel=-1)
# Expand @set arguments in myfiles into concrete package atoms (or keep them
# as set references for actions that must expand sets themselves), handle
# per-set option arguments and simple set expressions, and report missing or
# broken sets.  Returns (newargs, retval).
# NOTE(review): many lines are missing from this view (e.g. the newargs
# initialization, ARG_START/ARG_END and IS_OPERATOR definitions, several
# loop headers and return statements); surviving code is kept byte-identical.
15155 def expand_set_arguments(myfiles, myaction, root_config):
15157 setconfig = root_config.setconfig
15159 sets = setconfig.getSets()
15161 # In order to know exactly which atoms/sets should be added to the
15162 # world file, the depgraph performs set expansion later. It will get
15163 # confused about where the atoms came from if it's not allowed to
15164 # expand them itself.
15165 do_not_expand = (None, )
# Bare "system"/"world" arguments are rewritten with the set prefix.
15168 if a in ("system", "world"):
15169 newargs.append(SETPREFIX+a)
15176 # separators for set arguments
15180 # WARNING: all operators must be of equal length
15182 DIFF_OPERATOR = "-@"
15183 UNION_OPERATOR = "+@"
# Parse per-set option arguments of the form @name[opt=val,...] and feed
# them to setconfig.update() (ARG_START/ARG_END definitions not visible).
15185 for i in range(0, len(myfiles)):
15186 if myfiles[i].startswith(SETPREFIX):
15189 x = myfiles[i][len(SETPREFIX):]
15192 start = x.find(ARG_START)
15193 end = x.find(ARG_END)
15194 if start > 0 and start < end:
15195 namepart = x[:start]
15196 argpart = x[start+1:end]
15198 # TODO: implement proper quoting
15199 args = argpart.split(",")
15203 k, v = a.split("=", 1)
# An argument without "=" is treated as a boolean flag set to "True".
15206 options[a] = "True"
15207 setconfig.update(namepart, options)
15208 newset += (x[:start-len(namepart)]+namepart)
15209 x = x[end+len(ARG_END):]
15213 myfiles[i] = SETPREFIX+newset
# Reload sets now that per-set options may have changed the configuration.
15215 sets = setconfig.getSets()
15217 # display errors that occurred while loading the SetConfig instance
15218 for e in setconfig.errors:
15219 print colorize("BAD", "Error during set creation: %s" % e)
15221 # emerge relies on the existence of sets with names "world" and "system"
15222 required_sets = ("world", "system")
# Build a readable, comma-joined list of any missing required sets and
# point the user at sets.conf.
15225 for s in required_sets:
15227 missing_sets.append(s)
15229 if len(missing_sets) > 2:
15230 missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
15231 missing_sets_str += ', and "%s"' % missing_sets[-1]
15232 elif len(missing_sets) == 2:
15233 missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
15235 missing_sets_str = '"%s"' % missing_sets[-1]
15236 msg = ["emerge: incomplete set configuration, " + \
15237 "missing set(s): %s" % missing_sets_str]
15239 msg.append(" sets defined: %s" % ", ".join(sets))
15240 msg.append(" This usually means that '%s'" % \
15241 (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
15242 msg.append(" is missing or corrupt.")
15244 writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
# Actions that remove packages; such actions require the set to support
# the "unmerge" operation (checked further below).
15246 unmerge_actions = ("unmerge", "prune", "clean", "depclean")
15249 if a.startswith(SETPREFIX):
15250 # support simple set operations (intersection, difference and union)
15251 # on the commandline. Expressions are evaluated strictly left-to-right
15252 if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
15253 expression = a[len(SETPREFIX):]
# Peel operators off the right end of the expression, collecting the
# operand sets and operators in left-to-right order.
15256 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15257 is_pos = expression.rfind(IS_OPERATOR)
15258 diff_pos = expression.rfind(DIFF_OPERATOR)
15259 union_pos = expression.rfind(UNION_OPERATOR)
15260 op_pos = max(is_pos, diff_pos, union_pos)
15261 s1 = expression[:op_pos]
15262 s2 = expression[op_pos+len(IS_OPERATOR):]
15263 op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15265 display_missing_pkg_set(root_config, s2)
15267 expr_sets.insert(0, s2)
15268 expr_ops.insert(0, op)
15270 if not expression in sets:
15271 display_missing_pkg_set(root_config, expression)
# Evaluate the collected operator chain strictly left-to-right on atom sets.
15273 expr_sets.insert(0, expression)
15274 result = set(setconfig.getSetAtoms(expression))
15275 for i in range(0, len(expr_ops)):
15276 s2 = setconfig.getSetAtoms(expr_sets[i+1])
15277 if expr_ops[i] == IS_OPERATOR:
15278 result.intersection_update(s2)
15279 elif expr_ops[i] == DIFF_OPERATOR:
15280 result.difference_update(s2)
15281 elif expr_ops[i] == UNION_OPERATOR:
15284 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15285 newargs.extend(result)
# Plain @set argument (no operators): resolve the set's atoms.
15287 s = a[len(SETPREFIX):]
15289 display_missing_pkg_set(root_config, s)
15291 setconfig.active.append(s)
15293 set_atoms = setconfig.getSetAtoms(s)
15294 except portage.exception.PackageSetNotFound, e:
15295 writemsg_level(("emerge: the given set '%s' " + \
15296 "contains a non-existent set named '%s'.\n") % \
15297 (s, e), level=logging.ERROR, noiselevel=-1)
# Unmerge-style actions need the set to support the unmerge operation.
15299 if myaction in unmerge_actions and \
15300 not sets[s].supportsOperation("unmerge"):
15301 sys.stderr.write("emerge: the given set '%s' does " % s + \
15302 "not support unmerge operations\n")
15304 elif not set_atoms:
15305 print "emerge: '%s' is an empty set" % s
# For actions in do_not_expand, keep the @set reference so the depgraph
# can expand it itself; otherwise inline the atoms now.
15306 elif myaction not in do_not_expand:
15307 newargs.extend(set_atoms)
15309 newargs.append(SETPREFIX+s)
15310 for e in sets[s].errors:
15314 return (newargs, retval)
# Warn about portage tree directories that lack a profiles/repo_name entry.
# Returns True if any repository is missing one.
# NOTE(review): the loop header over repos (original 15323) and a few
# surrounding lines are missing from this view; code kept byte-identical.
15316 def repo_name_check(trees):
15317 missing_repo_names = set()
15318 for root, root_trees in trees.iteritems():
15319 if "porttree" in root_trees:
15320 portdb = root_trees["porttree"].dbapi
# Start by assuming every configured tree is missing a repo_name, then
# discard each tree for which a repository name resolves to a path.
15321 missing_repo_names.update(portdb.porttrees)
15322 repos = portdb.getRepositories()
15324 missing_repo_names.discard(portdb.getRepositoryPath(r))
15325 if portdb.porttree_root in missing_repo_names and \
15326 not os.path.exists(os.path.join(
15327 portdb.porttree_root, "profiles")):
15328 # This is normal if $PORTDIR happens to be empty,
15329 # so don't warn about it.
15330 missing_repo_names.remove(portdb.porttree_root)
# Emit one warning block listing every offending repo_name path.
15332 if missing_repo_names:
15334 msg.append("WARNING: One or more repositories " + \
15335 "have missing repo_name entries:")
15337 for p in missing_repo_names:
15338 msg.append("\t%s/profiles/repo_name" % (p,))
15340 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
15341 "should be a plain text file containing a unique " + \
15342 "name for the repository on the first line.", 70))
15343 writemsg_level("".join("%s\n" % l for l in msg),
15344 level=logging.WARNING, noiselevel=-1)
15346 return bool(missing_repo_names)
# Warn when CONFIG_PROTECT is empty for any configured root, since that can
# lead to configuration files being overwritten.
# NOTE(review): the line that appends the root name only for non-"/" roots
# (original 15352) is missing from this view; code kept byte-identical.
15348 def config_protect_check(trees):
15349 for root, root_trees in trees.iteritems():
15350 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
15351 msg = "!!! CONFIG_PROTECT is empty"
15353 msg += " for '%s'" % root
15354 writemsg_level(msg, level=logging.WARN, noiselevel=-1)
# Report that a short ebuild name matches multiple category/package pairs.
# In --quiet mode only the candidate names are printed; otherwise a search
# is run for each candidate to show richer information.
# NOTE(review): several lines (e.g. the early return after the quiet branch
# and the search-output call inside the final loop) are missing from this
# view; code kept byte-identical.
15356 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
15358 if "--quiet" in myopts:
15359 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15360 print "!!! one of the following fully-qualified ebuild names instead:\n"
# De-duplicate atoms down to their category/package keys for display.
15361 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15362 print " " + colorize("INFORM", cp)
# Non-quiet mode: build a search object honoring the relevant options.
15365 s = search(root_config, spinner, "--searchdesc" in myopts,
15366 "--quiet" not in myopts, "--usepkg" in myopts,
15367 "--usepkgonly" in myopts)
# Derive the package-name portion of the ambiguous argument to use as the
# search key (the closing argument of insert_category_into_atom is on a
# line missing from this view).
15368 null_cp = portage.dep_getkey(insert_category_into_atom(
15370 cat, atom_pn = portage.catsplit(null_cp)
15371 s.searchkey = atom_pn
15372 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15375 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15376 print "!!! one of the above fully-qualified ebuild names instead.\n"
# Verify that every root has a valid profile configured.  The info/sync
# actions and --version/--help are allowed regardless, so users can recover
# from a broken profile.  Presumably returns os.EX_OK on success and an
# error code otherwise (the return statements are on lines missing from
# this view -- confirm); code kept byte-identical.
15378 def profile_check(trees, myaction, myopts):
15379 if myaction in ("info", "sync"):
15381 elif "--version" in myopts or "--help" in myopts:
15383 for root, root_trees in trees.iteritems():
# A root with a non-empty profiles list passes the check.
15384 if root_trees["root_config"].settings.profiles:
15386 # generate some profile related warning messages
15387 validate_ebuild_environment(trees)
15388 msg = "If you have just changed your profile configuration, you " + \
15389 "should revert back to the previous configuration. Due to " + \
15390 "your current profile being invalid, allowed actions are " + \
15391 "limited to --help, --info, --sync, and --version."
15392 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
15393 level=logging.ERROR, noiselevel=-1)
15398 global portage # NFC why this is necessary now - genone
15399 portage._disable_legacy_globals()
15400 # Disable color until we're sure that it should be enabled (after
15401 # EMERGE_DEFAULT_OPTS has been parsed).
15402 portage.output.havecolor = 0
15403 # This first pass is just for options that need to be known as early as
15404 # possible, such as --config-root. They will be parsed again later,
15405 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15406 # the value of --config-root).
15407 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15408 if "--debug" in myopts:
15409 os.environ["PORTAGE_DEBUG"] = "1"
15410 if "--config-root" in myopts:
15411 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15412 if "--root" in myopts:
15413 os.environ["ROOT"] = myopts["--root"]
15415 # Portage needs to ensure a sane umask for the files it creates.
15417 settings, trees, mtimedb = load_emerge_config()
15418 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15419 rval = profile_check(trees, myaction, myopts)
15420 if rval != os.EX_OK:
15423 if portage._global_updates(trees, mtimedb["updates"]):
15425 # Reload the whole config from scratch.
15426 settings, trees, mtimedb = load_emerge_config(trees=trees)
15427 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15429 xterm_titles = "notitles" not in settings.features
15432 if "--ignore-default-opts" not in myopts:
15433 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15434 tmpcmdline.extend(sys.argv[1:])
15435 myaction, myopts, myfiles = parse_opts(tmpcmdline)
15437 if "--digest" in myopts:
15438 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15439 # Reload the whole config from scratch so that the portdbapi internal
15440 # config is updated with new FEATURES.
15441 settings, trees, mtimedb = load_emerge_config(trees=trees)
15442 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15444 for myroot in trees:
15445 mysettings = trees[myroot]["vartree"].settings
15446 mysettings.unlock()
15447 adjust_config(myopts, mysettings)
15448 if '--pretend' not in myopts and myaction in \
15449 (None, 'clean', 'depclean', 'prune', 'unmerge'):
15450 mysettings["PORTAGE_COUNTER_HASH"] = \
15451 trees[myroot]["vartree"].dbapi._counter_hash()
15452 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15454 del myroot, mysettings
15456 apply_priorities(settings)
15458 spinner = stdout_spinner()
15459 if "candy" in settings.features:
15460 spinner.update = spinner.update_scroll
15462 if "--quiet" not in myopts:
15463 portage.deprecated_profile_check(settings=settings)
15464 repo_name_check(trees)
15465 config_protect_check(trees)
15467 for mytrees in trees.itervalues():
15468 mydb = mytrees["porttree"].dbapi
15469 # Freeze the portdbapi for performance (memoize all xmatch results).
15473 if "moo" in myfiles:
15476 Larry loves Gentoo (""" + platform.system() + """)
15478 _______________________
15479 < Have you mooed today? >
15480 -----------------------
15490 ext = os.path.splitext(x)[1]
15491 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15492 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15495 root_config = trees[settings["ROOT"]]["root_config"]
15496 if myaction == "list-sets":
15497 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15501 # only expand sets for actions taking package arguments
15502 oldargs = myfiles[:]
15503 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15504 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15505 if retval != os.EX_OK:
15508 # Need to handle empty sets specially, otherwise emerge will react
15509 # with the help message for empty argument lists
15510 if oldargs and not myfiles:
15511 print "emerge: no targets left after set expansion"
15514 if ("--tree" in myopts) and ("--columns" in myopts):
15515 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15518 if ("--quiet" in myopts):
15519 spinner.update = spinner.update_quiet
15520 portage.util.noiselimit = -1
15522 # Always create packages if FEATURES=buildpkg
15523 # Imply --buildpkg if --buildpkgonly
15524 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15525 if "--buildpkg" not in myopts:
15526 myopts["--buildpkg"] = True
15528 # Always try and fetch binary packages if FEATURES=getbinpkg
15529 if ("getbinpkg" in settings.features):
15530 myopts["--getbinpkg"] = True
15532 if "--buildpkgonly" in myopts:
15533 # --buildpkgonly will not merge anything, so
15534 # it cancels all binary package options.
15535 for opt in ("--getbinpkg", "--getbinpkgonly",
15536 "--usepkg", "--usepkgonly"):
15537 myopts.pop(opt, None)
15539 if "--fetch-all-uri" in myopts:
15540 myopts["--fetchonly"] = True
15542 if "--skipfirst" in myopts and "--resume" not in myopts:
15543 myopts["--resume"] = True
15545 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15546 myopts["--usepkgonly"] = True
15548 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15549 myopts["--getbinpkg"] = True
15551 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15552 myopts["--usepkg"] = True
15554 # Also allow -K to apply --usepkg/-k
15555 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15556 myopts["--usepkg"] = True
15558 # Allow -p to remove --ask
15559 if ("--pretend" in myopts) and ("--ask" in myopts):
15560 print ">>> --pretend disables --ask... removing --ask from options."
15561 del myopts["--ask"]
15563 # forbid --ask when not in a terminal
15564 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15565 if ("--ask" in myopts) and (not sys.stdin.isatty()):
15566 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15570 if settings.get("PORTAGE_DEBUG", "") == "1":
15571 spinner.update = spinner.update_quiet
15573 if "python-trace" in settings.features:
15574 import portage.debug
15575 portage.debug.set_trace(True)
15577 if not ("--quiet" in myopts):
15578 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15579 spinner.update = spinner.update_basic
15581 if myaction == 'version':
15582 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15583 settings.profile_path, settings["CHOST"],
15584 trees[settings["ROOT"]]["vartree"].dbapi)
15586 elif "--help" in myopts:
15587 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15590 if "--debug" in myopts:
15591 print "myaction", myaction
15592 print "myopts", myopts
15594 if not myaction and not myfiles and "--resume" not in myopts:
15595 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15598 pretend = "--pretend" in myopts
15599 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15600 buildpkgonly = "--buildpkgonly" in myopts
15602 # check if root user is the current user for the actions where emerge needs this
15603 if portage.secpass < 2:
15604 # We've already allowed "--version" and "--help" above.
15605 if "--pretend" not in myopts and myaction not in ("search","info"):
15606 need_superuser = not \
15608 (buildpkgonly and secpass >= 1) or \
15609 myaction in ("metadata", "regen") or \
15610 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15611 if portage.secpass < 1 or \
15614 access_desc = "superuser"
15616 access_desc = "portage group"
15617 # Always show portage_group_warning() when only portage group
15618 # access is required but the user is not in the portage group.
15619 from portage.data import portage_group_warning
15620 if "--ask" in myopts:
15621 myopts["--pretend"] = True
15622 del myopts["--ask"]
15623 print ("%s access is required... " + \
15624 "adding --pretend to options.\n") % access_desc
15625 if portage.secpass < 1 and not need_superuser:
15626 portage_group_warning()
15628 sys.stderr.write(("emerge: %s access is " + \
15629 "required.\n\n") % access_desc)
15630 if portage.secpass < 1 and not need_superuser:
15631 portage_group_warning()
15634 disable_emergelog = False
15635 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15637 disable_emergelog = True
15639 if myaction in ("search", "info"):
15640 disable_emergelog = True
15641 if disable_emergelog:
15642 """ Disable emergelog for everything except build or unmerge
15643 operations. This helps minimize parallel emerge.log entries that can
15644 confuse log parsers. We especially want it disabled during
15645 parallel-fetch, which uses --resume --fetchonly."""
15647 def emergelog(*pargs, **kargs):
15650 if not "--pretend" in myopts:
15651 emergelog(xterm_titles, "Started emerge on: "+\
15652 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15655 myelogstr=" ".join(myopts)
15657 myelogstr+=" "+myaction
15659 myelogstr += " " + " ".join(oldargs)
15660 emergelog(xterm_titles, " *** emerge " + myelogstr)
15663 def emergeexitsig(signum, frame):
15664 signal.signal(signal.SIGINT, signal.SIG_IGN)
15665 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15666 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15667 sys.exit(100+signum)
15668 signal.signal(signal.SIGINT, emergeexitsig)
15669 signal.signal(signal.SIGTERM, emergeexitsig)
15672 """This gets out final log message in before we quit."""
15673 if "--pretend" not in myopts:
15674 emergelog(xterm_titles, " *** terminating.")
15675 if "notitles" not in settings.features:
15677 portage.atexit_register(emergeexit)
# Dispatch the non-build actions. NOTE(review): excerpt is non-contiguous;
# the sys.exit()/return lines that terminate several branches are elided.
15679 if myaction in ("config", "metadata", "regen", "sync"):
# --pretend is meaningless for these actions; report and (elided) exit.
15680 if "--pretend" in myopts:
15681 sys.stderr.write(("emerge: The '%s' action does " + \
15682 "not support '--pretend'.\n") % myaction)
15685 if "sync" == myaction:
15686 return action_sync(settings, trees, mtimedb, myopts, myaction)
15687 elif "metadata" == myaction:
15688 action_metadata(settings, portdb, myopts)
# regen forwards the parallelism options straight to action_regen.
15689 elif myaction=="regen":
15690 validate_ebuild_environment(trees)
15691 return action_regen(settings, portdb, myopts.get("--jobs"),
15692 myopts.get("--load-average"))
15694 elif "config"==myaction:
15695 validate_ebuild_environment(trees)
15696 action_config(settings, trees, myopts, myfiles)
15699 elif "search"==myaction:
15700 validate_ebuild_environment(trees)
15701 action_search(trees[settings["ROOT"]]["root_config"],
15702 myopts, myfiles, spinner)
# prune only takes this unmerge-style path with --nodeps; otherwise it
# falls through to the depclean-style handling below.
15703 elif myaction in ("clean", "unmerge") or \
15704 (myaction == "prune" and "--nodeps" in myopts):
15705 validate_ebuild_environment(trees)
15707 # Ensure atoms are valid before calling unmerge().
15708 # For backward compat, leading '=' is not required.
15710 if is_valid_package_atom(x) or \
15711 is_valid_package_atom("=" + x):
# (else branch header elided) — reject anything that is not a valid atom.
15714 msg.append("'%s' is not a valid package atom." % (x,))
15715 msg.append("Please check ebuild(5) for full details.")
15716 writemsg_level("".join("!!! %s\n" % line for line in msg),
15717 level=logging.ERROR, noiselevel=-1)
15720 # When given a list of atoms, unmerge
15721 # them in the order given.
15722 ordered = myaction == "unmerge"
# unmerge() returning 1 signals success here; then run the post-emerge
# hooks unless this was a pretend/fetch/buildpkg-only invocation.
15723 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15724 mtimedb["ldpath"], ordered=ordered):
15725 if not (buildpkgonly or fetchonly or pretend):
15726 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
# depclean/info/prune resolve their arguments against the installed-package
# database first. NOTE(review): the surrounding for-loop header and the
# valid_atoms initialization are elided from this excerpt.
15728 elif myaction in ("depclean", "info", "prune"):
15730 # Ensure atoms are valid before calling unmerge().
15731 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15734 if is_valid_package_atom(x):
# dep_expand turns a short name into a fully-qualified atom using the
# installed-package db; the enclosing try line (15735) is elided.
15736 valid_atoms.append(
15737 portage.dep_expand(x, mydb=vardb, settings=settings))
# Python 2 except syntax: a short name matching several packages raises
# AmbiguousPackageName; list the candidates so the user can disambiguate.
15738 except portage.exception.AmbiguousPackageName, e:
15739 msg = "The short ebuild name \"" + x + \
15740 "\" is ambiguous. Please specify " + \
15741 "one of the following " + \
15742 "fully-qualified ebuild names instead:"
15743 for line in textwrap.wrap(msg, 70):
15744 writemsg_level("!!! %s\n" % (line,),
15745 level=logging.ERROR, noiselevel=-1)
# (loop header over the candidate names is elided at 15746)
15747 writemsg_level(" %s\n" % colorize("INFORM", i),
15748 level=logging.ERROR, noiselevel=-1)
15749 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
# Invalid-atom error path (else header and sys.exit are elided).
15753 msg.append("'%s' is not a valid package atom." % (x,))
15754 msg.append("Please check ebuild(5) for full details.")
15755 writemsg_level("".join("!!! %s\n" % line for line in msg),
15756 level=logging.ERROR, noiselevel=-1)
15759 if myaction == "info":
15760 return action_info(settings, trees, myopts, valid_atoms)
# depclean and prune share action_depclean; run post-emerge hooks after.
15762 validate_ebuild_environment(trees)
15763 action_depclean(settings, trees, mtimedb["ldpath"],
15764 myopts, myaction, valid_atoms, spinner)
15765 if not (buildpkgonly or fetchonly or pretend):
15766 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
# Default path: build/merge. NOTE(review): excerpt is non-contiguous; the
# argument-validation loop header and error exits are elided.
15767 # "update", "system", or just process files:
15769 validate_ebuild_environment(trees)
# Accept set names (prefixed with SETPREFIX) as well as package atoms.
15772 if x.startswith(SETPREFIX) or \
15773 is_valid_package_atom(x):
# Absolute paths get special handling (body elided at 15776+).
15775 if x[:1] == os.sep:
# Invalid-argument error path (else header and sys.exit are elided).
15783 msg.append("'%s' is not a valid package atom." % (x,))
15784 msg.append("Please check ebuild(5) for full details.")
15785 writemsg_level("".join("!!! %s\n" % line for line in msg),
15786 level=logging.ERROR, noiselevel=-1)
# Show pending news items before a real (non-pretend) build.
15789 if "--pretend" not in myopts:
15790 display_news_notification(root_config, myopts)
15791 retval = action_build(settings, trees, mtimedb,
15792 myopts, myaction, myfiles, spinner)
# root_config is re-fetched because action_build may have reloaded the
# trees; presumably retval is the process exit status — confirm.
15793 root_config = trees[settings["ROOT"]]["root_config"]
15794 post_emerge(root_config, myopts, mtimedb, retval)