2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
8 from collections import deque
28 from os import path as osp
29 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
32 from portage import digraph
33 from portage.const import NEWS_LIB_PATH
36 import portage.xpak, commands, errno, re, socket, time
37 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
38 nc_len, red, teal, turquoise, xtermTitle, \
39 xtermTitleReset, yellow
40 from portage.output import create_color_func
41 good = create_color_func("GOOD")
42 bad = create_color_func("BAD")
43 # white looks bad on terminals with white background
44 from portage.output import bold as white
48 portage.dep._dep_check_strict = True
51 import portage.exception
52 from portage.data import secpass
53 from portage.elog.messages import eerror
54 from portage.util import normalize_path as normpath
55 from portage.util import cmp_sort_key, writemsg, writemsg_level
56 from portage.sets import load_default_config, SETPREFIX
57 from portage.sets.base import InternalPackageSet
59 from itertools import chain, izip
62 import cPickle as pickle
67 from cStringIO import StringIO
69 from StringIO import StringIO
71 class stdout_spinner(object):
73 "Gentoo Rocks ("+platform.system()+")",
74 "Thank you for using Gentoo. :)",
75 "Are you actually trying to read this?",
76 "How many times have you stared at this?",
77 "We are generating the cache right now",
78 "You are paying too much attention.",
79 "A theory is better than its explanation.",
80 "Phasers locked on target, Captain.",
81 "Thrashing is just virtual crashing.",
82 "To be is to program.",
83 "Real Users hate Real Programmers.",
84 "When all else fails, read the instructions.",
85 "Functionality breeds Contempt.",
86 "The future lies ahead.",
87 "3.1415926535897932384626433832795028841971694",
88 "Sometimes insanity is the only alternative.",
89 "Inaccuracy saves a world of explanation.",
92 twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
96 self.update = self.update_twirl
97 self.scroll_sequence = self.scroll_msgs[
98 int(time.time() * 100) % len(self.scroll_msgs)]
100 self.min_display_latency = 0.05
102 def _return_early(self):
104 Flushing ouput to the tty too frequently wastes cpu time. Therefore,
105 each update* method should return without doing any output when this
108 cur_time = time.time()
109 if cur_time - self.last_update < self.min_display_latency:
111 self.last_update = cur_time
114 def update_basic(self):
115 self.spinpos = (self.spinpos + 1) % 500
116 if self._return_early():
118 if (self.spinpos % 100) == 0:
119 if self.spinpos == 0:
120 sys.stdout.write(". ")
122 sys.stdout.write(".")
125 def update_scroll(self):
126 if self._return_early():
128 if(self.spinpos >= len(self.scroll_sequence)):
129 sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
130 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
132 sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
134 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
136 def update_twirl(self):
137 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
138 if self._return_early():
140 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
143 def update_quiet(self):
146 def userquery(prompt, responses=None, colours=None):
147 """Displays a prompt and a set of responses, then waits for a response
148 which is checked against the responses and the first to match is
149 returned. An empty response will match the first value in responses. The
150 input buffer is *not* cleared prior to the prompt!
153 responses: a List of Strings.
154 colours: a List of Functions taking and returning a String, used to
155 process the responses for display. Typically these will be functions
156 like red() but could be e.g. lambda x: "DisplayString".
157 If responses is omitted, defaults to ["Yes", "No"], [green, red].
158 If only colours is omitted, defaults to [bold, ...].
160 Returns a member of the List responses. (If called without optional
161 arguments, returns "Yes" or "No".)
162 KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
164 if responses is None:
165 responses = ["Yes", "No"]
167 create_color_func("PROMPT_CHOICE_DEFAULT"),
168 create_color_func("PROMPT_CHOICE_OTHER")
170 elif colours is None:
172 colours=(colours*len(responses))[:len(responses)]
176 response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
177 for key in responses:
178 # An empty response will match the first value in responses.
179 if response.upper()==key[:len(response)].upper():
181 print "Sorry, response '%s' not understood." % response,
182 except (EOFError, KeyboardInterrupt):
186 actions = frozenset([
187 "clean", "config", "depclean",
188 "info", "list-sets", "metadata",
189 "prune", "regen", "search",
190 "sync", "unmerge", "version",
193 "--ask", "--alphabetical",
194 "--buildpkg", "--buildpkgonly",
195 "--changelog", "--columns",
200 "--fetchonly", "--fetch-all-uri",
201 "--getbinpkg", "--getbinpkgonly",
202 "--help", "--ignore-default-opts",
205 "--newuse", "--nocolor",
206 "--nodeps", "--noreplace",
207 "--nospinner", "--oneshot",
208 "--onlydeps", "--pretend",
209 "--quiet", "--resume",
210 "--searchdesc", "--selective",
214 "--usepkg", "--usepkgonly",
221 "b":"--buildpkg", "B":"--buildpkgonly",
222 "c":"--clean", "C":"--unmerge",
223 "d":"--debug", "D":"--deep",
225 "f":"--fetchonly", "F":"--fetch-all-uri",
226 "g":"--getbinpkg", "G":"--getbinpkgonly",
228 "k":"--usepkg", "K":"--usepkgonly",
230 "n":"--noreplace", "N":"--newuse",
231 "o":"--onlydeps", "O":"--nodeps",
232 "p":"--pretend", "P":"--prune",
234 "s":"--search", "S":"--searchdesc",
237 "v":"--verbose", "V":"--version"
240 def emergelog(xterm_titles, mystr, short_msg=None):
241 if xterm_titles and short_msg:
242 if "HOSTNAME" in os.environ:
243 short_msg = os.environ["HOSTNAME"]+": "+short_msg
244 xtermTitle(short_msg)
246 file_path = "/var/log/emerge.log"
247 mylogfile = open(file_path, "a")
248 portage.util.apply_secpass_permissions(file_path,
249 uid=portage.portage_uid, gid=portage.portage_gid,
253 mylock = portage.locks.lockfile(mylogfile)
254 # seek because we may have gotten held up by the lock.
255 # if so, we may not be positioned at the end of the file.
257 mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
261 portage.locks.unlockfile(mylock)
263 except (IOError,OSError,portage.exception.PortageException), e:
265 print >> sys.stderr, "emergelog():",e
267 def countdown(secs=5, doing="Starting"):
269 print ">>> Waiting",secs,"seconds before starting..."
270 print ">>> (Control-C to abort)...\n"+doing+" in: ",
274 sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
279 # formats a size given in bytes nicely
280 def format_size(mysize):
281 if isinstance(mysize, basestring):
283 if 0 != mysize % 1024:
284 # Always round up to the next kB so that it doesn't show 0 kB when
285 # some small file still needs to be fetched.
286 mysize += 1024 - mysize % 1024
287 mystr=str(mysize/1024)
291 mystr=mystr[:mycount]+","+mystr[mycount:]
295 def getgccversion(chost):
298 return: the current in-use gcc version
301 gcc_ver_command = 'gcc -dumpversion'
302 gcc_ver_prefix = 'gcc-'
304 gcc_not_found_error = red(
305 "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
306 "!!! to update the environment of this terminal and possibly\n" +
307 "!!! other terminals also.\n"
310 mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
311 if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
312 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
314 mystatus, myoutput = commands.getstatusoutput(
315 chost + "-" + gcc_ver_command)
316 if mystatus == os.EX_OK:
317 return gcc_ver_prefix + myoutput
319 mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
320 if mystatus == os.EX_OK:
321 return gcc_ver_prefix + myoutput
323 portage.writemsg(gcc_not_found_error, noiselevel=-1)
324 return "[unavailable]"
326 def getportageversion(portdir, target_root, profile, chost, vardb):
327 profilever = "unavailable"
329 realpath = os.path.realpath(profile)
330 basepath = os.path.realpath(os.path.join(portdir, "profiles"))
331 if realpath.startswith(basepath):
332 profilever = realpath[1 + len(basepath):]
335 profilever = "!" + os.readlink(profile)
338 del realpath, basepath
341 libclist = vardb.match("virtual/libc")
342 libclist += vardb.match("virtual/glibc")
343 libclist = portage.util.unique_array(libclist)
345 xs=portage.catpkgsplit(x)
347 libcver+=","+"-".join(xs[1:])
349 libcver="-".join(xs[1:])
351 libcver="unavailable"
353 gccver = getgccversion(chost)
354 unameout=platform.release()+" "+platform.machine()
356 return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
358 def create_depgraph_params(myopts, myaction):
359 #configure emerge engine parameters
361 # self: include _this_ package regardless of if it is merged.
362 # selective: exclude the package if it is merged
363 # recurse: go into the dependencies
364 # deep: go into the dependencies of already merged packages
365 # empty: pretend nothing is merged
366 # complete: completely account for all known dependencies
367 # remove: build graph for use in removing packages
368 myparams = set(["recurse"])
370 if myaction == "remove":
371 myparams.add("remove")
372 myparams.add("complete")
375 if "--update" in myopts or \
376 "--newuse" in myopts or \
377 "--reinstall" in myopts or \
378 "--noreplace" in myopts:
379 myparams.add("selective")
380 if "--emptytree" in myopts:
381 myparams.add("empty")
382 myparams.discard("selective")
383 if "--nodeps" in myopts:
384 myparams.discard("recurse")
385 if "--deep" in myopts:
387 if "--complete-graph" in myopts:
388 myparams.add("complete")
391 # search functionality
392 class search(object):
403 def __init__(self, root_config, spinner, searchdesc,
404 verbose, usepkg, usepkgonly):
405 """Searches the available and installed packages for the supplied search key.
406 The list of available and installed packages is created at object instantiation.
407 This makes successive searches faster."""
408 self.settings = root_config.settings
409 self.vartree = root_config.trees["vartree"]
410 self.spinner = spinner
411 self.verbose = verbose
412 self.searchdesc = searchdesc
413 self.root_config = root_config
414 self.setconfig = root_config.setconfig
415 self.matches = {"pkg" : []}
420 self.portdb = fake_portdb
421 for attrib in ("aux_get", "cp_all",
422 "xmatch", "findname", "getFetchMap"):
423 setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
427 portdb = root_config.trees["porttree"].dbapi
428 bindb = root_config.trees["bintree"].dbapi
429 vardb = root_config.trees["vartree"].dbapi
431 if not usepkgonly and portdb._have_root_eclass_dir:
432 self._dbs.append(portdb)
434 if (usepkg or usepkgonly) and bindb.cp_all():
435 self._dbs.append(bindb)
437 self._dbs.append(vardb)
438 self._portdb = portdb
443 cp_all.update(db.cp_all())
444 return list(sorted(cp_all))
446 def _aux_get(self, *args, **kwargs):
449 return db.aux_get(*args, **kwargs)
454 def _findname(self, *args, **kwargs):
456 if db is not self._portdb:
457 # We don't want findname to return anything
458 # unless it's an ebuild in a portage tree.
459 # Otherwise, it's already built and we don't
462 func = getattr(db, "findname", None)
464 value = func(*args, **kwargs)
469 def _getFetchMap(self, *args, **kwargs):
471 func = getattr(db, "getFetchMap", None)
473 value = func(*args, **kwargs)
478 def _visible(self, db, cpv, metadata):
479 installed = db is self.vartree.dbapi
480 built = installed or db is not self._portdb
483 pkg_type = "installed"
486 return visible(self.settings,
487 Package(type_name=pkg_type, root_config=self.root_config,
488 cpv=cpv, built=built, installed=installed, metadata=metadata))
490 def _xmatch(self, level, atom):
492 This method does not expand old-style virtuals because it
493 is restricted to returning matches for a single ${CATEGORY}/${PN}
494 and old-style virual matches unreliable for that when querying
495 multiple package databases. If necessary, old-style virtuals
496 can be performed on atoms prior to calling this method.
498 cp = portage.dep_getkey(atom)
499 if level == "match-all":
502 if hasattr(db, "xmatch"):
503 matches.update(db.xmatch(level, atom))
505 matches.update(db.match(atom))
506 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
507 db._cpv_sort_ascending(result)
508 elif level == "match-visible":
511 if hasattr(db, "xmatch"):
512 matches.update(db.xmatch(level, atom))
514 db_keys = list(db._aux_cache_keys)
515 for cpv in db.match(atom):
516 metadata = izip(db_keys,
517 db.aux_get(cpv, db_keys))
518 if not self._visible(db, cpv, metadata):
521 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
522 db._cpv_sort_ascending(result)
523 elif level == "bestmatch-visible":
526 if hasattr(db, "xmatch"):
527 cpv = db.xmatch("bestmatch-visible", atom)
528 if not cpv or portage.cpv_getkey(cpv) != cp:
530 if not result or cpv == portage.best([cpv, result]):
533 db_keys = Package.metadata_keys
534 # break out of this loop with highest visible
535 # match, checked in descending order
536 for cpv in reversed(db.match(atom)):
537 if portage.cpv_getkey(cpv) != cp:
539 metadata = izip(db_keys,
540 db.aux_get(cpv, db_keys))
541 if not self._visible(db, cpv, metadata):
543 if not result or cpv == portage.best([cpv, result]):
547 raise NotImplementedError(level)
550 def execute(self,searchkey):
551 """Performs the search for the supplied search key"""
553 self.searchkey=searchkey
554 self.packagematches = []
557 self.matches = {"pkg":[], "desc":[], "set":[]}
560 self.matches = {"pkg":[], "set":[]}
561 print "Searching... ",
564 if self.searchkey.startswith('%'):
566 self.searchkey = self.searchkey[1:]
567 if self.searchkey.startswith('@'):
569 self.searchkey = self.searchkey[1:]
571 self.searchre=re.compile(self.searchkey,re.I)
573 self.searchre=re.compile(re.escape(self.searchkey), re.I)
574 for package in self.portdb.cp_all():
575 self.spinner.update()
578 match_string = package[:]
580 match_string = package.split("/")[-1]
583 if self.searchre.search(match_string):
584 if not self.portdb.xmatch("match-visible", package):
586 self.matches["pkg"].append([package,masked])
587 elif self.searchdesc: # DESCRIPTION searching
588 full_package = self.portdb.xmatch("bestmatch-visible", package)
590 #no match found; we don't want to query description
591 full_package = portage.best(
592 self.portdb.xmatch("match-all", package))
598 full_desc = self.portdb.aux_get(
599 full_package, ["DESCRIPTION"])[0]
601 print "emerge: search: aux_get() failed, skipping"
603 if self.searchre.search(full_desc):
604 self.matches["desc"].append([full_package,masked])
606 self.sdict = self.setconfig.getSets()
607 for setname in self.sdict:
608 self.spinner.update()
610 match_string = setname
612 match_string = setname.split("/")[-1]
614 if self.searchre.search(match_string):
615 self.matches["set"].append([setname, False])
616 elif self.searchdesc:
617 if self.searchre.search(
618 self.sdict[setname].getMetadata("DESCRIPTION")):
619 self.matches["set"].append([setname, False])
622 for mtype in self.matches:
623 self.matches[mtype].sort()
624 self.mlen += len(self.matches[mtype])
627 if not self.portdb.xmatch("match-all", cp):
630 if not self.portdb.xmatch("bestmatch-visible", cp):
632 self.matches["pkg"].append([cp, masked])
636 """Outputs the results of the search."""
637 print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
638 print "[ Applications found : "+white(str(self.mlen))+" ]"
640 vardb = self.vartree.dbapi
641 for mtype in self.matches:
642 for match,masked in self.matches[mtype]:
646 full_package = self.portdb.xmatch(
647 "bestmatch-visible", match)
649 #no match found; we don't want to query description
651 full_package = portage.best(
652 self.portdb.xmatch("match-all",match))
653 elif mtype == "desc":
655 match = portage.cpv_getkey(match)
657 print green("*")+" "+white(match)
658 print " ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION")
662 desc, homepage, license = self.portdb.aux_get(
663 full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
665 print "emerge: search: aux_get() failed, skipping"
668 print green("*")+" "+white(match)+" "+red("[ Masked ]")
670 print green("*")+" "+white(match)
671 myversion = self.getVersion(full_package, search.VERSION_RELEASE)
675 mycat = match.split("/")[0]
676 mypkg = match.split("/")[1]
677 mycpv = match + "-" + myversion
678 myebuild = self.portdb.findname(mycpv)
680 pkgdir = os.path.dirname(myebuild)
681 from portage import manifest
682 mf = manifest.Manifest(
683 pkgdir, self.settings["DISTDIR"])
685 uri_map = self.portdb.getFetchMap(mycpv)
686 except portage.exception.InvalidDependString, e:
687 file_size_str = "Unknown (%s)" % (e,)
691 mysum[0] = mf.getDistfilesSize(uri_map)
693 file_size_str = "Unknown (missing " + \
694 "digest for %s)" % (e,)
699 if db is not vardb and \
700 db.cpv_exists(mycpv):
702 if not myebuild and hasattr(db, "bintree"):
703 myebuild = db.bintree.getname(mycpv)
705 mysum[0] = os.stat(myebuild).st_size
710 if myebuild and file_size_str is None:
711 mystr = str(mysum[0] / 1024)
715 mystr = mystr[:mycount] + "," + mystr[mycount:]
716 file_size_str = mystr + " kB"
720 print " ", darkgreen("Latest version available:"),myversion
721 print " ", self.getInstallationStatus(mycat+'/'+mypkg)
724 (darkgreen("Size of files:"), file_size_str)
725 print " ", darkgreen("Homepage:")+" ",homepage
726 print " ", darkgreen("Description:")+" ",desc
727 print " ", darkgreen("License:")+" ",license
732 def getInstallationStatus(self,package):
733 installed_package = self.vartree.dep_bestmatch(package)
735 version = self.getVersion(installed_package,search.VERSION_RELEASE)
737 result = darkgreen("Latest version installed:")+" "+version
739 result = darkgreen("Latest version installed:")+" [ Not Installed ]"
742 def getVersion(self,full_package,detail):
743 if len(full_package) > 1:
744 package_parts = portage.catpkgsplit(full_package)
745 if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
746 result = package_parts[2]+ "-" + package_parts[3]
748 result = package_parts[2]
753 class RootConfig(object):
754 """This is used internally by depgraph to track information about a
758 "ebuild" : "porttree",
759 "binary" : "bintree",
760 "installed" : "vartree"
764 for k, v in pkg_tree_map.iteritems():
767 def __init__(self, settings, trees, setconfig):
769 self.settings = settings
770 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
771 self.root = self.settings["ROOT"]
772 self.setconfig = setconfig
773 self.sets = self.setconfig.getSets()
774 self.visible_pkgs = PackageVirtualDbapi(self.settings)
776 def create_world_atom(pkg, args_set, root_config):
777 """Create a new atom for the world file if one does not exist. If the
778 argument atom is precise enough to identify a specific slot then a slot
779 atom will be returned. Atoms that are in the system set may also be stored
780 in world since system atoms can only match one slot while world atoms can
781 be greedy with respect to slots. Unslotted system packages will not be
784 arg_atom = args_set.findAtomForPackage(pkg)
787 cp = portage.dep_getkey(arg_atom)
789 sets = root_config.sets
790 portdb = root_config.trees["porttree"].dbapi
791 vardb = root_config.trees["vartree"].dbapi
792 available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
793 for cpv in portdb.match(cp))
794 slotted = len(available_slots) > 1 or \
795 (len(available_slots) == 1 and "0" not in available_slots)
797 # check the vdb in case this is multislot
798 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
799 for cpv in vardb.match(cp))
800 slotted = len(available_slots) > 1 or \
801 (len(available_slots) == 1 and "0" not in available_slots)
802 if slotted and arg_atom != cp:
803 # If the user gave a specific atom, store it as a
804 # slot atom in the world file.
805 slot_atom = pkg.slot_atom
807 # For USE=multislot, there are a couple of cases to
810 # 1) SLOT="0", but the real SLOT spontaneously changed to some
811 # unknown value, so just record an unslotted atom.
813 # 2) SLOT comes from an installed package and there is no
814 # matching SLOT in the portage tree.
816 # Make sure that the slot atom is available in either the
817 # portdb or the vardb, since otherwise the user certainly
818 # doesn't want the SLOT atom recorded in the world file
819 # (case 1 above). If it's only available in the vardb,
820 # the user may be trying to prevent a USE=multislot
821 # package from being removed by --depclean (case 2 above).
824 if not portdb.match(slot_atom):
825 # SLOT seems to come from an installed multislot package
827 # If there is no installed package matching the SLOT atom,
828 # it probably changed SLOT spontaneously due to USE=multislot,
829 # so just record an unslotted atom.
830 if vardb.match(slot_atom):
831 # Now verify that the argument is precise
832 # enough to identify a specific slot.
833 matches = mydb.match(arg_atom)
834 matched_slots = set()
836 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
837 if len(matched_slots) == 1:
838 new_world_atom = slot_atom
840 if new_world_atom == sets["world"].findAtomForPackage(pkg):
841 # Both atoms would be identical, so there's nothing to add.
844 # Unlike world atoms, system atoms are not greedy for slots, so they
845 # can't be safely excluded from world if they are slotted.
846 system_atom = sets["system"].findAtomForPackage(pkg)
848 if not portage.dep_getkey(system_atom).startswith("virtual/"):
850 # System virtuals aren't safe to exclude from world since they can
851 # match multiple old-style virtuals but only one of them will be
852 # pulled in by update or depclean.
853 providers = portdb.mysettings.getvirtuals().get(
854 portage.dep_getkey(system_atom))
855 if providers and len(providers) == 1 and providers[0] == cp:
857 return new_world_atom
859 def filter_iuse_defaults(iuse):
861 if flag.startswith("+") or flag.startswith("-"):
866 class SlotObject(object):
867 __slots__ = ("__weakref__",)
869 def __init__(self, **kwargs):
870 classes = [self.__class__]
875 classes.extend(c.__bases__)
876 slots = getattr(c, "__slots__", None)
880 myvalue = kwargs.get(myattr, None)
881 setattr(self, myattr, myvalue)
885 Create a new instance and copy all attributes
886 defined from __slots__ (including those from
889 obj = self.__class__()
891 classes = [self.__class__]
896 classes.extend(c.__bases__)
897 slots = getattr(c, "__slots__", None)
901 setattr(obj, myattr, getattr(self, myattr))
905 class AbstractDepPriority(SlotObject):
906 __slots__ = ("buildtime", "runtime", "runtime_post")
908 def __lt__(self, other):
909 return self.__int__() < other
911 def __le__(self, other):
912 return self.__int__() <= other
914 def __eq__(self, other):
915 return self.__int__() == other
917 def __ne__(self, other):
918 return self.__int__() != other
920 def __gt__(self, other):
921 return self.__int__() > other
923 def __ge__(self, other):
924 return self.__int__() >= other
928 return copy.copy(self)
930 class DepPriority(AbstractDepPriority):
932 __slots__ = ("satisfied", "optional", "rebuild")
944 if self.runtime_post:
945 return "runtime_post"
948 class BlockerDepPriority(DepPriority):
956 BlockerDepPriority.instance = BlockerDepPriority()
958 class UnmergeDepPriority(AbstractDepPriority):
959 __slots__ = ("optional", "satisfied",)
961 Combination of properties Priority Category
966 (none of the above) -2 SOFT
976 if self.runtime_post:
983 myvalue = self.__int__()
984 if myvalue > self.SOFT:
988 class DepPriorityNormalRange(object):
990 DepPriority properties Index Category
994 runtime_post 2 MEDIUM_SOFT
996 (none of the above) 0 NONE
1004 def _ignore_optional(cls, priority):
1005 if priority.__class__ is not DepPriority:
1007 return bool(priority.optional)
1010 def _ignore_runtime_post(cls, priority):
1011 if priority.__class__ is not DepPriority:
1013 return bool(priority.optional or priority.runtime_post)
1016 def _ignore_runtime(cls, priority):
1017 if priority.__class__ is not DepPriority:
1019 return not priority.buildtime
1021 ignore_medium = _ignore_runtime
1022 ignore_medium_soft = _ignore_runtime_post
1023 ignore_soft = _ignore_optional
1025 DepPriorityNormalRange.ignore_priority = (
1027 DepPriorityNormalRange._ignore_optional,
1028 DepPriorityNormalRange._ignore_runtime_post,
1029 DepPriorityNormalRange._ignore_runtime
1032 class DepPrioritySatisfiedRange(object):
1034 DepPriority Index Category
1036 not satisfied and buildtime HARD
1037 not satisfied and runtime 7 MEDIUM
1038 not satisfied and runtime_post 6 MEDIUM_SOFT
1039 satisfied and buildtime and rebuild 5 SOFT
1040 satisfied and buildtime 4 SOFT
1041 satisfied and runtime 3 SOFT
1042 satisfied and runtime_post 2 SOFT
1044 (none of the above) 0 NONE
1052 def _ignore_optional(cls, priority):
1053 if priority.__class__ is not DepPriority:
1055 return bool(priority.optional)
1058 def _ignore_satisfied_runtime_post(cls, priority):
1059 if priority.__class__ is not DepPriority:
1061 if priority.optional:
1063 if not priority.satisfied:
1065 return bool(priority.runtime_post)
1068 def _ignore_satisfied_runtime(cls, priority):
1069 if priority.__class__ is not DepPriority:
1071 if priority.optional:
1073 if not priority.satisfied:
1075 return not priority.buildtime
1078 def _ignore_satisfied_buildtime(cls, priority):
1079 if priority.__class__ is not DepPriority:
1081 if priority.optional:
1083 if not priority.satisfied:
1085 if priority.buildtime:
1086 return not priority.rebuild
1090 def _ignore_satisfied_buildtime_rebuild(cls, priority):
1091 if priority.__class__ is not DepPriority:
1093 if priority.optional:
1095 return bool(priority.satisfied)
1098 def _ignore_runtime_post(cls, priority):
1099 if priority.__class__ is not DepPriority:
1101 return bool(priority.optional or \
1102 priority.satisfied or \
1103 priority.runtime_post)
1106 def _ignore_runtime(cls, priority):
1107 if priority.__class__ is not DepPriority:
1109 return bool(priority.satisfied or \
1110 not priority.buildtime)
1112 ignore_medium = _ignore_runtime
1113 ignore_medium_soft = _ignore_runtime_post
1114 ignore_soft = _ignore_satisfied_buildtime_rebuild
1116 DepPrioritySatisfiedRange.ignore_priority = (
1118 DepPrioritySatisfiedRange._ignore_optional,
1119 DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1120 DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1121 DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1122 DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1123 DepPrioritySatisfiedRange._ignore_runtime_post,
1124 DepPrioritySatisfiedRange._ignore_runtime
1127 def _find_deep_system_runtime_deps(graph):
1128 deep_system_deps = set()
1131 if not isinstance(node, Package) or \
1132 node.operation == 'uninstall':
1134 if node.root_config.sets['system'].findAtomForPackage(node):
1135 node_stack.append(node)
1137 def ignore_priority(priority):
1139 Ignore non-runtime priorities.
1141 if isinstance(priority, DepPriority) and \
1142 (priority.runtime or priority.runtime_post):
1147 node = node_stack.pop()
1148 if node in deep_system_deps:
1150 deep_system_deps.add(node)
1151 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1152 if not isinstance(child, Package) or \
1153 child.operation == 'uninstall':
1155 node_stack.append(child)
1157 return deep_system_deps
1159 class FakeVartree(portage.vartree):
1160 """This is implements an in-memory copy of a vartree instance that provides
1161 all the interfaces required for use by the depgraph. The vardb is locked
1162 during the constructor call just long enough to read a copy of the
1163 installed package information. This allows the depgraph to do it's
1164 dependency calculations without holding a lock on the vardb. It also
1165 allows things like vardb global updates to be done in memory so that the
1166 user doesn't necessarily need write access to the vardb in cases where
1167 global updates are necessary (updates are performed when necessary if there
1168 is not a matching ebuild in the tree)."""
1169 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1170 self._root_config = root_config
1171 if pkg_cache is None:
1173 real_vartree = root_config.trees["vartree"]
1174 portdb = root_config.trees["porttree"].dbapi
1175 self.root = real_vartree.root
1176 self.settings = real_vartree.settings
1177 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1178 if "_mtime_" not in mykeys:
1179 mykeys.append("_mtime_")
1180 self._db_keys = mykeys
1181 self._pkg_cache = pkg_cache
1182 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1183 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1185 # At least the parent needs to exist for the lock file.
1186 portage.util.ensure_dirs(vdb_path)
1187 except portage.exception.PortageException:
1191 if acquire_lock and os.access(vdb_path, os.W_OK):
1192 vdb_lock = portage.locks.lockdir(vdb_path)
1193 real_dbapi = real_vartree.dbapi
1195 for cpv in real_dbapi.cpv_all():
1196 cache_key = ("installed", self.root, cpv, "nomerge")
1197 pkg = self._pkg_cache.get(cache_key)
1199 metadata = pkg.metadata
1201 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1202 myslot = metadata["SLOT"]
1203 mycp = portage.dep_getkey(cpv)
1204 myslot_atom = "%s:%s" % (mycp, myslot)
1206 mycounter = long(metadata["COUNTER"])
1209 metadata["COUNTER"] = str(mycounter)
1210 other_counter = slot_counters.get(myslot_atom, None)
1211 if other_counter is not None:
1212 if other_counter > mycounter:
1214 slot_counters[myslot_atom] = mycounter
1216 pkg = Package(built=True, cpv=cpv,
1217 installed=True, metadata=metadata,
1218 root_config=root_config, type_name="installed")
1219 self._pkg_cache[pkg] = pkg
1220 self.dbapi.cpv_inject(pkg)
1221 real_dbapi.flush_cache()
1224 portage.locks.unlockdir(vdb_lock)
1225 # Populate the old-style virtuals using the cached values.
1226 if not self.settings.treeVirtuals:
1227 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1228 portage.getCPFromCPV, self.get_all_provides())
1230 # Intialize variables needed for lazy cache pulls of the live ebuild
1231 # metadata. This ensures that the vardb lock is released ASAP, without
1232 # being delayed in case cache generation is triggered.
1233 self._aux_get = self.dbapi.aux_get
1234 self.dbapi.aux_get = self._aux_get_wrapper
1235 self._match = self.dbapi.match
1236 self.dbapi.match = self._match_wrapper
1237 self._aux_get_history = set()
1238 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1239 self._portdb = portdb
1240 self._global_updates = None
1242 def _match_wrapper(self, cpv, use_cache=1):
1244 Make sure the metadata in Package instances gets updated for any
1245 cpv that is returned from a match() call, since the metadata can
1246 be accessed directly from the Package instance instead of via
1249 matches = self._match(cpv, use_cache=use_cache)
1251 if cpv in self._aux_get_history:
1253 self._aux_get_wrapper(cpv, [])
1256 def _aux_get_wrapper(self, pkg, wants):
1257 if pkg in self._aux_get_history:
1258 return self._aux_get(pkg, wants)
1259 self._aux_get_history.add(pkg)
1261 # Use the live ebuild metadata if possible.
1262 live_metadata = dict(izip(self._portdb_keys,
1263 self._portdb.aux_get(pkg, self._portdb_keys)))
1264 if not portage.eapi_is_supported(live_metadata["EAPI"]):
1266 self.dbapi.aux_update(pkg, live_metadata)
1267 except (KeyError, portage.exception.PortageException):
1268 if self._global_updates is None:
1269 self._global_updates = \
1270 grab_global_updates(self._portdb.porttree_root)
1271 perform_global_updates(
1272 pkg, self.dbapi, self._global_updates)
1273 return self._aux_get(pkg, wants)
1275 def sync(self, acquire_lock=1):
1277 Call this method to synchronize state with the real vardb
1278 after one or more packages may have been installed or
1281 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1283 # At least the parent needs to exist for the lock file.
1284 portage.util.ensure_dirs(vdb_path)
1285 except portage.exception.PortageException:
1289 if acquire_lock and os.access(vdb_path, os.W_OK):
1290 vdb_lock = portage.locks.lockdir(vdb_path)
1294 portage.locks.unlockdir(vdb_lock)
1298 real_vardb = self._root_config.trees["vartree"].dbapi
1299 current_cpv_set = frozenset(real_vardb.cpv_all())
1300 pkg_vardb = self.dbapi
1301 aux_get_history = self._aux_get_history
1303 # Remove any packages that have been uninstalled.
1304 for pkg in list(pkg_vardb):
1305 if pkg.cpv not in current_cpv_set:
1306 pkg_vardb.cpv_remove(pkg)
1307 aux_get_history.discard(pkg.cpv)
1309 # Validate counters and timestamps.
1312 validation_keys = ["COUNTER", "_mtime_"]
1313 for cpv in current_cpv_set:
1315 pkg_hash_key = ("installed", root, cpv, "nomerge")
1316 pkg = pkg_vardb.get(pkg_hash_key)
1318 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1320 counter = long(counter)
1324 if counter != pkg.counter or \
1326 pkg_vardb.cpv_remove(pkg)
1327 aux_get_history.discard(pkg.cpv)
1331 pkg = self._pkg(cpv)
1333 other_counter = slot_counters.get(pkg.slot_atom)
1334 if other_counter is not None:
1335 if other_counter > pkg.counter:
1338 slot_counters[pkg.slot_atom] = pkg.counter
1339 pkg_vardb.cpv_inject(pkg)
1341 real_vardb.flush_cache()
1343 def _pkg(self, cpv):
1344 root_config = self._root_config
1345 real_vardb = root_config.trees["vartree"].dbapi
1346 pkg = Package(cpv=cpv, installed=True,
1347 metadata=izip(self._db_keys,
1348 real_vardb.aux_get(cpv, self._db_keys)),
1349 root_config=root_config,
1350 type_name="installed")
1353 mycounter = long(pkg.metadata["COUNTER"])
1356 pkg.metadata["COUNTER"] = str(mycounter)
1360 def grab_global_updates(portdir):
1361 from portage.update import grab_updates, parse_updates
1362 updpath = os.path.join(portdir, "profiles", "updates")
1364 rawupdates = grab_updates(updpath)
1365 except portage.exception.DirectoryNotFound:
1368 for mykey, mystat, mycontent in rawupdates:
1369 commands, errors = parse_updates(mycontent)
1370 upd_commands.extend(commands)
1373 def perform_global_updates(mycpv, mydb, mycommands):
1374 from portage.update import update_dbentries
1375 aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1376 aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1377 updates = update_dbentries(mycommands, aux_dict)
1379 mydb.aux_update(mycpv, updates)
1381 def visible(pkgsettings, pkg):
1383 Check if a package is visible. This can raise an InvalidDependString
1384 exception if LICENSE is invalid.
1385 TODO: optionally generate a list of masking reasons
1387 @returns: True if the package is visible, False otherwise.
1389 if not pkg.metadata["SLOT"]:
1391 if not pkg.installed:
1392 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1394 eapi = pkg.metadata["EAPI"]
1395 if not portage.eapi_is_supported(eapi):
1397 if not pkg.installed:
1398 if portage._eapi_is_deprecated(eapi):
1400 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1402 if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1404 if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1407 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1409 except portage.exception.InvalidDependString:
1413 def get_masking_status(pkg, pkgsettings, root_config):
1415 mreasons = portage.getmaskingstatus(
1416 pkg, settings=pkgsettings,
1417 portdb=root_config.trees["porttree"].dbapi)
1419 if not pkg.installed:
1420 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1421 mreasons.append("CHOST: %s" % \
1422 pkg.metadata["CHOST"])
1424 if not pkg.metadata["SLOT"]:
1425 mreasons.append("invalid: SLOT is undefined")
1429 def get_mask_info(root_config, cpv, pkgsettings,
1430 db, pkg_type, built, installed, db_keys):
1433 metadata = dict(izip(db_keys,
1434 db.aux_get(cpv, db_keys)))
1437 if metadata and not built:
1438 pkgsettings.setcpv(cpv, mydb=metadata)
1439 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1440 metadata['CHOST'] = pkgsettings.get('CHOST', '')
1441 if metadata is None:
1442 mreasons = ["corruption"]
1444 eapi = metadata['EAPI']
1447 if not portage.eapi_is_supported(eapi):
1448 mreasons = ['EAPI %s' % eapi]
1450 pkg = Package(type_name=pkg_type, root_config=root_config,
1451 cpv=cpv, built=built, installed=installed, metadata=metadata)
1452 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1453 return metadata, mreasons
1455 def show_masked_packages(masked_packages):
1456 shown_licenses = set()
1457 shown_comments = set()
1458 # Maybe there is both an ebuild and a binary. Only
1459 # show one of them to avoid redundant appearance.
1461 have_eapi_mask = False
1462 for (root_config, pkgsettings, cpv,
1463 metadata, mreasons) in masked_packages:
1464 if cpv in shown_cpvs:
1467 comment, filename = None, None
1468 if "package.mask" in mreasons:
1469 comment, filename = \
1470 portage.getmaskingreason(
1471 cpv, metadata=metadata,
1472 settings=pkgsettings,
1473 portdb=root_config.trees["porttree"].dbapi,
1474 return_location=True)
1475 missing_licenses = []
1477 if not portage.eapi_is_supported(metadata["EAPI"]):
1478 have_eapi_mask = True
1480 missing_licenses = \
1481 pkgsettings._getMissingLicenses(
1483 except portage.exception.InvalidDependString:
1484 # This will have already been reported
1485 # above via mreasons.
1488 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1489 if comment and comment not in shown_comments:
1492 shown_comments.add(comment)
1493 portdb = root_config.trees["porttree"].dbapi
1494 for l in missing_licenses:
1495 l_path = portdb.findLicensePath(l)
1496 if l in shown_licenses:
1498 msg = ("A copy of the '%s' license" + \
1499 " is located at '%s'.") % (l, l_path)
1502 shown_licenses.add(l)
1503 return have_eapi_mask
1505 class Task(SlotObject):
1506 __slots__ = ("_hash_key", "_hash_value")
1508 def _get_hash_key(self):
1509 hash_key = getattr(self, "_hash_key", None)
1510 if hash_key is None:
1511 raise NotImplementedError(self)
1514 def __eq__(self, other):
1515 return self._get_hash_key() == other
1517 def __ne__(self, other):
1518 return self._get_hash_key() != other
1521 hash_value = getattr(self, "_hash_value", None)
1522 if hash_value is None:
1523 self._hash_value = hash(self._get_hash_key())
1524 return self._hash_value
1527 return len(self._get_hash_key())
1529 def __getitem__(self, key):
1530 return self._get_hash_key()[key]
1533 return iter(self._get_hash_key())
1535 def __contains__(self, key):
1536 return key in self._get_hash_key()
1539 return str(self._get_hash_key())
1541 class Blocker(Task):
1543 __hash__ = Task.__hash__
1544 __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1546 def __init__(self, **kwargs):
1547 Task.__init__(self, **kwargs)
1548 self.cp = portage.dep_getkey(self.atom)
1550 def _get_hash_key(self):
1551 hash_key = getattr(self, "_hash_key", None)
1552 if hash_key is None:
1554 ("blocks", self.root, self.atom, self.eapi)
1555 return self._hash_key
1557 class Package(Task):
1559 __hash__ = Task.__hash__
1560 __slots__ = ("built", "cpv", "depth",
1561 "installed", "metadata", "onlydeps", "operation",
1562 "root_config", "type_name",
1563 "category", "counter", "cp", "cpv_split",
1564 "inherited", "iuse", "mtime",
1565 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1568 "CHOST", "COUNTER", "DEPEND", "EAPI",
1569 "INHERITED", "IUSE", "KEYWORDS",
1570 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1571 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1573 def __init__(self, **kwargs):
1574 Task.__init__(self, **kwargs)
1575 self.root = self.root_config.root
1576 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1577 self.cp = portage.cpv_getkey(self.cpv)
1580 # Avoid an InvalidAtom exception when creating slot_atom.
1581 # This package instance will be masked due to empty SLOT.
1583 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
1584 self.category, self.pf = portage.catsplit(self.cpv)
1585 self.cpv_split = portage.catpkgsplit(self.cpv)
1586 self.pv_split = self.cpv_split[1:]
1590 __slots__ = ("__weakref__", "enabled")
1592 def __init__(self, use):
1593 self.enabled = frozenset(use)
1595 class _iuse(object):
1597 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1599 def __init__(self, tokens, iuse_implicit):
1600 self.tokens = tuple(tokens)
1601 self.iuse_implicit = iuse_implicit
1608 enabled.append(x[1:])
1610 disabled.append(x[1:])
1613 self.enabled = frozenset(enabled)
1614 self.disabled = frozenset(disabled)
1615 self.all = frozenset(chain(enabled, disabled, other))
1617 def __getattribute__(self, name):
1620 return object.__getattribute__(self, "regex")
1621 except AttributeError:
1622 all = object.__getattribute__(self, "all")
1623 iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1624 # Escape anything except ".*" which is supposed
1625 # to pass through from _get_implicit_iuse()
1626 regex = (re.escape(x) for x in chain(all, iuse_implicit))
1627 regex = "^(%s)$" % "|".join(regex)
1628 regex = regex.replace("\\.\\*", ".*")
1629 self.regex = re.compile(regex)
1630 return object.__getattribute__(self, name)
1632 def _get_hash_key(self):
1633 hash_key = getattr(self, "_hash_key", None)
1634 if hash_key is None:
1635 if self.operation is None:
1636 self.operation = "merge"
1637 if self.onlydeps or self.installed:
1638 self.operation = "nomerge"
1640 (self.type_name, self.root, self.cpv, self.operation)
1641 return self._hash_key
1643 def __lt__(self, other):
1644 if other.cp != self.cp:
1646 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1650 def __le__(self, other):
1651 if other.cp != self.cp:
1653 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1657 def __gt__(self, other):
1658 if other.cp != self.cp:
1660 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1664 def __ge__(self, other):
1665 if other.cp != self.cp:
1667 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1671 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1672 if not x.startswith("UNUSED_"))
1673 _all_metadata_keys.discard("CDEPEND")
1674 _all_metadata_keys.update(Package.metadata_keys)
1676 from portage.cache.mappings import slot_dict_class
1677 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1679 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1681 Detect metadata updates and synchronize Package attributes.
1684 __slots__ = ("_pkg",)
1685 _wrapped_keys = frozenset(
1686 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1688 def __init__(self, pkg, metadata):
1689 _PackageMetadataWrapperBase.__init__(self)
1691 self.update(metadata)
1693 def __setitem__(self, k, v):
1694 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1695 if k in self._wrapped_keys:
1696 getattr(self, "_set_" + k.lower())(k, v)
1698 def _set_inherited(self, k, v):
1699 if isinstance(v, basestring):
1700 v = frozenset(v.split())
1701 self._pkg.inherited = v
1703 def _set_iuse(self, k, v):
1704 self._pkg.iuse = self._pkg._iuse(
1705 v.split(), self._pkg.root_config.iuse_implicit)
1707 def _set_slot(self, k, v):
1710 def _set_use(self, k, v):
1711 self._pkg.use = self._pkg._use(v.split())
1713 def _set_counter(self, k, v):
1714 if isinstance(v, basestring):
1719 self._pkg.counter = v
1721 def _set__mtime_(self, k, v):
1722 if isinstance(v, basestring):
1729 class EbuildFetchonly(SlotObject):
1731 __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1734 settings = self.settings
1736 portdb = pkg.root_config.trees["porttree"].dbapi
1737 ebuild_path = portdb.findname(pkg.cpv)
1738 settings.setcpv(pkg)
1739 debug = settings.get("PORTAGE_DEBUG") == "1"
1740 use_cache = 1 # always true
1741 portage.doebuild_environment(ebuild_path, "fetch",
1742 settings["ROOT"], settings, debug, use_cache, portdb)
1743 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
1746 rval = self._execute_with_builddir()
1748 rval = portage.doebuild(ebuild_path, "fetch",
1749 settings["ROOT"], settings, debug=debug,
1750 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1751 mydbapi=portdb, tree="porttree")
1753 if rval != os.EX_OK:
1754 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1755 eerror(msg, phase="unpack", key=pkg.cpv)
1759 def _execute_with_builddir(self):
1760 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1761 # ensuring sane $PWD (bug #239560) and storing elog
1762 # messages. Use a private temp directory, in order
1763 # to avoid locking the main one.
1764 settings = self.settings
1765 global_tmpdir = settings["PORTAGE_TMPDIR"]
1766 from tempfile import mkdtemp
1768 private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1770 if e.errno != portage.exception.PermissionDenied.errno:
1772 raise portage.exception.PermissionDenied(global_tmpdir)
1773 settings["PORTAGE_TMPDIR"] = private_tmpdir
1774 settings.backup_changes("PORTAGE_TMPDIR")
1776 retval = self._execute()
1778 settings["PORTAGE_TMPDIR"] = global_tmpdir
1779 settings.backup_changes("PORTAGE_TMPDIR")
1780 shutil.rmtree(private_tmpdir)
1784 settings = self.settings
1786 root_config = pkg.root_config
1787 portdb = root_config.trees["porttree"].dbapi
1788 ebuild_path = portdb.findname(pkg.cpv)
1789 debug = settings.get("PORTAGE_DEBUG") == "1"
1790 portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
1792 retval = portage.doebuild(ebuild_path, "fetch",
1793 self.settings["ROOT"], self.settings, debug=debug,
1794 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1795 mydbapi=portdb, tree="porttree")
1797 if retval != os.EX_OK:
1798 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1799 eerror(msg, phase="unpack", key=pkg.cpv)
1801 portage.elog.elog_process(self.pkg.cpv, self.settings)
1804 class PollConstants(object):
1807 Provides POLL* constants that are equivalent to those from the
1808 select module, for use by PollSelectAdapter.
1811 names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1814 locals()[k] = getattr(select, k, v)
1818 class AsynchronousTask(SlotObject):
1820 Subclasses override _wait() and _poll() so that calls
1821 to public methods can be wrapped for implementing
1822 hooks such as exit listener notification.
1824 Sublasses should call self.wait() to notify exit listeners after
1825 the task is complete and self.returncode has been set.
1828 __slots__ = ("background", "cancelled", "returncode") + \
1829 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1833 Start an asynchronous task and then return as soon as possible.
1839 raise NotImplementedError(self)
1842 return self.returncode is None
1849 return self.returncode
1852 if self.returncode is None:
1855 return self.returncode
1858 return self.returncode
1861 self.cancelled = True
1864 def addStartListener(self, f):
1866 The function will be called with one argument, a reference to self.
1868 if self._start_listeners is None:
1869 self._start_listeners = []
1870 self._start_listeners.append(f)
1872 def removeStartListener(self, f):
1873 if self._start_listeners is None:
1875 self._start_listeners.remove(f)
1877 def _start_hook(self):
1878 if self._start_listeners is not None:
1879 start_listeners = self._start_listeners
1880 self._start_listeners = None
1882 for f in start_listeners:
1885 def addExitListener(self, f):
1887 The function will be called with one argument, a reference to self.
1889 if self._exit_listeners is None:
1890 self._exit_listeners = []
1891 self._exit_listeners.append(f)
1893 def removeExitListener(self, f):
1894 if self._exit_listeners is None:
1895 if self._exit_listener_stack is not None:
1896 self._exit_listener_stack.remove(f)
1898 self._exit_listeners.remove(f)
1900 def _wait_hook(self):
1902 Call this method after the task completes, just before returning
1903 the returncode from wait() or poll(). This hook is
1904 used to trigger exit listeners when the returncode first
1907 if self.returncode is not None and \
1908 self._exit_listeners is not None:
1910 # This prevents recursion, in case one of the
1911 # exit handlers triggers this method again by
1912 # calling wait(). Use a stack that gives
1913 # removeExitListener() an opportunity to consume
1914 # listeners from the stack, before they can get
1915 # called below. This is necessary because a call
1916 # to one exit listener may result in a call to
1917 # removeExitListener() for another listener on
1918 # the stack. That listener needs to be removed
1919 # from the stack since it would be inconsistent
1920 # to call it after it has been been passed into
1921 # removeExitListener().
1922 self._exit_listener_stack = self._exit_listeners
1923 self._exit_listeners = None
1925 self._exit_listener_stack.reverse()
1926 while self._exit_listener_stack:
1927 self._exit_listener_stack.pop()(self)
1929 class AbstractPollTask(AsynchronousTask):
1931 __slots__ = ("scheduler",) + \
1935 _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1936 _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1939 def _unregister(self):
1940 raise NotImplementedError(self)
1942 def _unregister_if_appropriate(self, event):
1943 if self._registered:
1944 if event & self._exceptional_events:
1947 elif event & PollConstants.POLLHUP:
1951 class PipeReader(AbstractPollTask):
1954 Reads output from one or more files and saves it in memory,
1955 for retrieval via the getvalue() method. This is driven by
1956 the scheduler's poll() loop, so it runs entirely within the
1960 __slots__ = ("input_files",) + \
1961 ("_read_data", "_reg_ids")
1964 self._reg_ids = set()
1965 self._read_data = []
1966 for k, f in self.input_files.iteritems():
1967 fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1968 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1969 self._reg_ids.add(self.scheduler.register(f.fileno(),
1970 self._registered_events, self._output_handler))
1971 self._registered = True
1974 return self._registered
1977 if self.returncode is None:
1979 self.cancelled = True
1983 if self.returncode is not None:
1984 return self.returncode
1986 if self._registered:
1987 self.scheduler.schedule(self._reg_ids)
1990 self.returncode = os.EX_OK
1991 return self.returncode
1994 """Retrieve the entire contents"""
1995 if sys.hexversion >= 0x3000000:
1996 return bytes().join(self._read_data)
1997 return "".join(self._read_data)
2000 """Free the memory buffer."""
2001 self._read_data = None
2003 def _output_handler(self, fd, event):
2005 if event & PollConstants.POLLIN:
2007 for f in self.input_files.itervalues():
2008 if fd == f.fileno():
2011 buf = array.array('B')
2013 buf.fromfile(f, self._bufsize)
2018 self._read_data.append(buf.tostring())
2023 self._unregister_if_appropriate(event)
2024 return self._registered
2026 def _unregister(self):
2028 Unregister from the scheduler and close open files.
2031 self._registered = False
2033 if self._reg_ids is not None:
2034 for reg_id in self._reg_ids:
2035 self.scheduler.unregister(reg_id)
2036 self._reg_ids = None
2038 if self.input_files is not None:
2039 for f in self.input_files.itervalues():
2041 self.input_files = None
2043 class CompositeTask(AsynchronousTask):
2045 __slots__ = ("scheduler",) + ("_current_task",)
2048 return self._current_task is not None
2051 self.cancelled = True
2052 if self._current_task is not None:
2053 self._current_task.cancel()
2057 This does a loop calling self._current_task.poll()
2058 repeatedly as long as the value of self._current_task
2059 keeps changing. It calls poll() a maximum of one time
2060 for a given self._current_task instance. This is useful
2061 since calling poll() on a task can trigger advance to
2062 the next task could eventually lead to the returncode
2063 being set in cases when polling only a single task would
2064 not have the same effect.
2069 task = self._current_task
2070 if task is None or task is prev:
2071 # don't poll the same task more than once
2076 return self.returncode
2082 task = self._current_task
2084 # don't wait for the same task more than once
2087 # Before the task.wait() method returned, an exit
2088 # listener should have set self._current_task to either
2089 # a different task or None. Something is wrong.
2090 raise AssertionError("self._current_task has not " + \
2091 "changed since calling wait", self, task)
2095 return self.returncode
2097 def _assert_current(self, task):
2099 Raises an AssertionError if the given task is not the
2100 same one as self._current_task. This can be useful
2103 if task is not self._current_task:
2104 raise AssertionError("Unrecognized task: %s" % (task,))
2106 def _default_exit(self, task):
2108 Calls _assert_current() on the given task and then sets the
2109 composite returncode attribute if task.returncode != os.EX_OK.
2110 If the task failed then self._current_task will be set to None.
2111 Subclasses can use this as a generic task exit callback.
2114 @returns: The task.returncode attribute.
2116 self._assert_current(task)
2117 if task.returncode != os.EX_OK:
2118 self.returncode = task.returncode
2119 self._current_task = None
2120 return task.returncode
2122 def _final_exit(self, task):
2124 Assumes that task is the final task of this composite task.
2125 Calls _default_exit() and sets self.returncode to the task's
2126 returncode and sets self._current_task to None.
2128 self._default_exit(task)
2129 self._current_task = None
2130 self.returncode = task.returncode
2131 return self.returncode
2133 def _default_final_exit(self, task):
2135 This calls _final_exit() and then wait().
2137 Subclasses can use this as a generic final task exit callback.
2140 self._final_exit(task)
2143 def _start_task(self, task, exit_handler):
2145 Register exit handler for the given task, set it
2146 as self._current_task, and call task.start().
2148 Subclasses can use this as a generic way to start
2152 task.addExitListener(exit_handler)
2153 self._current_task = task
2156 class TaskSequence(CompositeTask):
2158 A collection of tasks that executes sequentially. Each task
2159 must have a addExitListener() method that can be used as
2160 a means to trigger movement from one task to the next.
2163 __slots__ = ("_task_queue",)
2165 def __init__(self, **kwargs):
2166 AsynchronousTask.__init__(self, **kwargs)
2167 self._task_queue = deque()
2169 def add(self, task):
2170 self._task_queue.append(task)
2173 self._start_next_task()
2176 self._task_queue.clear()
2177 CompositeTask.cancel(self)
2179 def _start_next_task(self):
2180 self._start_task(self._task_queue.popleft(),
2181 self._task_exit_handler)
2183 def _task_exit_handler(self, task):
2184 if self._default_exit(task) != os.EX_OK:
2186 elif self._task_queue:
2187 self._start_next_task()
2189 self._final_exit(task)
2192 class SubProcess(AbstractPollTask):
2194 __slots__ = ("pid",) + \
2195 ("_files", "_reg_id")
2197 # A file descriptor is required for the scheduler to monitor changes from
2198 # inside a poll() loop. When logging is not enabled, create a pipe just to
2199 # serve this purpose alone.
2203 if self.returncode is not None:
2204 return self.returncode
2205 if self.pid is None:
2206 return self.returncode
2207 if self._registered:
2208 return self.returncode
2211 retval = os.waitpid(self.pid, os.WNOHANG)
2213 if e.errno != errno.ECHILD:
2216 retval = (self.pid, 1)
2218 if retval == (0, 0):
2220 self._set_returncode(retval)
2221 return self.returncode
2226 os.kill(self.pid, signal.SIGTERM)
2228 if e.errno != errno.ESRCH:
2232 self.cancelled = True
2233 if self.pid is not None:
2235 return self.returncode
2238 return self.pid is not None and \
2239 self.returncode is None
2243 if self.returncode is not None:
2244 return self.returncode
2246 if self._registered:
2247 self.scheduler.schedule(self._reg_id)
2249 if self.returncode is not None:
2250 return self.returncode
2253 wait_retval = os.waitpid(self.pid, 0)
2255 if e.errno != errno.ECHILD:
2258 self._set_returncode((self.pid, 1))
2260 self._set_returncode(wait_retval)
2262 return self.returncode
2264 def _unregister(self):
2266 Unregister from the scheduler and close open files.
2269 self._registered = False
2271 if self._reg_id is not None:
2272 self.scheduler.unregister(self._reg_id)
2275 if self._files is not None:
2276 for f in self._files.itervalues():
2280 def _set_returncode(self, wait_retval):
2282 retval = wait_retval[1]
2284 if retval != os.EX_OK:
2286 retval = (retval & 0xff) << 8
2288 retval = retval >> 8
2290 self.returncode = retval
2292 class SpawnProcess(SubProcess):
2295 Constructor keyword args are passed into portage.process.spawn().
2296 The required "args" keyword argument will be passed as the first
2300 _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2301 "uid", "gid", "groups", "umask", "logfile",
2302 "path_lookup", "pre_exec")
2304 __slots__ = ("args",) + \
2307 _file_names = ("log", "process", "stdout")
2308 _files_dict = slot_dict_class(_file_names, prefix="")
2315 if self.fd_pipes is None:
2317 fd_pipes = self.fd_pipes
2318 fd_pipes.setdefault(0, sys.stdin.fileno())
2319 fd_pipes.setdefault(1, sys.stdout.fileno())
2320 fd_pipes.setdefault(2, sys.stderr.fileno())
2322 # flush any pending output
2323 for fd in fd_pipes.itervalues():
2324 if fd == sys.stdout.fileno():
2326 if fd == sys.stderr.fileno():
2329 logfile = self.logfile
2330 self._files = self._files_dict()
2333 master_fd, slave_fd = self._pipe(fd_pipes)
2334 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2335 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2338 fd_pipes_orig = fd_pipes.copy()
2340 # TODO: Use job control functions like tcsetpgrp() to control
2341 # access to stdin. Until then, use /dev/null so that any
2342 # attempts to read from stdin will immediately return EOF
2343 # instead of blocking indefinitely.
2344 null_input = open('/dev/null', 'rb')
2345 fd_pipes[0] = null_input.fileno()
2347 fd_pipes[0] = fd_pipes_orig[0]
2349 files.process = os.fdopen(master_fd, 'rb')
2350 if logfile is not None:
2352 fd_pipes[1] = slave_fd
2353 fd_pipes[2] = slave_fd
2355 files.log = open(logfile, mode='ab')
2356 portage.util.apply_secpass_permissions(logfile,
2357 uid=portage.portage_uid, gid=portage.portage_gid,
2360 if not self.background:
2361 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2363 output_handler = self._output_handler
2367 # Create a dummy pipe so the scheduler can monitor
2368 # the process from inside a poll() loop.
2369 fd_pipes[self._dummy_pipe_fd] = slave_fd
2371 fd_pipes[1] = slave_fd
2372 fd_pipes[2] = slave_fd
2373 output_handler = self._dummy_handler
2376 for k in self._spawn_kwarg_names:
2377 v = getattr(self, k)
2381 kwargs["fd_pipes"] = fd_pipes
2382 kwargs["returnpid"] = True
2383 kwargs.pop("logfile", None)
2385 self._reg_id = self.scheduler.register(files.process.fileno(),
2386 self._registered_events, output_handler)
2387 self._registered = True
2389 retval = self._spawn(self.args, **kwargs)
2392 if null_input is not None:
2395 if isinstance(retval, int):
2398 self.returncode = retval
2402 self.pid = retval[0]
2403 portage.process.spawned_pids.remove(self.pid)
2405 def _pipe(self, fd_pipes):
2407 @type fd_pipes: dict
2408 @param fd_pipes: pipes from which to copy terminal size if desired.
2412 def _spawn(self, args, **kwargs):
2413 return portage.process.spawn(args, **kwargs)
2415 def _output_handler(self, fd, event):
2417 if event & PollConstants.POLLIN:
2420 buf = array.array('B')
2422 buf.fromfile(files.process, self._bufsize)
2427 if not self.background:
2428 buf.tofile(files.stdout)
2429 files.stdout.flush()
2430 buf.tofile(files.log)
2436 self._unregister_if_appropriate(event)
2437 return self._registered
2439 def _dummy_handler(self, fd, event):
2441 This method is mainly interested in detecting EOF, since
2442 the only purpose of the pipe is to allow the scheduler to
2443 monitor the process from inside a poll() loop.
2446 if event & PollConstants.POLLIN:
2448 buf = array.array('B')
2450 buf.fromfile(self._files.process, self._bufsize)
2460 self._unregister_if_appropriate(event)
2461 return self._registered
2463 class MiscFunctionsProcess(SpawnProcess):
2465 Spawns misc-functions.sh with an existing ebuild environment.
2468 __slots__ = ("commands", "phase", "pkg", "settings")
2471 settings = self.settings
2472 settings.pop("EBUILD_PHASE", None)
2473 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2474 misc_sh_binary = os.path.join(portage_bin_path,
2475 os.path.basename(portage.const.MISC_SH_BINARY))
2477 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2478 self.logfile = settings.get("PORTAGE_LOG_FILE")
2480 portage._doebuild_exit_status_unlink(
2481 settings.get("EBUILD_EXIT_STATUS_FILE"))
2483 SpawnProcess._start(self)
2485 def _spawn(self, args, **kwargs):
2486 settings = self.settings
2487 debug = settings.get("PORTAGE_DEBUG") == "1"
2488 return portage.spawn(" ".join(args), settings,
2489 debug=debug, **kwargs)
2491 def _set_returncode(self, wait_retval):
2492 SpawnProcess._set_returncode(self, wait_retval)
2493 self.returncode = portage._doebuild_exit_status_check_and_log(
2494 self.settings, self.phase, self.returncode)
2496 class EbuildFetcher(SpawnProcess):
2498 __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2503 root_config = self.pkg.root_config
2504 portdb = root_config.trees["porttree"].dbapi
2505 ebuild_path = portdb.findname(self.pkg.cpv)
2506 settings = self.config_pool.allocate()
2507 settings.setcpv(self.pkg)
2509 # In prefetch mode, logging goes to emerge-fetch.log and the builddir
2510 # should not be touched since otherwise it could interfere with
2511 # another instance of the same cpv concurrently being built for a
2512 # different $ROOT (currently, builds only cooperate with prefetchers
2513 # that are spawned for the same $ROOT).
2514 if not self.prefetch:
2515 self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2516 self._build_dir.lock()
2517 self._build_dir.clean_log()
2518 portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2519 if self.logfile is None:
2520 self.logfile = settings.get("PORTAGE_LOG_FILE")
2526 # If any incremental variables have been overridden
2527 # via the environment, those values need to be passed
2528 # along here so that they are correctly considered by
2529 # the config instance in the subproccess.
2530 fetch_env = os.environ.copy()
2532 nocolor = settings.get("NOCOLOR")
2533 if nocolor is not None:
2534 fetch_env["NOCOLOR"] = nocolor
2536 fetch_env["PORTAGE_NICENESS"] = "0"
2538 fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2540 ebuild_binary = os.path.join(
2541 settings["PORTAGE_BIN_PATH"], "ebuild")
2543 fetch_args = [ebuild_binary, ebuild_path, phase]
2544 debug = settings.get("PORTAGE_DEBUG") == "1"
2546 fetch_args.append("--debug")
2548 self.args = fetch_args
2549 self.env = fetch_env
2550 SpawnProcess._start(self)
2552 def _pipe(self, fd_pipes):
2553 """When appropriate, use a pty so that fetcher progress bars,
2554 like wget has, will work properly."""
2555 if self.background or not sys.stdout.isatty():
2556 # When the output only goes to a log file,
2557 # there's no point in creating a pty.
2559 stdout_pipe = fd_pipes.get(1)
2560 got_pty, master_fd, slave_fd = \
2561 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2562 return (master_fd, slave_fd)
2564 def _set_returncode(self, wait_retval):
2565 SpawnProcess._set_returncode(self, wait_retval)
2566 # Collect elog messages that might have been
2567 # created by the pkg_nofetch phase.
2568 if self._build_dir is not None:
2569 # Skip elog messages for prefetch, in order to avoid duplicates.
2570 if not self.prefetch and self.returncode != os.EX_OK:
2572 if self.logfile is not None:
2574 elog_out = open(self.logfile, 'a')
2575 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2576 if self.logfile is not None:
2577 msg += ", Log file:"
2578 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2579 if self.logfile is not None:
2580 eerror(" '%s'" % (self.logfile,),
2581 phase="unpack", key=self.pkg.cpv, out=elog_out)
2582 if elog_out is not None:
2584 if not self.prefetch:
2585 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2586 features = self._build_dir.settings.features
2587 if self.returncode == os.EX_OK:
2588 self._build_dir.clean_log()
2589 self._build_dir.unlock()
2590 self.config_pool.deallocate(self._build_dir.settings)
2591 self._build_dir = None
2593 class EbuildBuildDir(SlotObject):
2595 __slots__ = ("dir_path", "pkg", "settings",
2596 "locked", "_catdir", "_lock_obj")
2598 def __init__(self, **kwargs):
2599 SlotObject.__init__(self, **kwargs)
2604 This raises an AlreadyLocked exception if lock() is called
2605 while a lock is already held. In order to avoid this, call
2606 unlock() or check whether the "locked" attribute is True
2607 or False before calling lock().
2609 if self._lock_obj is not None:
2610 raise self.AlreadyLocked((self._lock_obj,))
2612 dir_path = self.dir_path
2613 if dir_path is None:
2614 root_config = self.pkg.root_config
2615 portdb = root_config.trees["porttree"].dbapi
2616 ebuild_path = portdb.findname(self.pkg.cpv)
2617 settings = self.settings
2618 settings.setcpv(self.pkg)
2619 debug = settings.get("PORTAGE_DEBUG") == "1"
2620 use_cache = 1 # always true
2621 portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2622 self.settings, debug, use_cache, portdb)
2623 dir_path = self.settings["PORTAGE_BUILDDIR"]
2625 catdir = os.path.dirname(dir_path)
2626 self._catdir = catdir
2628 portage.util.ensure_dirs(os.path.dirname(catdir),
2629 gid=portage.portage_gid,
2633 catdir_lock = portage.locks.lockdir(catdir)
2634 portage.util.ensure_dirs(catdir,
2635 gid=portage.portage_gid,
2637 self._lock_obj = portage.locks.lockdir(dir_path)
2639 self.locked = self._lock_obj is not None
2640 if catdir_lock is not None:
2641 portage.locks.unlockdir(catdir_lock)
2643 def clean_log(self):
2644 """Discard existing log."""
2645 settings = self.settings
2647 for x in ('.logid', 'temp/build.log'):
2649 os.unlink(os.path.join(settings["PORTAGE_BUILDDIR"], x))
2654 if self._lock_obj is None:
2657 portage.locks.unlockdir(self._lock_obj)
2658 self._lock_obj = None
2661 catdir = self._catdir
2664 catdir_lock = portage.locks.lockdir(catdir)
2670 if e.errno not in (errno.ENOENT,
2671 errno.ENOTEMPTY, errno.EEXIST):
2674 portage.locks.unlockdir(catdir_lock)
2676 class AlreadyLocked(portage.exception.PortageException):
2679 class EbuildBuild(CompositeTask):
2681 __slots__ = ("args_set", "config_pool", "find_blockers",
2682 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2683 "prefetcher", "settings", "world_atom") + \
2684 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2688 logger = self.logger
2691 settings = self.settings
2692 world_atom = self.world_atom
2693 root_config = pkg.root_config
2696 portdb = root_config.trees[tree].dbapi
2697 settings.setcpv(pkg)
2698 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2699 ebuild_path = portdb.findname(self.pkg.cpv)
2700 self._ebuild_path = ebuild_path
2702 prefetcher = self.prefetcher
2703 if prefetcher is None:
2705 elif not prefetcher.isAlive():
2707 elif prefetcher.poll() is None:
2709 waiting_msg = "Fetching files " + \
2710 "in the background. " + \
2711 "To view fetch progress, run `tail -f " + \
2712 "/var/log/emerge-fetch.log` in another " + \
2714 msg_prefix = colorize("GOOD", " * ")
2715 from textwrap import wrap
2716 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2717 for line in wrap(waiting_msg, 65))
2718 if not self.background:
2719 writemsg(waiting_msg, noiselevel=-1)
2721 self._current_task = prefetcher
2722 prefetcher.addExitListener(self._prefetch_exit)
2725 self._prefetch_exit(prefetcher)
2727 def _prefetch_exit(self, prefetcher):
2731 settings = self.settings
2734 fetcher = EbuildFetchonly(
2735 fetch_all=opts.fetch_all_uri,
2736 pkg=pkg, pretend=opts.pretend,
2738 retval = fetcher.execute()
2739 self.returncode = retval
2743 fetcher = EbuildFetcher(config_pool=self.config_pool,
2744 fetchall=opts.fetch_all_uri,
2745 fetchonly=opts.fetchonly,
2746 background=self.background,
2747 pkg=pkg, scheduler=self.scheduler)
2749 self._start_task(fetcher, self._fetch_exit)
2751 def _fetch_exit(self, fetcher):
2755 fetch_failed = False
2757 fetch_failed = self._final_exit(fetcher) != os.EX_OK
2759 fetch_failed = self._default_exit(fetcher) != os.EX_OK
2761 if fetch_failed and fetcher.logfile is not None and \
2762 os.path.exists(fetcher.logfile):
2763 self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2765 if not fetch_failed and fetcher.logfile is not None:
2766 # Fetch was successful, so remove the fetch log.
2768 os.unlink(fetcher.logfile)
2772 if fetch_failed or opts.fetchonly:
2776 logger = self.logger
2778 pkg_count = self.pkg_count
2779 scheduler = self.scheduler
2780 settings = self.settings
2781 features = settings.features
2782 ebuild_path = self._ebuild_path
2783 system_set = pkg.root_config.sets["system"]
2785 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2786 self._build_dir.lock()
2788 # Cleaning is triggered before the setup
2789 # phase, in portage.doebuild().
2790 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2791 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2792 short_msg = "emerge: (%s of %s) %s Clean" % \
2793 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2794 logger.log(msg, short_msg=short_msg)
2796 #buildsyspkg: Check if we need to _force_ binary package creation
2797 self._issyspkg = "buildsyspkg" in features and \
2798 system_set.findAtomForPackage(pkg) and \
2801 if opts.buildpkg or self._issyspkg:
2803 self._buildpkg = True
2805 msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2806 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2807 short_msg = "emerge: (%s of %s) %s Compile" % \
2808 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2809 logger.log(msg, short_msg=short_msg)
2812 msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2813 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2814 short_msg = "emerge: (%s of %s) %s Compile" % \
2815 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2816 logger.log(msg, short_msg=short_msg)
2818 build = EbuildExecuter(background=self.background, pkg=pkg,
2819 scheduler=scheduler, settings=settings)
2820 self._start_task(build, self._build_exit)
2822 def _unlock_builddir(self):
2823 portage.elog.elog_process(self.pkg.cpv, self.settings)
2824 self._build_dir.unlock()
2826 def _build_exit(self, build):
2827 if self._default_exit(build) != os.EX_OK:
2828 self._unlock_builddir()
2833 buildpkg = self._buildpkg
2836 self._final_exit(build)
2841 msg = ">>> This is a system package, " + \
2842 "let's pack a rescue tarball.\n"
2844 log_path = self.settings.get("PORTAGE_LOG_FILE")
2845 if log_path is not None:
2846 log_file = open(log_path, 'a')
2852 if not self.background:
2853 portage.writemsg_stdout(msg, noiselevel=-1)
2855 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2856 scheduler=self.scheduler, settings=self.settings)
2858 self._start_task(packager, self._buildpkg_exit)
2860 def _buildpkg_exit(self, packager):
2862 Released build dir lock when there is a failure or
2863 when in buildpkgonly mode. Otherwise, the lock will
2864 be released when merge() is called.
2867 if self._default_exit(packager) != os.EX_OK:
2868 self._unlock_builddir()
2872 if self.opts.buildpkgonly:
2873 # Need to call "clean" phase for buildpkgonly mode
2874 portage.elog.elog_process(self.pkg.cpv, self.settings)
2876 clean_phase = EbuildPhase(background=self.background,
2877 pkg=self.pkg, phase=phase,
2878 scheduler=self.scheduler, settings=self.settings,
2880 self._start_task(clean_phase, self._clean_exit)
2883 # Continue holding the builddir lock until
2884 # after the package has been installed.
2885 self._current_task = None
2886 self.returncode = packager.returncode
2889 def _clean_exit(self, clean_phase):
2890 if self._final_exit(clean_phase) != os.EX_OK or \
2891 self.opts.buildpkgonly:
2892 self._unlock_builddir()
2897 Install the package and then clean up and release locks.
2898 Only call this after the build has completed successfully
2899 and neither fetchonly nor buildpkgonly mode are enabled.
2902 find_blockers = self.find_blockers
2903 ldpath_mtimes = self.ldpath_mtimes
2904 logger = self.logger
2906 pkg_count = self.pkg_count
2907 settings = self.settings
2908 world_atom = self.world_atom
2909 ebuild_path = self._ebuild_path
2912 merge = EbuildMerge(find_blockers=self.find_blockers,
2913 ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2914 pkg_count=pkg_count, pkg_path=ebuild_path,
2915 scheduler=self.scheduler,
2916 settings=settings, tree=tree, world_atom=world_atom)
2918 msg = " === (%s of %s) Merging (%s::%s)" % \
2919 (pkg_count.curval, pkg_count.maxval,
2920 pkg.cpv, ebuild_path)
2921 short_msg = "emerge: (%s of %s) %s Merge" % \
2922 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2923 logger.log(msg, short_msg=short_msg)
2926 rval = merge.execute()
2928 self._unlock_builddir()
2932 class EbuildExecuter(CompositeTask):
2934 __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2936 _phases = ("prepare", "configure", "compile", "test", "install")
2938 _live_eclasses = frozenset([
2948 self._tree = "porttree"
2951 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2952 scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2953 self._start_task(clean_phase, self._clean_phase_exit)
2955 def _clean_phase_exit(self, clean_phase):
2957 if self._default_exit(clean_phase) != os.EX_OK:
2962 scheduler = self.scheduler
2963 settings = self.settings
2966 # This initializes PORTAGE_LOG_FILE.
2967 portage.prepare_build_dirs(pkg.root, settings, cleanup)
2969 setup_phase = EbuildPhase(background=self.background,
2970 pkg=pkg, phase="setup", scheduler=scheduler,
2971 settings=settings, tree=self._tree)
2973 setup_phase.addExitListener(self._setup_exit)
2974 self._current_task = setup_phase
2975 self.scheduler.scheduleSetup(setup_phase)
2977 def _setup_exit(self, setup_phase):
2979 if self._default_exit(setup_phase) != os.EX_OK:
2983 unpack_phase = EbuildPhase(background=self.background,
2984 pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2985 settings=self.settings, tree=self._tree)
2987 if self._live_eclasses.intersection(self.pkg.inherited):
2988 # Serialize $DISTDIR access for live ebuilds since
2989 # otherwise they can interfere with eachother.
2991 unpack_phase.addExitListener(self._unpack_exit)
2992 self._current_task = unpack_phase
2993 self.scheduler.scheduleUnpack(unpack_phase)
2996 self._start_task(unpack_phase, self._unpack_exit)
2998 def _unpack_exit(self, unpack_phase):
3000 if self._default_exit(unpack_phase) != os.EX_OK:
3004 ebuild_phases = TaskSequence(scheduler=self.scheduler)
3007 phases = self._phases
3008 eapi = pkg.metadata["EAPI"]
3009 if eapi in ("0", "1"):
3010 # skip src_prepare and src_configure
3013 for phase in phases:
3014 ebuild_phases.add(EbuildPhase(background=self.background,
3015 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3016 settings=self.settings, tree=self._tree))
3018 self._start_task(ebuild_phases, self._default_final_exit)
3020 class EbuildMetadataPhase(SubProcess):
3023 Asynchronous interface for the ebuild "depend" phase which is
3024 used to extract metadata from the ebuild.
3027 __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3028 "ebuild_mtime", "portdb", "repo_path", "settings") + \
3031 _file_names = ("ebuild",)
3032 _files_dict = slot_dict_class(_file_names, prefix="")
3036 settings = self.settings
3037 settings.setcpv(self.cpv)
3038 ebuild_path = self.ebuild_path
3041 if 'parse-eapi-glep-55' in settings.features:
3042 pf, eapi = portage._split_ebuild_name_glep55(
3043 os.path.basename(ebuild_path))
3044 if eapi is None and \
3045 'parse-eapi-ebuild-head' in settings.features:
3046 eapi = portage._parse_eapi_ebuild_head(codecs.open(ebuild_path,
3047 mode='r', encoding='utf_8', errors='replace'))
3049 if eapi is not None:
3050 if not portage.eapi_is_supported(eapi):
3051 self.metadata_callback(self.cpv, self.ebuild_path,
3052 self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
3053 self.returncode = os.EX_OK
3057 settings.configdict['pkg']['EAPI'] = eapi
3059 debug = settings.get("PORTAGE_DEBUG") == "1"
3063 if self.fd_pipes is not None:
3064 fd_pipes = self.fd_pipes.copy()
3068 fd_pipes.setdefault(0, sys.stdin.fileno())
3069 fd_pipes.setdefault(1, sys.stdout.fileno())
3070 fd_pipes.setdefault(2, sys.stderr.fileno())
3072 # flush any pending output
3073 for fd in fd_pipes.itervalues():
3074 if fd == sys.stdout.fileno():
3076 if fd == sys.stderr.fileno():
3079 fd_pipes_orig = fd_pipes.copy()
3080 self._files = self._files_dict()
3083 master_fd, slave_fd = os.pipe()
3084 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3085 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3087 fd_pipes[self._metadata_fd] = slave_fd
3089 self._raw_metadata = []
3090 files.ebuild = os.fdopen(master_fd, 'r')
3091 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3092 self._registered_events, self._output_handler)
3093 self._registered = True
3095 retval = portage.doebuild(ebuild_path, "depend",
3096 settings["ROOT"], settings, debug,
3097 mydbapi=self.portdb, tree="porttree",
3098 fd_pipes=fd_pipes, returnpid=True)
3102 if isinstance(retval, int):
3103 # doebuild failed before spawning
3105 self.returncode = retval
3109 self.pid = retval[0]
3110 portage.process.spawned_pids.remove(self.pid)
3112 def _output_handler(self, fd, event):
3114 if event & PollConstants.POLLIN:
3115 self._raw_metadata.append(self._files.ebuild.read())
3116 if not self._raw_metadata[-1]:
3120 self._unregister_if_appropriate(event)
3121 return self._registered
3123 def _set_returncode(self, wait_retval):
3124 SubProcess._set_returncode(self, wait_retval)
3125 if self.returncode == os.EX_OK:
3126 metadata_lines = "".join(self._raw_metadata).splitlines()
3127 if len(portage.auxdbkeys) != len(metadata_lines):
3128 # Don't trust bash's returncode if the
3129 # number of lines is incorrect.
3132 metadata = izip(portage.auxdbkeys, metadata_lines)
3133 self.metadata_callback(self.cpv, self.ebuild_path,
3134 self.repo_path, metadata, self.ebuild_mtime)
3136 class EbuildProcess(SpawnProcess):
3138 __slots__ = ("phase", "pkg", "settings", "tree")
3141 # Don't open the log file during the clean phase since the
3142 # open file can result in an nfs lock on $T/build.log which
3143 # prevents the clean phase from removing $T.
3144 if self.phase not in ("clean", "cleanrm"):
3145 self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3146 SpawnProcess._start(self)
3148 def _pipe(self, fd_pipes):
3149 stdout_pipe = fd_pipes.get(1)
3150 got_pty, master_fd, slave_fd = \
3151 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3152 return (master_fd, slave_fd)
3154 def _spawn(self, args, **kwargs):
3156 root_config = self.pkg.root_config
3158 mydbapi = root_config.trees[tree].dbapi
3159 settings = self.settings
3160 ebuild_path = settings["EBUILD"]
3161 debug = settings.get("PORTAGE_DEBUG") == "1"
3163 rval = portage.doebuild(ebuild_path, self.phase,
3164 root_config.root, settings, debug,
3165 mydbapi=mydbapi, tree=tree, **kwargs)
3169 def _set_returncode(self, wait_retval):
3170 SpawnProcess._set_returncode(self, wait_retval)
3172 if self.phase not in ("clean", "cleanrm"):
3173 self.returncode = portage._doebuild_exit_status_check_and_log(
3174 self.settings, self.phase, self.returncode)
3176 if self.phase == "test" and self.returncode != os.EX_OK and \
3177 "test-fail-continue" in self.settings.features:
3178 self.returncode = os.EX_OK
3180 portage._post_phase_userpriv_perms(self.settings)
3182 class EbuildPhase(CompositeTask):
3184 __slots__ = ("background", "pkg", "phase",
3185 "scheduler", "settings", "tree")
3187 _post_phase_cmds = portage._post_phase_cmds
3191 ebuild_process = EbuildProcess(background=self.background,
3192 pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3193 settings=self.settings, tree=self.tree)
3195 self._start_task(ebuild_process, self._ebuild_exit)
3197 def _ebuild_exit(self, ebuild_process):
3199 if self.phase == "install":
3201 log_path = self.settings.get("PORTAGE_LOG_FILE")
3203 if self.background and log_path is not None:
3204 log_file = open(log_path, 'a')
3207 portage._check_build_log(self.settings, out=out)
3209 if log_file is not None:
3212 if self._default_exit(ebuild_process) != os.EX_OK:
3216 settings = self.settings
3218 if self.phase == "install":
3219 portage._post_src_install_chost_fix(settings)
3220 portage._post_src_install_uid_fix(settings)
3222 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3223 if post_phase_cmds is not None:
3224 post_phase = MiscFunctionsProcess(background=self.background,
3225 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3226 scheduler=self.scheduler, settings=settings)
3227 self._start_task(post_phase, self._post_phase_exit)
3230 self.returncode = ebuild_process.returncode
3231 self._current_task = None
3234 def _post_phase_exit(self, post_phase):
3235 if self._final_exit(post_phase) != os.EX_OK:
3236 writemsg("!!! post %s failed; exiting.\n" % self.phase,
3238 self._current_task = None
3242 class EbuildBinpkg(EbuildProcess):
3244 This assumes that src_install() has successfully completed.
3246 __slots__ = ("_binpkg_tmpfile",)
3249 self.phase = "package"
3250 self.tree = "porttree"
3252 root_config = pkg.root_config
3253 portdb = root_config.trees["porttree"].dbapi
3254 bintree = root_config.trees["bintree"]
3255 ebuild_path = portdb.findname(self.pkg.cpv)
3256 settings = self.settings
3257 debug = settings.get("PORTAGE_DEBUG") == "1"
3259 bintree.prevent_collision(pkg.cpv)
3260 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3261 pkg.cpv + ".tbz2." + str(os.getpid()))
3262 self._binpkg_tmpfile = binpkg_tmpfile
3263 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3264 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3267 EbuildProcess._start(self)
3269 settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3271 def _set_returncode(self, wait_retval):
3272 EbuildProcess._set_returncode(self, wait_retval)
3275 bintree = pkg.root_config.trees["bintree"]
3276 binpkg_tmpfile = self._binpkg_tmpfile
3277 if self.returncode == os.EX_OK:
3278 bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3280 class EbuildMerge(SlotObject):
3282 __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3283 "pkg", "pkg_count", "pkg_path", "pretend",
3284 "scheduler", "settings", "tree", "world_atom")
3287 root_config = self.pkg.root_config
3288 settings = self.settings
3289 retval = portage.merge(settings["CATEGORY"],
3290 settings["PF"], settings["D"],
3291 os.path.join(settings["PORTAGE_BUILDDIR"],
3292 "build-info"), root_config.root, settings,
3293 myebuild=settings["EBUILD"],
3294 mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3295 vartree=root_config.trees["vartree"],
3296 prev_mtimes=self.ldpath_mtimes,
3297 scheduler=self.scheduler,
3298 blockers=self.find_blockers)
3300 if retval == os.EX_OK:
3301 self.world_atom(self.pkg)
3306 def _log_success(self):
3308 pkg_count = self.pkg_count
3309 pkg_path = self.pkg_path
3310 logger = self.logger
3311 if "noclean" not in self.settings.features:
3312 short_msg = "emerge: (%s of %s) %s Clean Post" % \
3313 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3314 logger.log((" === (%s of %s) " + \
3315 "Post-Build Cleaning (%s::%s)") % \
3316 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3317 short_msg=short_msg)
3318 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3319 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3321 class PackageUninstall(AsynchronousTask):
3323 __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3327 unmerge(self.pkg.root_config, self.opts, "unmerge",
3328 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3329 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3330 writemsg_level=self._writemsg_level)
3331 except UninstallFailure, e:
3332 self.returncode = e.status
3334 self.returncode = os.EX_OK
3337 def _writemsg_level(self, msg, level=0, noiselevel=0):
3339 log_path = self.settings.get("PORTAGE_LOG_FILE")
3340 background = self.background
3342 if log_path is None:
3343 if not (background and level < logging.WARNING):
3344 portage.util.writemsg_level(msg,
3345 level=level, noiselevel=noiselevel)
3348 portage.util.writemsg_level(msg,
3349 level=level, noiselevel=noiselevel)
3351 f = open(log_path, 'a')
3357 class Binpkg(CompositeTask):
3359 __slots__ = ("find_blockers",
3360 "ldpath_mtimes", "logger", "opts",
3361 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3362 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3363 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3365 def _writemsg_level(self, msg, level=0, noiselevel=0):
3367 if not self.background:
3368 portage.util.writemsg_level(msg,
3369 level=level, noiselevel=noiselevel)
3371 log_path = self.settings.get("PORTAGE_LOG_FILE")
3372 if log_path is not None:
3373 f = open(log_path, 'a')
3382 settings = self.settings
3383 settings.setcpv(pkg)
3384 self._tree = "bintree"
3385 self._bintree = self.pkg.root_config.trees[self._tree]
3386 self._verify = not self.opts.pretend
3388 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3389 "portage", pkg.category, pkg.pf)
3390 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3391 pkg=pkg, settings=settings)
3392 self._image_dir = os.path.join(dir_path, "image")
3393 self._infloc = os.path.join(dir_path, "build-info")
3394 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3395 settings["EBUILD"] = self._ebuild_path
3396 debug = settings.get("PORTAGE_DEBUG") == "1"
3397 portage.doebuild_environment(self._ebuild_path, "setup",
3398 settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3399 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3401 # The prefetcher has already completed or it
3402 # could be running now. If it's running now,
3403 # wait for it to complete since it holds
3404 # a lock on the file being fetched. The
3405 # portage.locks functions are only designed
3406 # to work between separate processes. Since
3407 # the lock is held by the current process,
3408 # use the scheduler and fetcher methods to
3409 # synchronize with the fetcher.
3410 prefetcher = self.prefetcher
3411 if prefetcher is None:
3413 elif not prefetcher.isAlive():
3415 elif prefetcher.poll() is None:
3417 waiting_msg = ("Fetching '%s' " + \
3418 "in the background. " + \
3419 "To view fetch progress, run `tail -f " + \
3420 "/var/log/emerge-fetch.log` in another " + \
3421 "terminal.") % prefetcher.pkg_path
3422 msg_prefix = colorize("GOOD", " * ")
3423 from textwrap import wrap
3424 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3425 for line in wrap(waiting_msg, 65))
3426 if not self.background:
3427 writemsg(waiting_msg, noiselevel=-1)
3429 self._current_task = prefetcher
3430 prefetcher.addExitListener(self._prefetch_exit)
3433 self._prefetch_exit(prefetcher)
3435 def _prefetch_exit(self, prefetcher):
3438 pkg_count = self.pkg_count
3439 if not (self.opts.pretend or self.opts.fetchonly):
3440 self._build_dir.lock()
3441 # If necessary, discard old log so that we don't
3443 self._build_dir.clean_log()
3444 # Initialze PORTAGE_LOG_FILE.
3445 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3446 fetcher = BinpkgFetcher(background=self.background,
3447 logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3448 pretend=self.opts.pretend, scheduler=self.scheduler)
3449 pkg_path = fetcher.pkg_path
3450 self._pkg_path = pkg_path
3452 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3454 msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3455 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3456 short_msg = "emerge: (%s of %s) %s Fetch" % \
3457 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3458 self.logger.log(msg, short_msg=short_msg)
3459 self._start_task(fetcher, self._fetcher_exit)
3462 self._fetcher_exit(fetcher)
3464 def _fetcher_exit(self, fetcher):
3466 # The fetcher only has a returncode when
3467 # --getbinpkg is enabled.
3468 if fetcher.returncode is not None:
3469 self._fetched_pkg = True
3470 if self._default_exit(fetcher) != os.EX_OK:
3471 self._unlock_builddir()
3475 if self.opts.pretend:
3476 self._current_task = None
3477 self.returncode = os.EX_OK
3485 logfile = self.settings.get("PORTAGE_LOG_FILE")
3486 verifier = BinpkgVerifier(background=self.background,
3487 logfile=logfile, pkg=self.pkg)
3488 self._start_task(verifier, self._verifier_exit)
3491 self._verifier_exit(verifier)
3493 def _verifier_exit(self, verifier):
3494 if verifier is not None and \
3495 self._default_exit(verifier) != os.EX_OK:
3496 self._unlock_builddir()
3500 logger = self.logger
3502 pkg_count = self.pkg_count
3503 pkg_path = self._pkg_path
3505 if self._fetched_pkg:
3506 self._bintree.inject(pkg.cpv, filename=pkg_path)
3508 if self.opts.fetchonly:
3509 self._current_task = None
3510 self.returncode = os.EX_OK
3514 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3515 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3516 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3517 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3518 logger.log(msg, short_msg=short_msg)
3521 settings = self.settings
3522 ebuild_phase = EbuildPhase(background=self.background,
3523 pkg=pkg, phase=phase, scheduler=self.scheduler,
3524 settings=settings, tree=self._tree)
3526 self._start_task(ebuild_phase, self._clean_exit)
3528 def _clean_exit(self, clean_phase):
3529 if self._default_exit(clean_phase) != os.EX_OK:
3530 self._unlock_builddir()
3534 dir_path = self._build_dir.dir_path
3536 infloc = self._infloc
3538 pkg_path = self._pkg_path
3541 for mydir in (dir_path, self._image_dir, infloc):
3542 portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3543 gid=portage.data.portage_gid, mode=dir_mode)
3545 # This initializes PORTAGE_LOG_FILE.
3546 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3547 self._writemsg_level(">>> Extracting info\n")
3549 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3550 check_missing_metadata = ("CATEGORY", "PF")
3551 missing_metadata = set()
3552 for k in check_missing_metadata:
3553 v = pkg_xpak.getfile(k)
3555 missing_metadata.add(k)
3557 pkg_xpak.unpackinfo(infloc)
3558 for k in missing_metadata:
3566 f = open(os.path.join(infloc, k), 'wb')
3572 # Store the md5sum in the vdb.
3573 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3575 f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3579 # This gives bashrc users an opportunity to do various things
3580 # such as remove binary packages after they're installed.
3581 settings = self.settings
3582 settings.setcpv(self.pkg)
3583 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3584 settings.backup_changes("PORTAGE_BINPKG_FILE")
3587 setup_phase = EbuildPhase(background=self.background,
3588 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3589 settings=settings, tree=self._tree)
3591 setup_phase.addExitListener(self._setup_exit)
3592 self._current_task = setup_phase
3593 self.scheduler.scheduleSetup(setup_phase)
3595 def _setup_exit(self, setup_phase):
3596 if self._default_exit(setup_phase) != os.EX_OK:
3597 self._unlock_builddir()
3601 extractor = BinpkgExtractorAsync(background=self.background,
3602 image_dir=self._image_dir,
3603 pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3604 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3605 self._start_task(extractor, self._extractor_exit)
3607 def _extractor_exit(self, extractor):
3608 if self._final_exit(extractor) != os.EX_OK:
3609 self._unlock_builddir()
3610 writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3614 def _unlock_builddir(self):
3615 if self.opts.pretend or self.opts.fetchonly:
3617 portage.elog.elog_process(self.pkg.cpv, self.settings)
3618 self._build_dir.unlock()
3622 # This gives bashrc users an opportunity to do various things
3623 # such as remove binary packages after they're installed.
3624 settings = self.settings
3625 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3626 settings.backup_changes("PORTAGE_BINPKG_FILE")
3628 merge = EbuildMerge(find_blockers=self.find_blockers,
3629 ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3630 pkg=self.pkg, pkg_count=self.pkg_count,
3631 pkg_path=self._pkg_path, scheduler=self.scheduler,
3632 settings=settings, tree=self._tree, world_atom=self.world_atom)
3635 retval = merge.execute()
3637 settings.pop("PORTAGE_BINPKG_FILE", None)
3638 self._unlock_builddir()
3641 class BinpkgFetcher(SpawnProcess):
3643 __slots__ = ("pkg", "pretend",
3644 "locked", "pkg_path", "_lock_obj")
3646 def __init__(self, **kwargs):
3647 SpawnProcess.__init__(self, **kwargs)
3649 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3657 pretend = self.pretend
3658 bintree = pkg.root_config.trees["bintree"]
3659 settings = bintree.settings
3660 use_locks = "distlocks" in settings.features
3661 pkg_path = self.pkg_path
3664 portage.util.ensure_dirs(os.path.dirname(pkg_path))
3667 exists = os.path.exists(pkg_path)
3668 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3669 if not (pretend or resume):
3670 # Remove existing file or broken symlink.
3676 # urljoin doesn't work correctly with
3677 # unrecognized protocols like sftp
3678 if bintree._remote_has_index:
3679 rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3681 rel_uri = pkg.cpv + ".tbz2"
3682 uri = bintree._remote_base_uri.rstrip("/") + \
3683 "/" + rel_uri.lstrip("/")
3685 uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3686 "/" + pkg.pf + ".tbz2"
3689 portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3690 self.returncode = os.EX_OK
3694 protocol = urlparse.urlparse(uri)[0]
3695 fcmd_prefix = "FETCHCOMMAND"
3697 fcmd_prefix = "RESUMECOMMAND"
3698 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3700 fcmd = settings.get(fcmd_prefix)
3703 "DISTDIR" : os.path.dirname(pkg_path),
3705 "FILE" : os.path.basename(pkg_path)
3708 fetch_env = dict(settings.iteritems())
3709 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3710 for x in shlex.split(fcmd)]
3712 if self.fd_pipes is None:
3714 fd_pipes = self.fd_pipes
3716 # Redirect all output to stdout since some fetchers like
3717 # wget pollute stderr (if portage detects a problem then it
3718 # can send it's own message to stderr).
3719 fd_pipes.setdefault(0, sys.stdin.fileno())
3720 fd_pipes.setdefault(1, sys.stdout.fileno())
3721 fd_pipes.setdefault(2, sys.stdout.fileno())
3723 self.args = fetch_args
3724 self.env = fetch_env
3725 SpawnProcess._start(self)
3727 def _set_returncode(self, wait_retval):
3728 SpawnProcess._set_returncode(self, wait_retval)
3729 if self.returncode == os.EX_OK:
3730 # If possible, update the mtime to match the remote package if
3731 # the fetcher didn't already do it automatically.
3732 bintree = self.pkg.root_config.trees["bintree"]
3733 if bintree._remote_has_index:
3734 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3735 if remote_mtime is not None:
3737 remote_mtime = long(remote_mtime)
3742 local_mtime = long(os.stat(self.pkg_path).st_mtime)
3746 if remote_mtime != local_mtime:
3748 os.utime(self.pkg_path,
3749 (remote_mtime, remote_mtime))
3758 This raises an AlreadyLocked exception if lock() is called
3759 while a lock is already held. In order to avoid this, call
3760 unlock() or check whether the "locked" attribute is True
3761 or False before calling lock().
3763 if self._lock_obj is not None:
3764 raise self.AlreadyLocked((self._lock_obj,))
3766 self._lock_obj = portage.locks.lockfile(
3767 self.pkg_path, wantnewlockfile=1)
3770 class AlreadyLocked(portage.exception.PortageException):
3774 if self._lock_obj is None:
3776 portage.locks.unlockfile(self._lock_obj)
3777 self._lock_obj = None
3780 class BinpkgVerifier(AsynchronousTask):
3781 __slots__ = ("logfile", "pkg",)
3785 Note: Unlike a normal AsynchronousTask.start() method,
3786 this one does all work is synchronously. The returncode
3787 attribute will be set before it returns.
3791 root_config = pkg.root_config
3792 bintree = root_config.trees["bintree"]
3794 stdout_orig = sys.stdout
3795 stderr_orig = sys.stderr
3797 if self.background and self.logfile is not None:
3798 log_file = open(self.logfile, 'a')
3800 if log_file is not None:
3801 sys.stdout = log_file
3802 sys.stderr = log_file
3804 bintree.digestCheck(pkg)
3805 except portage.exception.FileNotFound:
3806 writemsg("!!! Fetching Binary failed " + \
3807 "for '%s'\n" % pkg.cpv, noiselevel=-1)
3809 except portage.exception.DigestException, e:
3810 writemsg("\n!!! Digest verification failed:\n",
3812 writemsg("!!! %s\n" % e.value[0],
3814 writemsg("!!! Reason: %s\n" % e.value[1],
3816 writemsg("!!! Got: %s\n" % e.value[2],
3818 writemsg("!!! Expected: %s\n" % e.value[3],
3821 if rval != os.EX_OK:
3822 pkg_path = bintree.getname(pkg.cpv)
3823 head, tail = os.path.split(pkg_path)
3824 temp_filename = portage._checksum_failure_temp_file(head, tail)
3825 writemsg("File renamed to '%s'\n" % (temp_filename,),
3828 sys.stdout = stdout_orig
3829 sys.stderr = stderr_orig
3830 if log_file is not None:
3833 self.returncode = rval
3836 class BinpkgPrefetcher(CompositeTask):
3838 __slots__ = ("pkg",) + \
3839 ("pkg_path", "_bintree",)
3842 self._bintree = self.pkg.root_config.trees["bintree"]
3843 fetcher = BinpkgFetcher(background=self.background,
3844 logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3845 scheduler=self.scheduler)
3846 self.pkg_path = fetcher.pkg_path
3847 self._start_task(fetcher, self._fetcher_exit)
3849 def _fetcher_exit(self, fetcher):
3851 if self._default_exit(fetcher) != os.EX_OK:
3855 verifier = BinpkgVerifier(background=self.background,
3856 logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3857 self._start_task(verifier, self._verifier_exit)
3859 def _verifier_exit(self, verifier):
3860 if self._default_exit(verifier) != os.EX_OK:
3864 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3866 self._current_task = None
3867 self.returncode = os.EX_OK
3870 class BinpkgExtractorAsync(SpawnProcess):
3872 __slots__ = ("image_dir", "pkg", "pkg_path")
3874 _shell_binary = portage.const.BASH_BINARY
3877 self.args = [self._shell_binary, "-c",
3878 "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3879 (portage._shell_quote(self.pkg_path),
3880 portage._shell_quote(self.image_dir))]
3882 self.env = self.pkg.root_config.settings.environ()
3883 SpawnProcess._start(self)
3885 class MergeListItem(CompositeTask):
3888 TODO: For parallel scheduling, everything here needs asynchronous
3889 execution support (start, poll, and wait methods).
3892 __slots__ = ("args_set",
3893 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3894 "find_blockers", "logger", "mtimedb", "pkg",
3895 "pkg_count", "pkg_to_replace", "prefetcher",
3896 "settings", "statusMessage", "world_atom") + \
3902 build_opts = self.build_opts
3905 # uninstall, executed by self.merge()
3906 self.returncode = os.EX_OK
3910 args_set = self.args_set
3911 find_blockers = self.find_blockers
3912 logger = self.logger
3913 mtimedb = self.mtimedb
3914 pkg_count = self.pkg_count
3915 scheduler = self.scheduler
3916 settings = self.settings
3917 world_atom = self.world_atom
3918 ldpath_mtimes = mtimedb["ldpath"]
3920 action_desc = "Emerging"
3922 if pkg.type_name == "binary":
3923 action_desc += " binary"
3925 if build_opts.fetchonly:
3926 action_desc = "Fetching"
3928 msg = "%s (%s of %s) %s" % \
3930 colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3931 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3932 colorize("GOOD", pkg.cpv))
3934 portdb = pkg.root_config.trees["porttree"].dbapi
3935 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3936 if portdir_repo_name:
3937 pkg_repo_name = pkg.metadata.get("repository")
3938 if pkg_repo_name != portdir_repo_name:
3939 if not pkg_repo_name:
3940 pkg_repo_name = "unknown repo"
3941 msg += " from %s" % pkg_repo_name
3944 msg += " %s %s" % (preposition, pkg.root)
3946 if not build_opts.pretend:
3947 self.statusMessage(msg)
3948 logger.log(" >>> emerge (%s of %s) %s to %s" % \
3949 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3951 if pkg.type_name == "ebuild":
3953 build = EbuildBuild(args_set=args_set,
3954 background=self.background,
3955 config_pool=self.config_pool,
3956 find_blockers=find_blockers,
3957 ldpath_mtimes=ldpath_mtimes, logger=logger,
3958 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3959 prefetcher=self.prefetcher, scheduler=scheduler,
3960 settings=settings, world_atom=world_atom)
3962 self._install_task = build
3963 self._start_task(build, self._default_final_exit)
3966 elif pkg.type_name == "binary":
3968 binpkg = Binpkg(background=self.background,
3969 find_blockers=find_blockers,
3970 ldpath_mtimes=ldpath_mtimes, logger=logger,
3971 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3972 prefetcher=self.prefetcher, settings=settings,
3973 scheduler=scheduler, world_atom=world_atom)
3975 self._install_task = binpkg
3976 self._start_task(binpkg, self._default_final_exit)
3980 self._install_task.poll()
3981 return self.returncode
3984 self._install_task.wait()
3985 return self.returncode
3990 build_opts = self.build_opts
3991 find_blockers = self.find_blockers
3992 logger = self.logger
3993 mtimedb = self.mtimedb
3994 pkg_count = self.pkg_count
3995 prefetcher = self.prefetcher
3996 scheduler = self.scheduler
3997 settings = self.settings
3998 world_atom = self.world_atom
3999 ldpath_mtimes = mtimedb["ldpath"]
4002 if not (build_opts.buildpkgonly or \
4003 build_opts.fetchonly or build_opts.pretend):
4005 uninstall = PackageUninstall(background=self.background,
4006 ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
4007 pkg=pkg, scheduler=scheduler, settings=settings)
4010 retval = uninstall.wait()
4011 if retval != os.EX_OK:
4015 if build_opts.fetchonly or \
4016 build_opts.buildpkgonly:
4017 return self.returncode
4019 retval = self._install_task.install()
4022 class PackageMerge(AsynchronousTask):
4024 TODO: Implement asynchronous merge so that the scheduler can
4025 run while a merge is executing.
4028 __slots__ = ("merge",)
4032 pkg = self.merge.pkg
4033 pkg_count = self.merge.pkg_count
4036 action_desc = "Uninstalling"
4037 preposition = "from"
4039 action_desc = "Installing"
4042 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
4045 msg += " %s %s" % (preposition, pkg.root)
4047 if not self.merge.build_opts.fetchonly and \
4048 not self.merge.build_opts.pretend and \
4049 not self.merge.build_opts.buildpkgonly:
4050 self.merge.statusMessage(msg)
4052 self.returncode = self.merge.merge()
4055 class DependencyArg(object):
4056 def __init__(self, arg=None, root_config=None):
4058 self.root_config = root_config
4061 return str(self.arg)
4063 class AtomArg(DependencyArg):
4064 def __init__(self, atom=None, **kwargs):
4065 DependencyArg.__init__(self, **kwargs)
4067 if not isinstance(self.atom, portage.dep.Atom):
4068 self.atom = portage.dep.Atom(self.atom)
4069 self.set = (self.atom, )
4071 class PackageArg(DependencyArg):
4072 def __init__(self, package=None, **kwargs):
4073 DependencyArg.__init__(self, **kwargs)
4074 self.package = package
4075 self.atom = portage.dep.Atom("=" + package.cpv)
4076 self.set = (self.atom, )
4078 class SetArg(DependencyArg):
4079 def __init__(self, set=None, **kwargs):
4080 DependencyArg.__init__(self, **kwargs)
4082 self.name = self.arg[len(SETPREFIX):]
4084 class Dependency(SlotObject):
4085 __slots__ = ("atom", "blocker", "depth",
4086 "parent", "onlydeps", "priority", "root")
4087 def __init__(self, **kwargs):
4088 SlotObject.__init__(self, **kwargs)
4089 if self.priority is None:
4090 self.priority = DepPriority()
4091 if self.depth is None:
4094 class BlockerCache(portage.cache.mappings.MutableMapping):
4095 """This caches blockers of installed packages so that dep_check does not
4096 have to be done for every single installed package on every invocation of
4097 emerge. The cache is invalidated whenever it is detected that something
4098 has changed that might alter the results of dep_check() calls:
4099 1) the set of installed packages (including COUNTER) has changed
4100 2) the old-style virtuals have changed
4103 # Number of uncached packages to trigger cache update, since
4104 # it's wasteful to update it for every vdb change.
4105 _cache_threshold = 5
4107 class BlockerData(object):
4109 __slots__ = ("__weakref__", "atoms", "counter")
4111 def __init__(self, counter, atoms):
4112 self.counter = counter
4115 def __init__(self, myroot, vardb):
4117 self._virtuals = vardb.settings.getvirtuals()
4118 self._cache_filename = os.path.join(myroot,
4119 portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4120 self._cache_version = "1"
4121 self._cache_data = None
4122 self._modified = set()
4127 f = open(self._cache_filename, mode='rb')
4128 mypickle = pickle.Unpickler(f)
4130 mypickle.find_global = None
4131 except AttributeError:
4132 # TODO: If py3k, override Unpickler.find_class().
4134 self._cache_data = mypickle.load()
4137 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4138 if isinstance(e, pickle.UnpicklingError):
4139 writemsg("!!! Error loading '%s': %s\n" % \
4140 (self._cache_filename, str(e)), noiselevel=-1)
4143 cache_valid = self._cache_data and \
4144 isinstance(self._cache_data, dict) and \
4145 self._cache_data.get("version") == self._cache_version and \
4146 isinstance(self._cache_data.get("blockers"), dict)
4148 # Validate all the atoms and counters so that
4149 # corruption is detected as soon as possible.
4150 invalid_items = set()
4151 for k, v in self._cache_data["blockers"].iteritems():
4152 if not isinstance(k, basestring):
4153 invalid_items.add(k)
4156 if portage.catpkgsplit(k) is None:
4157 invalid_items.add(k)
4159 except portage.exception.InvalidData:
4160 invalid_items.add(k)
4162 if not isinstance(v, tuple) or \
4164 invalid_items.add(k)
4167 if not isinstance(counter, (int, long)):
4168 invalid_items.add(k)
4170 if not isinstance(atoms, (list, tuple)):
4171 invalid_items.add(k)
4173 invalid_atom = False
4175 if not isinstance(atom, basestring):
4178 if atom[:1] != "!" or \
4179 not portage.isvalidatom(
4180 atom, allow_blockers=True):
4184 invalid_items.add(k)
4187 for k in invalid_items:
4188 del self._cache_data["blockers"][k]
4189 if not self._cache_data["blockers"]:
4193 self._cache_data = {"version":self._cache_version}
4194 self._cache_data["blockers"] = {}
4195 self._cache_data["virtuals"] = self._virtuals
4196 self._modified.clear()
4199 """If the current user has permission and the internal blocker cache
4200 been updated, save it to disk and mark it unmodified. This is called
4201 by emerge after it has proccessed blockers for all installed packages.
4202 Currently, the cache is only written if the user has superuser
4203 privileges (since that's required to obtain a lock), but all users
4204 have read access and benefit from faster blocker lookups (as long as
4205 the entire cache is still valid). The cache is stored as a pickled
4206 dict object with the following format:
4210 "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4211 "virtuals" : vardb.settings.getvirtuals()
4214 if len(self._modified) >= self._cache_threshold and \
4217 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4218 pickle.dump(self._cache_data, f, protocol=2)
4220 portage.util.apply_secpass_permissions(
4221 self._cache_filename, gid=portage.portage_gid, mode=0644)
4222 except (IOError, OSError), e:
4224 self._modified.clear()
4226 def __setitem__(self, cpv, blocker_data):
4228 Update the cache and mark it as modified for a future call to
4231 @param cpv: Package for which to cache blockers.
4233 @param blocker_data: An object with counter and atoms attributes.
4234 @type blocker_data: BlockerData
4236 self._cache_data["blockers"][cpv] = \
4237 (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4238 self._modified.add(cpv)
4241 if self._cache_data is None:
4242 # triggered by python-trace
4244 return iter(self._cache_data["blockers"])
4246 def __delitem__(self, cpv):
4247 del self._cache_data["blockers"][cpv]
4249 def __getitem__(self, cpv):
4252 @returns: An object with counter and atoms attributes.
4254 return self.BlockerData(*self._cache_data["blockers"][cpv])
4256 class BlockerDB(object):
4258 def __init__(self, root_config):
4259 self._root_config = root_config
4260 self._vartree = root_config.trees["vartree"]
4261 self._portdb = root_config.trees["porttree"].dbapi
4263 self._dep_check_trees = None
4264 self._fake_vartree = None
4266 def _get_fake_vartree(self, acquire_lock=0):
4267 fake_vartree = self._fake_vartree
4268 if fake_vartree is None:
4269 fake_vartree = FakeVartree(self._root_config,
4270 acquire_lock=acquire_lock)
4271 self._fake_vartree = fake_vartree
4272 self._dep_check_trees = { self._vartree.root : {
4273 "porttree" : fake_vartree,
4274 "vartree" : fake_vartree,
4277 fake_vartree.sync(acquire_lock=acquire_lock)
4280 def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4281 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4282 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4283 settings = self._vartree.settings
4284 stale_cache = set(blocker_cache)
4285 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4286 dep_check_trees = self._dep_check_trees
4287 vardb = fake_vartree.dbapi
4288 installed_pkgs = list(vardb)
4290 for inst_pkg in installed_pkgs:
4291 stale_cache.discard(inst_pkg.cpv)
4292 cached_blockers = blocker_cache.get(inst_pkg.cpv)
4293 if cached_blockers is not None and \
4294 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4295 cached_blockers = None
4296 if cached_blockers is not None:
4297 blocker_atoms = cached_blockers.atoms
4299 # Use aux_get() to trigger FakeVartree global
4300 # updates on *DEPEND when appropriate.
4301 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4303 portage.dep._dep_check_strict = False
4304 success, atoms = portage.dep_check(depstr,
4305 vardb, settings, myuse=inst_pkg.use.enabled,
4306 trees=dep_check_trees, myroot=inst_pkg.root)
4308 portage.dep._dep_check_strict = True
4310 pkg_location = os.path.join(inst_pkg.root,
4311 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4312 portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4313 (pkg_location, atoms), noiselevel=-1)
4316 blocker_atoms = [atom for atom in atoms \
4317 if atom.startswith("!")]
4318 blocker_atoms.sort()
4319 counter = long(inst_pkg.metadata["COUNTER"])
4320 blocker_cache[inst_pkg.cpv] = \
4321 blocker_cache.BlockerData(counter, blocker_atoms)
4322 for cpv in stale_cache:
4323 del blocker_cache[cpv]
4324 blocker_cache.flush()
4326 blocker_parents = digraph()
4328 for pkg in installed_pkgs:
4329 for blocker_atom in blocker_cache[pkg.cpv].atoms:
4330 blocker_atom = blocker_atom.lstrip("!")
4331 blocker_atoms.append(blocker_atom)
4332 blocker_parents.add(blocker_atom, pkg)
4334 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4335 blocking_pkgs = set()
4336 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4337 blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4339 # Check for blockers in the other direction.
4340 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4342 portage.dep._dep_check_strict = False
4343 success, atoms = portage.dep_check(depstr,
4344 vardb, settings, myuse=new_pkg.use.enabled,
4345 trees=dep_check_trees, myroot=new_pkg.root)
4347 portage.dep._dep_check_strict = True
4349 # We should never get this far with invalid deps.
4350 show_invalid_depstring_notice(new_pkg, depstr, atoms)
4353 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4356 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4357 for inst_pkg in installed_pkgs:
4359 blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4360 except (portage.exception.InvalidDependString, StopIteration):
4362 blocking_pkgs.add(inst_pkg)
4364 return blocking_pkgs
4366 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4368 msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4369 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4370 p_type, p_root, p_key, p_status = parent_node
4372 if p_status == "nomerge":
4373 category, pf = portage.catsplit(p_key)
4374 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4375 msg.append("Portage is unable to process the dependencies of the ")
4376 msg.append("'%s' package. " % p_key)
4377 msg.append("In order to correct this problem, the package ")
4378 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4379 msg.append("As a temporary workaround, the --nodeps option can ")
4380 msg.append("be used to ignore all dependencies. For reference, ")
4381 msg.append("the problematic dependencies can be found in the ")
4382 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4384 msg.append("This package can not be installed. ")
4385 msg.append("Please notify the '%s' package maintainer " % p_key)
4386 msg.append("about this problem.")
4388 msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4389 writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4391 class PackageVirtualDbapi(portage.dbapi):
4393 A dbapi-like interface class that represents the state of the installed
4394 package database as new packages are installed, replacing any packages
4395 that previously existed in the same slot. The main difference between
4396 this class and fakedbapi is that this one uses Package instances
4397 internally (passed in via cpv_inject() and cpv_remove() calls).
4399 def __init__(self, settings):
4400 portage.dbapi.__init__(self)
4401 self.settings = settings
4402 self._match_cache = {}
4408 Remove all packages.
4412 self._cp_map.clear()
4413 self._cpv_map.clear()
4416 obj = PackageVirtualDbapi(self.settings)
4417 obj._match_cache = self._match_cache.copy()
4418 obj._cp_map = self._cp_map.copy()
4419 for k, v in obj._cp_map.iteritems():
4420 obj._cp_map[k] = v[:]
4421 obj._cpv_map = self._cpv_map.copy()
4425 return self._cpv_map.itervalues()
4427 def __contains__(self, item):
4428 existing = self._cpv_map.get(item.cpv)
4429 if existing is not None and \
4434 def get(self, item, default=None):
4435 cpv = getattr(item, "cpv", None)
4439 type_name, root, cpv, operation = item
4441 existing = self._cpv_map.get(cpv)
4442 if existing is not None and \
4447 def match_pkgs(self, atom):
4448 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4450 def _clear_cache(self):
4451 if self._categories is not None:
4452 self._categories = None
4453 if self._match_cache:
4454 self._match_cache = {}
4456 def match(self, origdep, use_cache=1):
4457 result = self._match_cache.get(origdep)
4458 if result is not None:
4460 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4461 self._match_cache[origdep] = result
4464 def cpv_exists(self, cpv):
4465 return cpv in self._cpv_map
4467 def cp_list(self, mycp, use_cache=1):
4468 cachelist = self._match_cache.get(mycp)
4469 # cp_list() doesn't expand old-style virtuals
4470 if cachelist and cachelist[0].startswith(mycp):
4472 cpv_list = self._cp_map.get(mycp)
4473 if cpv_list is None:
4476 cpv_list = [pkg.cpv for pkg in cpv_list]
4477 self._cpv_sort_ascending(cpv_list)
4478 if not (not cpv_list and mycp.startswith("virtual/")):
4479 self._match_cache[mycp] = cpv_list
4483 return list(self._cp_map)
4486 return list(self._cpv_map)
4488 def cpv_inject(self, pkg):
4489 cp_list = self._cp_map.get(pkg.cp)
4492 self._cp_map[pkg.cp] = cp_list
4493 e_pkg = self._cpv_map.get(pkg.cpv)
4494 if e_pkg is not None:
4497 self.cpv_remove(e_pkg)
4498 for e_pkg in cp_list:
4499 if e_pkg.slot_atom == pkg.slot_atom:
4502 self.cpv_remove(e_pkg)
4505 self._cpv_map[pkg.cpv] = pkg
4508 def cpv_remove(self, pkg):
4509 old_pkg = self._cpv_map.get(pkg.cpv)
4512 self._cp_map[pkg.cp].remove(pkg)
4513 del self._cpv_map[pkg.cpv]
4516 def aux_get(self, cpv, wants):
4517 metadata = self._cpv_map[cpv].metadata
4518 return [metadata.get(x, "") for x in wants]
4520 def aux_update(self, cpv, values):
4521 self._cpv_map[cpv].metadata.update(values)
4524 class depgraph(object):
4526 pkg_tree_map = RootConfig.pkg_tree_map
4528 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4530 def __init__(self, settings, trees, myopts, myparams, spinner):
4531 self.settings = settings
4532 self.target_root = settings["ROOT"]
4533 self.myopts = myopts
4534 self.myparams = myparams
4536 if settings.get("PORTAGE_DEBUG", "") == "1":
4538 self.spinner = spinner
4539 self._running_root = trees["/"]["root_config"]
4540 self._opts_no_restart = Scheduler._opts_no_restart
4541 self.pkgsettings = {}
4542 # Maps slot atom to package for each Package added to the graph.
4543 self._slot_pkg_map = {}
4544 # Maps nodes to the reasons they were selected for reinstallation.
4545 self._reinstall_nodes = {}
4548 self._trees_orig = trees
4550 # Contains a filtered view of preferred packages that are selected
4551 # from available repositories.
4552 self._filtered_trees = {}
4553 # Contains installed packages and new packages that have been added
4555 self._graph_trees = {}
4556 # All Package instances
4557 self._pkg_cache = {}
4558 for myroot in trees:
4559 self.trees[myroot] = {}
4560 # Create a RootConfig instance that references
4561 # the FakeVartree instead of the real one.
4562 self.roots[myroot] = RootConfig(
4563 trees[myroot]["vartree"].settings,
4565 trees[myroot]["root_config"].setconfig)
4566 for tree in ("porttree", "bintree"):
4567 self.trees[myroot][tree] = trees[myroot][tree]
4568 self.trees[myroot]["vartree"] = \
4569 FakeVartree(trees[myroot]["root_config"],
4570 pkg_cache=self._pkg_cache)
4571 self.pkgsettings[myroot] = portage.config(
4572 clone=self.trees[myroot]["vartree"].settings)
4573 self._slot_pkg_map[myroot] = {}
4574 vardb = self.trees[myroot]["vartree"].dbapi
4575 preload_installed_pkgs = "--nodeps" not in self.myopts and \
4576 "--buildpkgonly" not in self.myopts
4577 # This fakedbapi instance will model the state that the vdb will
4578 # have after new packages have been installed.
4579 fakedb = PackageVirtualDbapi(vardb.settings)
4580 if preload_installed_pkgs:
4582 self.spinner.update()
4583 # This triggers metadata updates via FakeVartree.
4584 vardb.aux_get(pkg.cpv, [])
4585 fakedb.cpv_inject(pkg)
4587 # Now that the vardb state is cached in our FakeVartree,
4588 # we won't be needing the real vartree cache for awhile.
4589 # To make some room on the heap, clear the vardbapi
4591 trees[myroot]["vartree"].dbapi._clear_cache()
4594 self.mydbapi[myroot] = fakedb
4597 graph_tree.dbapi = fakedb
4598 self._graph_trees[myroot] = {}
4599 self._filtered_trees[myroot] = {}
4600 # Substitute the graph tree for the vartree in dep_check() since we
4601 # want atom selections to be consistent with package selections
4602 # have already been made.
4603 self._graph_trees[myroot]["porttree"] = graph_tree
4604 self._graph_trees[myroot]["vartree"] = graph_tree
4605 def filtered_tree():
4607 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4608 self._filtered_trees[myroot]["porttree"] = filtered_tree
4610 # Passing in graph_tree as the vartree here could lead to better
4611 # atom selections in some cases by causing atoms for packages that
4612 # have been added to the graph to be preferred over other choices.
4613 # However, it can trigger atom selections that result in
4614 # unresolvable direct circular dependencies. For example, this
4615 # happens with gwydion-dylan which depends on either itself or
4616 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4617 # gwydion-dylan-bin needs to be selected in order to avoid a
4618 # an unresolvable direct circular dependency.
4620 # To solve the problem described above, pass in "graph_db" so that
4621 # packages that have been added to the graph are distinguishable
4622 # from other available packages and installed packages. Also, pass
4623 # the parent package into self._select_atoms() calls so that
4624 # unresolvable direct circular dependencies can be detected and
4625 # avoided when possible.
4626 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4627 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4630 portdb = self.trees[myroot]["porttree"].dbapi
4631 bindb = self.trees[myroot]["bintree"].dbapi
4632 vardb = self.trees[myroot]["vartree"].dbapi
4633 # (db, pkg_type, built, installed, db_keys)
4634 if "--usepkgonly" not in self.myopts:
4635 db_keys = list(portdb._aux_cache_keys)
4636 dbs.append((portdb, "ebuild", False, False, db_keys))
4637 if "--usepkg" in self.myopts:
4638 db_keys = list(bindb._aux_cache_keys)
4639 dbs.append((bindb, "binary", True, False, db_keys))
4640 db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4641 dbs.append((vardb, "installed", True, True, db_keys))
4642 self._filtered_trees[myroot]["dbs"] = dbs
4643 if "--usepkg" in self.myopts:
4644 self.trees[myroot]["bintree"].populate(
4645 "--getbinpkg" in self.myopts,
4646 "--getbinpkgonly" in self.myopts)
4649 self.digraph=portage.digraph()
4650 # contains all sets added to the graph
4652 # contains atoms given as arguments
4653 self._sets["args"] = InternalPackageSet()
4654 # contains all atoms from all sets added to the graph, including
4655 # atoms given as arguments
4656 self._set_atoms = InternalPackageSet()
4657 self._atom_arg_map = {}
4658 # contains all nodes pulled in by self._set_atoms
4659 self._set_nodes = set()
4660 # Contains only Blocker -> Uninstall edges
4661 self._blocker_uninstalls = digraph()
4662 # Contains only Package -> Blocker edges
4663 self._blocker_parents = digraph()
4664 # Contains only irrelevant Package -> Blocker edges
4665 self._irrelevant_blockers = digraph()
4666 # Contains only unsolvable Package -> Blocker edges
4667 self._unsolvable_blockers = digraph()
4668 # Contains all Blocker -> Blocked Package edges
4669 self._blocked_pkgs = digraph()
4670 # Contains world packages that have been protected from
4671 # uninstallation but may not have been added to the graph
4672 # if the graph is not complete yet.
4673 self._blocked_world_pkgs = {}
4674 self._slot_collision_info = {}
4675 # Slot collision nodes are not allowed to block other packages since
4676 # blocker validation is only able to account for one package per slot.
4677 self._slot_collision_nodes = set()
4678 self._parent_atoms = {}
4679 self._slot_conflict_parent_atoms = set()
4680 self._serialized_tasks_cache = None
4681 self._scheduler_graph = None
4682 self._displayed_list = None
4683 self._pprovided_args = []
4684 self._missing_args = []
4685 self._masked_installed = set()
4686 self._unsatisfied_deps_for_display = []
4687 self._unsatisfied_blockers_for_display = None
4688 self._circular_deps_for_display = None
4689 self._dep_stack = []
4690 self._unsatisfied_deps = []
4691 self._initially_unsatisfied_deps = []
4692 self._ignored_deps = []
4693 self._required_set_names = set(["system", "world"])
4694 self._select_atoms = self._select_atoms_highest_available
4695 self._select_package = self._select_pkg_highest_available
4696 self._highest_pkg_cache = {}
4698 def _show_slot_collision_notice(self):
4699 """Show an informational message advising the user to mask one of the
4700 the packages. In some cases it may be possible to resolve this
4701 automatically, but support for backtracking (removal nodes that have
4702 already been selected) will be required in order to handle all possible
4706 if not self._slot_collision_info:
4709 self._show_merge_list()
4712 msg.append("\n!!! Multiple package instances within a single " + \
4713 "package slot have been pulled\n")
4714 msg.append("!!! into the dependency graph, resulting" + \
4715 " in a slot conflict:\n\n")
4717 # Max number of parents shown, to avoid flooding the display.
4719 explanation_columns = 70
4721 for (slot_atom, root), slot_nodes \
4722 in self._slot_collision_info.iteritems():
4723 msg.append(str(slot_atom))
4726 for node in slot_nodes:
4728 msg.append(str(node))
4729 parent_atoms = self._parent_atoms.get(node)
4732 # Prefer conflict atoms over others.
4733 for parent_atom in parent_atoms:
4734 if len(pruned_list) >= max_parents:
4736 if parent_atom in self._slot_conflict_parent_atoms:
4737 pruned_list.add(parent_atom)
4739 # If this package was pulled in by conflict atoms then
4740 # show those alone since those are the most interesting.
4742 # When generating the pruned list, prefer instances
4743 # of DependencyArg over instances of Package.
4744 for parent_atom in parent_atoms:
4745 if len(pruned_list) >= max_parents:
4747 parent, atom = parent_atom
4748 if isinstance(parent, DependencyArg):
4749 pruned_list.add(parent_atom)
4750 # Prefer Packages instances that themselves have been
4751 # pulled into collision slots.
4752 for parent_atom in parent_atoms:
4753 if len(pruned_list) >= max_parents:
4755 parent, atom = parent_atom
4756 if isinstance(parent, Package) and \
4757 (parent.slot_atom, parent.root) \
4758 in self._slot_collision_info:
4759 pruned_list.add(parent_atom)
4760 for parent_atom in parent_atoms:
4761 if len(pruned_list) >= max_parents:
4763 pruned_list.add(parent_atom)
4764 omitted_parents = len(parent_atoms) - len(pruned_list)
4765 parent_atoms = pruned_list
4766 msg.append(" pulled in by\n")
4767 for parent_atom in parent_atoms:
4768 parent, atom = parent_atom
4769 msg.append(2*indent)
4770 if isinstance(parent,
4771 (PackageArg, AtomArg)):
4772 # For PackageArg and AtomArg types, it's
4773 # redundant to display the atom attribute.
4774 msg.append(str(parent))
4776 # Display the specific atom from SetArg or
4778 msg.append("%s required by %s" % (atom, parent))
4781 msg.append(2*indent)
4782 msg.append("(and %d more)\n" % omitted_parents)
4784 msg.append(" (no parents)\n")
4786 explanation = self._slot_conflict_explanation(slot_nodes)
4789 msg.append(indent + "Explanation:\n\n")
4790 for line in textwrap.wrap(explanation, explanation_columns):
4791 msg.append(2*indent + line + "\n")
4794 sys.stderr.write("".join(msg))
4797 explanations_for_all = explanations == len(self._slot_collision_info)
4799 if explanations_for_all or "--quiet" in self.myopts:
4803 msg.append("It may be possible to solve this problem ")
4804 msg.append("by using package.mask to prevent one of ")
4805 msg.append("those packages from being selected. ")
4806 msg.append("However, it is also possible that conflicting ")
4807 msg.append("dependencies exist such that they are impossible to ")
4808 msg.append("satisfy simultaneously. If such a conflict exists in ")
4809 msg.append("the dependencies of two different packages, then those ")
4810 msg.append("packages can not be installed simultaneously.")
4812 from formatter import AbstractFormatter, DumbWriter
4813 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4815 f.add_flowing_data(x)
4819 msg.append("For more information, see MASKED PACKAGES ")
4820 msg.append("section in the emerge man page or refer ")
4821 msg.append("to the Gentoo Handbook.")
4823 f.add_flowing_data(x)
4827 def _slot_conflict_explanation(self, slot_nodes):
4829 When a slot conflict occurs due to USE deps, there are a few
4830 different cases to consider:
4832 1) New USE are correctly set but --newuse wasn't requested so an
4833 installed package with incorrect USE happened to get pulled
4834 into graph before the new one.
4836 2) New USE are incorrectly set but an installed package has correct
4837 USE so it got pulled into the graph, and a new instance also got
4838 pulled in due to --newuse or an upgrade.
4840 3) Multiple USE deps exist that can't be satisfied simultaneously,
4841 and multiple package instances got pulled into the same slot to
4842 satisfy the conflicting deps.
4844 Currently, explanations and suggested courses of action are generated
4845 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4848 if len(slot_nodes) != 2:
4849 # Suggestions are only implemented for
4850 # conflicts between two packages.
4853 all_conflict_atoms = self._slot_conflict_parent_atoms
4855 matched_atoms = None
4856 unmatched_node = None
4857 for node in slot_nodes:
4858 parent_atoms = self._parent_atoms.get(node)
4859 if not parent_atoms:
4860 # Normally, there are always parent atoms. If there are
4861 # none then something unexpected is happening and there's
4862 # currently no suggestion for this case.
4864 conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4865 for parent_atom in conflict_atoms:
4866 parent, atom = parent_atom
4868 # Suggestions are currently only implemented for cases
4869 # in which all conflict atoms have USE deps.
4872 if matched_node is not None:
4873 # If conflict atoms match multiple nodes
4874 # then there's no suggestion.
4877 matched_atoms = conflict_atoms
4879 if unmatched_node is not None:
4880 # Neither node is matched by conflict atoms, and
4881 # there is no suggestion for this case.
4883 unmatched_node = node
4885 if matched_node is None or unmatched_node is None:
4886 # This shouldn't happen.
4889 if unmatched_node.installed and not matched_node.installed and \
4890 unmatched_node.cpv == matched_node.cpv:
4891 # If the conflicting packages are the same version then
4892 # --newuse should be all that's needed. If they are different
4893 # versions then there's some other problem.
4894 return "New USE are correctly set, but --newuse wasn't" + \
4895 " requested, so an installed package with incorrect USE " + \
4896 "happened to get pulled into the dependency graph. " + \
4897 "In order to solve " + \
4898 "this, either specify the --newuse option or explicitly " + \
4899 " reinstall '%s'." % matched_node.slot_atom
4901 if matched_node.installed and not unmatched_node.installed:
4902 atoms = sorted(set(atom for parent, atom in matched_atoms))
4903 explanation = ("New USE for '%s' are incorrectly set. " + \
4904 "In order to solve this, adjust USE to satisfy '%s'") % \
4905 (matched_node.slot_atom, atoms[0])
4907 for atom in atoms[1:-1]:
4908 explanation += ", '%s'" % (atom,)
4911 explanation += " and '%s'" % (atoms[-1],)
4917 def _process_slot_conflicts(self):
4919 Process slot conflict data to identify specific atoms which
4920 lead to conflict. These atoms only match a subset of the
4921 packages that have been pulled into a given slot.
4923 for (slot_atom, root), slot_nodes \
4924 in self._slot_collision_info.iteritems():
4926 all_parent_atoms = set()
4927 for pkg in slot_nodes:
4928 parent_atoms = self._parent_atoms.get(pkg)
4929 if not parent_atoms:
4931 all_parent_atoms.update(parent_atoms)
4933 for pkg in slot_nodes:
4934 parent_atoms = self._parent_atoms.get(pkg)
4935 if parent_atoms is None:
4936 parent_atoms = set()
4937 self._parent_atoms[pkg] = parent_atoms
4938 for parent_atom in all_parent_atoms:
4939 if parent_atom in parent_atoms:
4941 # Use package set for matching since it will match via
4942 # PROVIDE when necessary, while match_from_list does not.
4943 parent, atom = parent_atom
4944 atom_set = InternalPackageSet(
4945 initial_atoms=(atom,))
4946 if atom_set.findAtomForPackage(pkg):
4947 parent_atoms.add(parent_atom)
4949 self._slot_conflict_parent_atoms.add(parent_atom)
4951 def _reinstall_for_flags(self, forced_flags,
4952 orig_use, orig_iuse, cur_use, cur_iuse):
4953 """Return a set of flags that trigger reinstallation, or None if there
4954 are no such flags."""
4955 if "--newuse" in self.myopts:
4956 flags = set(orig_iuse.symmetric_difference(
4957 cur_iuse).difference(forced_flags))
4958 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4959 cur_iuse.intersection(cur_use)))
4962 elif "changed-use" == self.myopts.get("--reinstall"):
4963 flags = orig_iuse.intersection(orig_use).symmetric_difference(
4964 cur_iuse.intersection(cur_use))
4969 def _create_graph(self, allow_unsatisfied=False):
4970 dep_stack = self._dep_stack
4972 self.spinner.update()
4973 dep = dep_stack.pop()
4974 if isinstance(dep, Package):
4975 if not self._add_pkg_deps(dep,
4976 allow_unsatisfied=allow_unsatisfied):
4979 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
4983 def _add_dep(self, dep, allow_unsatisfied=False):
4984 debug = "--debug" in self.myopts
4985 buildpkgonly = "--buildpkgonly" in self.myopts
4986 nodeps = "--nodeps" in self.myopts
4987 empty = "empty" in self.myparams
4988 deep = "deep" in self.myparams
4989 update = "--update" in self.myopts and dep.depth <= 1
4991 if not buildpkgonly and \
4993 dep.parent not in self._slot_collision_nodes:
4994 if dep.parent.onlydeps:
4995 # It's safe to ignore blockers if the
4996 # parent is an --onlydeps node.
4998 # The blocker applies to the root where
4999 # the parent is or will be installed.
5000 blocker = Blocker(atom=dep.atom,
5001 eapi=dep.parent.metadata["EAPI"],
5002 root=dep.parent.root)
5003 self._blocker_parents.add(blocker, dep.parent)
5005 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
5006 onlydeps=dep.onlydeps)
5008 if dep.priority.optional:
5009 # This could be an unecessary build-time dep
5010 # pulled in by --with-bdeps=y.
5012 if allow_unsatisfied:
5013 self._unsatisfied_deps.append(dep)
5015 self._unsatisfied_deps_for_display.append(
5016 ((dep.root, dep.atom), {"myparent":dep.parent}))
5018 # In some cases, dep_check will return deps that shouldn't
5019 # be proccessed any further, so they are identified and
5020 # discarded here. Try to discard as few as possible since
5021 # discarded dependencies reduce the amount of information
5022 # available for optimization of merge order.
5023 if dep.priority.satisfied and \
5024 not dep_pkg.installed and \
5025 not (existing_node or empty or deep or update):
5027 if dep.root == self.target_root:
5029 myarg = self._iter_atoms_for_pkg(dep_pkg).next()
5030 except StopIteration:
5032 except portage.exception.InvalidDependString:
5033 if not dep_pkg.installed:
5034 # This shouldn't happen since the package
5035 # should have been masked.
5038 self._ignored_deps.append(dep)
5041 if not self._add_pkg(dep_pkg, dep):
5045 def _add_pkg(self, pkg, dep):
5052 myparent = dep.parent
5053 priority = dep.priority
5055 if priority is None:
5056 priority = DepPriority()
5058 Fills the digraph with nodes comprised of packages to merge.
5059 mybigkey is the package spec of the package to merge.
5060 myparent is the package depending on mybigkey ( or None )
5061 addme = Should we add this package to the digraph or are we just looking at it's deps?
5062 Think --onlydeps, we need to ignore packages in that case.
5065 #IUSE-aware emerge -> USE DEP aware depgraph
5066 #"no downgrade" emerge
5068 # Ensure that the dependencies of the same package
5069 # are never processed more than once.
5070 previously_added = pkg in self.digraph
5072 # select the correct /var database that we'll be checking against
5073 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5074 pkgsettings = self.pkgsettings[pkg.root]
5079 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5080 except portage.exception.InvalidDependString, e:
5081 if not pkg.installed:
5082 show_invalid_depstring_notice(
5083 pkg, pkg.metadata["PROVIDE"], str(e))
5087 if not pkg.onlydeps:
5088 if not pkg.installed and \
5089 "empty" not in self.myparams and \
5090 vardbapi.match(pkg.slot_atom):
5091 # Increase the priority of dependencies on packages that
5092 # are being rebuilt. This optimizes merge order so that
5093 # dependencies are rebuilt/updated as soon as possible,
5094 # which is needed especially when emerge is called by
5095 # revdep-rebuild since dependencies may be affected by ABI
5096 # breakage that has rendered them useless. Don't adjust
5097 # priority here when in "empty" mode since all packages
5098 # are being merged in that case.
5099 priority.rebuild = True
5101 existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5102 slot_collision = False
5104 existing_node_matches = pkg.cpv == existing_node.cpv
5105 if existing_node_matches and \
5106 pkg != existing_node and \
5107 dep.atom is not None:
5108 # Use package set for matching since it will match via
5109 # PROVIDE when necessary, while match_from_list does not.
5110 atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5111 if not atom_set.findAtomForPackage(existing_node):
5112 existing_node_matches = False
5113 if existing_node_matches:
5114 # The existing node can be reused.
5116 for parent_atom in arg_atoms:
5117 parent, atom = parent_atom
5118 self.digraph.add(existing_node, parent,
5120 self._add_parent_atom(existing_node, parent_atom)
5121 # If a direct circular dependency is not an unsatisfied
5122 # buildtime dependency then drop it here since otherwise
5123 # it can skew the merge order calculation in an unwanted
5125 if existing_node != myparent or \
5126 (priority.buildtime and not priority.satisfied):
5127 self.digraph.addnode(existing_node, myparent,
5129 if dep.atom is not None and dep.parent is not None:
5130 self._add_parent_atom(existing_node,
5131 (dep.parent, dep.atom))
5135 # A slot collision has occurred. Sometimes this coincides
5136 # with unresolvable blockers, so the slot collision will be
5137 # shown later if there are no unresolvable blockers.
5138 self._add_slot_conflict(pkg)
5139 slot_collision = True
5142 # Now add this node to the graph so that self.display()
5143 # can show use flags and --tree portage.output. This node is
5144 # only being partially added to the graph. It must not be
5145 # allowed to interfere with the other nodes that have been
5146 # added. Do not overwrite data for existing nodes in
5147 # self.mydbapi since that data will be used for blocker
5149 # Even though the graph is now invalid, continue to process
5150 # dependencies so that things like --fetchonly can still
5151 # function despite collisions.
5153 elif not previously_added:
5154 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5155 self.mydbapi[pkg.root].cpv_inject(pkg)
5156 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5158 if not pkg.installed:
5159 # Allow this package to satisfy old-style virtuals in case it
5160 # doesn't already. Any pre-existing providers will be preferred
5163 pkgsettings.setinst(pkg.cpv, pkg.metadata)
5164 # For consistency, also update the global virtuals.
5165 settings = self.roots[pkg.root].settings
5167 settings.setinst(pkg.cpv, pkg.metadata)
5169 except portage.exception.InvalidDependString, e:
5170 show_invalid_depstring_notice(
5171 pkg, pkg.metadata["PROVIDE"], str(e))
5176 self._set_nodes.add(pkg)
5178 # Do this even when addme is False (--onlydeps) so that the
5179 # parent/child relationship is always known in case
5180 # self._show_slot_collision_notice() needs to be called later.
5181 self.digraph.add(pkg, myparent, priority=priority)
5182 if dep.atom is not None and dep.parent is not None:
5183 self._add_parent_atom(pkg, (dep.parent, dep.atom))
5186 for parent_atom in arg_atoms:
5187 parent, atom = parent_atom
5188 self.digraph.add(pkg, parent, priority=priority)
5189 self._add_parent_atom(pkg, parent_atom)
5191 """ This section determines whether we go deeper into dependencies or not.
5192 We want to go deeper on a few occasions:
5193 Installing package A, we need to make sure package A's deps are met.
5194 emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
5195 If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
5197 dep_stack = self._dep_stack
5198 if "recurse" not in self.myparams:
5200 elif pkg.installed and \
5201 "deep" not in self.myparams:
5202 dep_stack = self._ignored_deps
5204 self.spinner.update()
5209 if not previously_added:
5210 dep_stack.append(pkg)
5213 def _add_parent_atom(self, pkg, parent_atom):
5214 parent_atoms = self._parent_atoms.get(pkg)
5215 if parent_atoms is None:
5216 parent_atoms = set()
5217 self._parent_atoms[pkg] = parent_atoms
5218 parent_atoms.add(parent_atom)
5220 def _add_slot_conflict(self, pkg):
5221 self._slot_collision_nodes.add(pkg)
5222 slot_key = (pkg.slot_atom, pkg.root)
5223 slot_nodes = self._slot_collision_info.get(slot_key)
5224 if slot_nodes is None:
5226 slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5227 self._slot_collision_info[slot_key] = slot_nodes
5230 def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5232 mytype = pkg.type_name
5235 metadata = pkg.metadata
5236 myuse = pkg.use.enabled
5238 depth = pkg.depth + 1
5239 removal_action = "remove" in self.myparams
5242 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5244 edepend[k] = metadata[k]
5246 if not pkg.built and \
5247 "--buildpkgonly" in self.myopts and \
5248 "deep" not in self.myparams and \
5249 "empty" not in self.myparams:
5250 edepend["RDEPEND"] = ""
5251 edepend["PDEPEND"] = ""
5252 bdeps_optional = False
5254 if pkg.built and not removal_action:
5255 if self.myopts.get("--with-bdeps", "n") == "y":
5256 # Pull in build time deps as requested, but marked them as
5257 # "optional" since they are not strictly required. This allows
5258 # more freedom in the merge order calculation for solving
5259 # circular dependencies. Don't convert to PDEPEND since that
5260 # could make --with-bdeps=y less effective if it is used to
5261 # adjust merge order to prevent built_with_use() calls from
5263 bdeps_optional = True
5265 # built packages do not have build time dependencies.
5266 edepend["DEPEND"] = ""
5268 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5269 edepend["DEPEND"] = ""
5272 ("/", edepend["DEPEND"],
5273 self._priority(buildtime=(not bdeps_optional),
5274 optional=bdeps_optional)),
5275 (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5276 (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5279 debug = "--debug" in self.myopts
5280 strict = mytype != "installed"
5282 for dep_root, dep_string, dep_priority in deps:
5287 print "Parent: ", jbigkey
5288 print "Depstring:", dep_string
5289 print "Priority:", dep_priority
5290 vardb = self.roots[dep_root].trees["vartree"].dbapi
5292 selected_atoms = self._select_atoms(dep_root,
5293 dep_string, myuse=myuse, parent=pkg, strict=strict,
5294 priority=dep_priority)
5295 except portage.exception.InvalidDependString, e:
5296 show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5299 print "Candidates:", selected_atoms
5301 for atom in selected_atoms:
5304 atom = portage.dep.Atom(atom)
5306 mypriority = dep_priority.copy()
5307 if not atom.blocker and vardb.match(atom):
5308 mypriority.satisfied = True
5310 if not self._add_dep(Dependency(atom=atom,
5311 blocker=atom.blocker, depth=depth, parent=pkg,
5312 priority=mypriority, root=dep_root),
5313 allow_unsatisfied=allow_unsatisfied):
5316 except portage.exception.InvalidAtom, e:
5317 show_invalid_depstring_notice(
5318 pkg, dep_string, str(e))
5320 if not pkg.installed:
5324 print "Exiting...", jbigkey
5325 except portage.exception.AmbiguousPackageName, e:
5327 portage.writemsg("\n\n!!! An atom in the dependencies " + \
5328 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5330 portage.writemsg(" %s\n" % cpv, noiselevel=-1)
5331 portage.writemsg("\n", noiselevel=-1)
5332 if mytype == "binary":
5334 "!!! This binary package cannot be installed: '%s'\n" % \
5335 mykey, noiselevel=-1)
5336 elif mytype == "ebuild":
5337 portdb = self.roots[myroot].trees["porttree"].dbapi
5338 myebuild, mylocation = portdb.findname2(mykey)
5339 portage.writemsg("!!! This ebuild cannot be installed: " + \
5340 "'%s'\n" % myebuild, noiselevel=-1)
5341 portage.writemsg("!!! Please notify the package maintainer " + \
5342 "that atoms must be fully-qualified.\n", noiselevel=-1)
5346 def _priority(self, **kwargs):
5347 if "remove" in self.myparams:
5348 priority_constructor = UnmergeDepPriority
5350 priority_constructor = DepPriority
5351 return priority_constructor(**kwargs)
5353 def _dep_expand(self, root_config, atom_without_category):
5355 @param root_config: a root config instance
5356 @type root_config: RootConfig
5357 @param atom_without_category: an atom without a category component
5358 @type atom_without_category: String
5360 @returns: a list of atoms containing categories (possibly empty)
5362 null_cp = portage.dep_getkey(insert_category_into_atom(
5363 atom_without_category, "null"))
5364 cat, atom_pn = portage.catsplit(null_cp)
5366 dbs = self._filtered_trees[root_config.root]["dbs"]
5368 for db, pkg_type, built, installed, db_keys in dbs:
5369 for cat in db.categories:
5370 if db.cp_list("%s/%s" % (cat, atom_pn)):
5374 for cat in categories:
5375 deps.append(insert_category_into_atom(
5376 atom_without_category, cat))
5379 def _have_new_virt(self, root, atom_cp):
5381 for db, pkg_type, built, installed, db_keys in \
5382 self._filtered_trees[root]["dbs"]:
5383 if db.cp_list(atom_cp):
5388 def _iter_atoms_for_pkg(self, pkg):
5389 # TODO: add multiple $ROOT support
5390 if pkg.root != self.target_root:
5392 atom_arg_map = self._atom_arg_map
5393 root_config = self.roots[pkg.root]
5394 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5395 atom_cp = portage.dep_getkey(atom)
5396 if atom_cp != pkg.cp and \
5397 self._have_new_virt(pkg.root, atom_cp):
5399 visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5400 visible_pkgs.reverse() # descending order
5402 for visible_pkg in visible_pkgs:
5403 if visible_pkg.cp != atom_cp:
5405 if pkg >= visible_pkg:
5406 # This is descending order, and we're not
5407 # interested in any versions <= pkg given.
5409 if pkg.slot_atom != visible_pkg.slot_atom:
5410 higher_slot = visible_pkg
5412 if higher_slot is not None:
5414 for arg in atom_arg_map[(atom, pkg.root)]:
5415 if isinstance(arg, PackageArg) and \
5420 def select_files(self, myfiles):
5421 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
5422 appropriate depgraph and return a favorite list."""
5423 debug = "--debug" in self.myopts
5424 root_config = self.roots[self.target_root]
5425 sets = root_config.sets
5426 getSetAtoms = root_config.setconfig.getSetAtoms
5428 myroot = self.target_root
5429 dbs = self._filtered_trees[myroot]["dbs"]
5430 vardb = self.trees[myroot]["vartree"].dbapi
5431 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5432 portdb = self.trees[myroot]["porttree"].dbapi
5433 bindb = self.trees[myroot]["bintree"].dbapi
5434 pkgsettings = self.pkgsettings[myroot]
5436 onlydeps = "--onlydeps" in self.myopts
5439 ext = os.path.splitext(x)[1]
5441 if not os.path.exists(x):
5443 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5444 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5445 elif os.path.exists(
5446 os.path.join(pkgsettings["PKGDIR"], x)):
5447 x = os.path.join(pkgsettings["PKGDIR"], x)
5449 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5450 print "!!! Please ensure the tbz2 exists as specified.\n"
5451 return 0, myfavorites
5452 mytbz2=portage.xpak.tbz2(x)
5453 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5454 if os.path.realpath(x) != \
5455 os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5456 print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5457 return 0, myfavorites
5458 db_keys = list(bindb._aux_cache_keys)
5459 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5460 pkg = Package(type_name="binary", root_config=root_config,
5461 cpv=mykey, built=True, metadata=metadata,
5463 self._pkg_cache[pkg] = pkg
5464 args.append(PackageArg(arg=x, package=pkg,
5465 root_config=root_config))
5466 elif ext==".ebuild":
5467 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5468 pkgdir = os.path.dirname(ebuild_path)
5469 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5470 cp = pkgdir[len(tree_root)+1:]
5471 e = portage.exception.PackageNotFound(
5472 ("%s is not in a valid portage tree " + \
5473 "hierarchy or does not exist") % x)
5474 if not portage.isvalidatom(cp):
5476 cat = portage.catsplit(cp)[0]
5477 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5478 if not portage.isvalidatom("="+mykey):
5480 ebuild_path = portdb.findname(mykey)
5482 if ebuild_path != os.path.join(os.path.realpath(tree_root),
5483 cp, os.path.basename(ebuild_path)):
5484 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5485 return 0, myfavorites
5486 if mykey not in portdb.xmatch(
5487 "match-visible", portage.dep_getkey(mykey)):
5488 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5489 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5490 print colorize("BAD", "*** page for details.")
5491 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5494 raise portage.exception.PackageNotFound(
5495 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5496 db_keys = list(portdb._aux_cache_keys)
5497 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5498 pkg = Package(type_name="ebuild", root_config=root_config,
5499 cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5500 pkgsettings.setcpv(pkg)
5501 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5502 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
5503 self._pkg_cache[pkg] = pkg
5504 args.append(PackageArg(arg=x, package=pkg,
5505 root_config=root_config))
5506 elif x.startswith(os.path.sep):
5507 if not x.startswith(myroot):
5508 portage.writemsg(("\n\n!!! '%s' does not start with" + \
5509 " $ROOT.\n") % x, noiselevel=-1)
5511 # Queue these up since it's most efficient to handle
5512 # multiple files in a single iter_owners() call.
5513 lookup_owners.append(x)
5515 if x in ("system", "world"):
5517 if x.startswith(SETPREFIX):
5518 s = x[len(SETPREFIX):]
5520 raise portage.exception.PackageSetNotFound(s)
5523 # Recursively expand sets so that containment tests in
5524 # self._get_parent_sets() properly match atoms in nested
5525 # sets (like if world contains system).
5526 expanded_set = InternalPackageSet(
5527 initial_atoms=getSetAtoms(s))
5528 self._sets[s] = expanded_set
5529 args.append(SetArg(arg=x, set=expanded_set,
5530 root_config=root_config))
5532 if not is_valid_package_atom(x):
5533 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5535 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5536 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5538 # Don't expand categories or old-style virtuals here unless
5539 # necessary. Expansion of old-style virtuals here causes at
5540 # least the following problems:
5541 # 1) It's more difficult to determine which set(s) an atom
5542 # came from, if any.
5543 # 2) It takes away freedom from the resolver to choose other
5544 # possible expansions when necessary.
5546 args.append(AtomArg(arg=x, atom=x,
5547 root_config=root_config))
5549 expanded_atoms = self._dep_expand(root_config, x)
5550 installed_cp_set = set()
5551 for atom in expanded_atoms:
5552 atom_cp = portage.dep_getkey(atom)
5553 if vardb.cp_list(atom_cp):
5554 installed_cp_set.add(atom_cp)
5555 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5556 installed_cp = iter(installed_cp_set).next()
5557 expanded_atoms = [atom for atom in expanded_atoms \
5558 if portage.dep_getkey(atom) == installed_cp]
5560 if len(expanded_atoms) > 1:
5563 ambiguous_package_name(x, expanded_atoms, root_config,
5564 self.spinner, self.myopts)
5565 return False, myfavorites
5567 atom = expanded_atoms[0]
5569 null_atom = insert_category_into_atom(x, "null")
5570 null_cp = portage.dep_getkey(null_atom)
5571 cat, atom_pn = portage.catsplit(null_cp)
5572 virts_p = root_config.settings.get_virts_p().get(atom_pn)
5574 # Allow the depgraph to choose which virtual.
5575 atom = insert_category_into_atom(x, "virtual")
5577 atom = insert_category_into_atom(x, "null")
5579 args.append(AtomArg(arg=x, atom=atom,
5580 root_config=root_config))
5584 search_for_multiple = False
5585 if len(lookup_owners) > 1:
5586 search_for_multiple = True
5588 for x in lookup_owners:
5589 if not search_for_multiple and os.path.isdir(x):
5590 search_for_multiple = True
5591 relative_paths.append(x[len(myroot):])
5594 for pkg, relative_path in \
5595 real_vardb._owners.iter_owners(relative_paths):
5596 owners.add(pkg.mycpv)
5597 if not search_for_multiple:
5601 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5602 "by any package.\n") % lookup_owners[0], noiselevel=-1)
5606 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5608 # portage now masks packages with missing slot, but it's
5609 # possible that one was installed by an older version
5610 atom = portage.cpv_getkey(cpv)
5612 atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5613 args.append(AtomArg(arg=atom, atom=atom,
5614 root_config=root_config))
5616 if "--update" in self.myopts:
5617 # In some cases, the greedy slots behavior can pull in a slot that
5618 # the user would want to uninstall due to it being blocked by a
5619 # newer version in a different slot. Therefore, it's necessary to
5620 # detect and discard any that should be uninstalled. Each time
5621 # that arguments are updated, package selections are repeated in
5622 # order to ensure consistency with the current arguments:
5624 # 1) Initialize args
5625 # 2) Select packages and generate initial greedy atoms
5626 # 3) Update args with greedy atoms
5627 # 4) Select packages and generate greedy atoms again, while
5628 # accounting for any blockers between selected packages
5629 # 5) Update args with revised greedy atoms
5631 self._set_args(args)
5634 greedy_args.append(arg)
5635 if not isinstance(arg, AtomArg):
5637 for atom in self._greedy_slots(arg.root_config, arg.atom):
5639 AtomArg(arg=arg.arg, atom=atom,
5640 root_config=arg.root_config))
5642 self._set_args(greedy_args)
5645 # Revise greedy atoms, accounting for any blockers
5646 # between selected packages.
5647 revised_greedy_args = []
5649 revised_greedy_args.append(arg)
5650 if not isinstance(arg, AtomArg):
5652 for atom in self._greedy_slots(arg.root_config, arg.atom,
5653 blocker_lookahead=True):
5654 revised_greedy_args.append(
5655 AtomArg(arg=arg.arg, atom=atom,
5656 root_config=arg.root_config))
5657 args = revised_greedy_args
5658 del revised_greedy_args
5660 self._set_args(args)
5662 myfavorites = set(myfavorites)
5664 if isinstance(arg, (AtomArg, PackageArg)):
5665 myfavorites.add(arg.atom)
5666 elif isinstance(arg, SetArg):
5667 myfavorites.add(arg.arg)
5668 myfavorites = list(myfavorites)
5670 pprovideddict = pkgsettings.pprovideddict
5672 portage.writemsg("\n", noiselevel=-1)
5673 # Order needs to be preserved since a feature of --nodeps
5674 # is to allow the user to force a specific merge order.
5678 for atom in arg.set:
5679 self.spinner.update()
5680 dep = Dependency(atom=atom, onlydeps=onlydeps,
5681 root=myroot, parent=arg)
5682 atom_cp = portage.dep_getkey(atom)
5684 pprovided = pprovideddict.get(portage.dep_getkey(atom))
5685 if pprovided and portage.match_from_list(atom, pprovided):
5686 # A provided package has been specified on the command line.
5687 self._pprovided_args.append((arg, atom))
5689 if isinstance(arg, PackageArg):
5690 if not self._add_pkg(arg.package, dep) or \
5691 not self._create_graph():
5692 sys.stderr.write(("\n\n!!! Problem resolving " + \
5693 "dependencies for %s\n") % arg.arg)
5694 return 0, myfavorites
5697 portage.writemsg(" Arg: %s\n Atom: %s\n" % \
5698 (arg, atom), noiselevel=-1)
5699 pkg, existing_node = self._select_package(
5700 myroot, atom, onlydeps=onlydeps)
5702 if not (isinstance(arg, SetArg) and \
5703 arg.name in ("system", "world")):
5704 self._unsatisfied_deps_for_display.append(
5705 ((myroot, atom), {}))
5706 return 0, myfavorites
5707 self._missing_args.append((arg, atom))
5709 if atom_cp != pkg.cp:
5710 # For old-style virtuals, we need to repeat the
5711 # package.provided check against the selected package.
5712 expanded_atom = atom.replace(atom_cp, pkg.cp)
5713 pprovided = pprovideddict.get(pkg.cp)
5715 portage.match_from_list(expanded_atom, pprovided):
5716 # A provided package has been
5717 # specified on the command line.
5718 self._pprovided_args.append((arg, atom))
5720 if pkg.installed and "selective" not in self.myparams:
5721 self._unsatisfied_deps_for_display.append(
5722 ((myroot, atom), {}))
5723 # Previous behavior was to bail out in this case, but
5724 # since the dep is satisfied by the installed package,
5725 # it's more friendly to continue building the graph
5726 # and just show a warning message. Therefore, only bail
5727 # out here if the atom is not from either the system or
5729 if not (isinstance(arg, SetArg) and \
5730 arg.name in ("system", "world")):
5731 return 0, myfavorites
5733 # Add the selected package to the graph as soon as possible
5734 # so that later dep_check() calls can use it as feedback
5735 # for making more consistent atom selections.
5736 if not self._add_pkg(pkg, dep):
5737 if isinstance(arg, SetArg):
5738 sys.stderr.write(("\n\n!!! Problem resolving " + \
5739 "dependencies for %s from %s\n") % \
5742 sys.stderr.write(("\n\n!!! Problem resolving " + \
5743 "dependencies for %s\n") % atom)
5744 return 0, myfavorites
5746 except portage.exception.MissingSignature, e:
5747 portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5748 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5749 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5750 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5751 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5752 return 0, myfavorites
5753 except portage.exception.InvalidSignature, e:
5754 portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5755 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5756 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5757 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5758 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5759 return 0, myfavorites
5760 except SystemExit, e:
5761 raise # Needed else can't exit
5762 except Exception, e:
5763 print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5764 print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5767 # Now that the root packages have been added to the graph,
5768 # process the dependencies.
5769 if not self._create_graph():
5770 return 0, myfavorites
5773 if "--usepkgonly" in self.myopts:
5774 for xs in self.digraph.all_nodes():
5775 if not isinstance(xs, Package):
5777 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5781 print "Missing binary for:",xs[2]
5785 except self._unknown_internal_error:
5786 return False, myfavorites
5788 # We're true here unless we are missing binaries.
5789 return (not missing,myfavorites)
5791 def _set_args(self, args):
5793 Create the "args" package set from atoms and packages given as
5794 arguments. This method can be called multiple times if necessary.
5795 The package selection cache is automatically invalidated, since
5796 arguments influence package selections.
5798 args_set = self._sets["args"]
5801 if not isinstance(arg, (AtomArg, PackageArg)):
5804 if atom in args_set:
5808 self._set_atoms.clear()
5809 self._set_atoms.update(chain(*self._sets.itervalues()))
5810 atom_arg_map = self._atom_arg_map
5811 atom_arg_map.clear()
5813 for atom in arg.set:
5814 atom_key = (atom, arg.root_config.root)
5815 refs = atom_arg_map.get(atom_key)
5818 atom_arg_map[atom_key] = refs
5822 # Invalidate the package selection cache, since
5823 # arguments influence package selections.
5824 self._highest_pkg_cache.clear()
5825 for trees in self._filtered_trees.itervalues():
5826 trees["porttree"].dbapi._clear_cache()
5828 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5830 Return a list of slot atoms corresponding to installed slots that
5831 differ from the slot of the highest visible match. When
5832 blocker_lookahead is True, slot atoms that would trigger a blocker
5833 conflict are automatically discarded, potentially allowing automatic
5834 uninstallation of older slots when appropriate.
5836 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5837 if highest_pkg is None:
5839 vardb = root_config.trees["vartree"].dbapi
5841 for cpv in vardb.match(atom):
5842 # don't mix new virtuals with old virtuals
5843 if portage.cpv_getkey(cpv) == highest_pkg.cp:
5844 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5846 slots.add(highest_pkg.metadata["SLOT"])
5850 slots.remove(highest_pkg.metadata["SLOT"])
5853 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5854 pkg, in_graph = self._select_package(root_config.root, slot_atom)
5855 if pkg is not None and \
5856 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5857 greedy_pkgs.append(pkg)
5860 if not blocker_lookahead:
5861 return [pkg.slot_atom for pkg in greedy_pkgs]
5864 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5865 for pkg in greedy_pkgs + [highest_pkg]:
5866 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5868 atoms = self._select_atoms(
5869 pkg.root, dep_str, pkg.use.enabled,
5870 parent=pkg, strict=True)
5871 except portage.exception.InvalidDependString:
5873 blocker_atoms = (x for x in atoms if x.blocker)
5874 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5876 if highest_pkg not in blockers:
5879 # filter packages with invalid deps
5880 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5882 # filter packages that conflict with highest_pkg
5883 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5884 (blockers[highest_pkg].findAtomForPackage(pkg) or \
5885 blockers[pkg].findAtomForPackage(highest_pkg))]
5890 # If two packages conflict, discard the lower version.
5891 discard_pkgs = set()
5892 greedy_pkgs.sort(reverse=True)
5893 for i in xrange(len(greedy_pkgs) - 1):
5894 pkg1 = greedy_pkgs[i]
5895 if pkg1 in discard_pkgs:
5897 for j in xrange(i + 1, len(greedy_pkgs)):
5898 pkg2 = greedy_pkgs[j]
5899 if pkg2 in discard_pkgs:
5901 if blockers[pkg1].findAtomForPackage(pkg2) or \
5902 blockers[pkg2].findAtomForPackage(pkg1):
5904 discard_pkgs.add(pkg2)
5906 return [pkg.slot_atom for pkg in greedy_pkgs \
5907 if pkg not in discard_pkgs]
5909 def _select_atoms_from_graph(self, *pargs, **kwargs):
5911 Prefer atoms matching packages that have already been
5912 added to the graph or those that are installed and have
5913 not been scheduled for replacement.
5915 kwargs["trees"] = self._graph_trees
5916 return self._select_atoms_highest_available(*pargs, **kwargs)
5918 def _select_atoms_highest_available(self, root, depstring,
5919 myuse=None, parent=None, strict=True, trees=None, priority=None):
5920 """This will raise InvalidDependString if necessary. If trees is
5921 None then self._filtered_trees is used."""
5922 pkgsettings = self.pkgsettings[root]
5924 trees = self._filtered_trees
5925 if not getattr(priority, "buildtime", False):
5926 # The parent should only be passed to dep_check() for buildtime
5927 # dependencies since that's the only case when it's appropriate
5928 # to trigger the circular dependency avoidance code which uses it.
5929 # It's important not to trigger the same circular dependency
5930 # avoidance code for runtime dependencies since it's not needed
5931 # and it can promote an incorrect package choice.
5935 if parent is not None:
5936 trees[root]["parent"] = parent
5938 portage.dep._dep_check_strict = False
5939 mycheck = portage.dep_check(depstring, None,
5940 pkgsettings, myuse=myuse,
5941 myroot=root, trees=trees)
5943 if parent is not None:
5944 trees[root].pop("parent")
5945 portage.dep._dep_check_strict = True
5947 raise portage.exception.InvalidDependString(mycheck[1])
5948 selected_atoms = mycheck[1]
5949 return selected_atoms
5951 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5952 atom = portage.dep.Atom(atom)
5953 atom_set = InternalPackageSet(initial_atoms=(atom,))
5954 atom_without_use = atom
5956 atom_without_use = portage.dep.remove_slot(atom)
5958 atom_without_use += ":" + atom.slot
5959 atom_without_use = portage.dep.Atom(atom_without_use)
5960 xinfo = '"%s"' % atom
5963 # Discard null/ from failed cpv_expand category expansion.
5964 xinfo = xinfo.replace("null/", "")
5965 masked_packages = []
5967 masked_pkg_instances = set()
5968 missing_licenses = []
5969 have_eapi_mask = False
5970 pkgsettings = self.pkgsettings[root]
5971 implicit_iuse = pkgsettings._get_implicit_iuse()
5972 root_config = self.roots[root]
5973 portdb = self.roots[root].trees["porttree"].dbapi
5974 dbs = self._filtered_trees[root]["dbs"]
5975 for db, pkg_type, built, installed, db_keys in dbs:
5979 if hasattr(db, "xmatch"):
5980 cpv_list = db.xmatch("match-all", atom_without_use)
5982 cpv_list = db.match(atom_without_use)
5985 for cpv in cpv_list:
5986 metadata, mreasons = get_mask_info(root_config, cpv,
5987 pkgsettings, db, pkg_type, built, installed, db_keys)
5988 if metadata is not None:
5989 pkg = Package(built=built, cpv=cpv,
5990 installed=installed, metadata=metadata,
5991 root_config=root_config)
5992 if pkg.cp != atom.cp:
5993 # A cpv can be returned from dbapi.match() as an
5994 # old-style virtual match even in cases when the
5995 # package does not actually PROVIDE the virtual.
5996 # Filter out any such false matches here.
5997 if not atom_set.findAtomForPackage(pkg):
6000 masked_pkg_instances.add(pkg)
6002 missing_use.append(pkg)
6005 masked_packages.append(
6006 (root_config, pkgsettings, cpv, metadata, mreasons))
6008 missing_use_reasons = []
6009 missing_iuse_reasons = []
6010 for pkg in missing_use:
6011 use = pkg.use.enabled
6012 iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
6013 iuse_re = re.compile("^(%s)$" % "|".join(iuse))
6015 for x in atom.use.required:
6016 if iuse_re.match(x) is None:
6017 missing_iuse.append(x)
6020 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
6021 missing_iuse_reasons.append((pkg, mreasons))
6023 need_enable = sorted(atom.use.enabled.difference(use))
6024 need_disable = sorted(atom.use.disabled.intersection(use))
6025 if need_enable or need_disable:
6027 changes.extend(colorize("red", "+" + x) \
6028 for x in need_enable)
6029 changes.extend(colorize("blue", "-" + x) \
6030 for x in need_disable)
6031 mreasons.append("Change USE: %s" % " ".join(changes))
6032 missing_use_reasons.append((pkg, mreasons))
6034 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6035 in missing_use_reasons if pkg not in masked_pkg_instances]
6037 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6038 in missing_iuse_reasons if pkg not in masked_pkg_instances]
6040 show_missing_use = False
6041 if unmasked_use_reasons:
6042 # Only show the latest version.
6043 show_missing_use = unmasked_use_reasons[:1]
6044 elif unmasked_iuse_reasons:
6045 if missing_use_reasons:
6046 # All packages with required IUSE are masked,
6047 # so display a normal masking message.
6050 show_missing_use = unmasked_iuse_reasons
6052 if show_missing_use:
6053 print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6054 print "!!! One of the following packages is required to complete your request:"
6055 for pkg, mreasons in show_missing_use:
6056 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6058 elif masked_packages:
6060 colorize("BAD", "All ebuilds that could satisfy ") + \
6061 colorize("INFORM", xinfo) + \
6062 colorize("BAD", " have been masked.")
6063 print "!!! One of the following masked packages is required to complete your request:"
6064 have_eapi_mask = show_masked_packages(masked_packages)
6067 msg = ("The current version of portage supports " + \
6068 "EAPI '%s'. You must upgrade to a newer version" + \
6069 " of portage before EAPI masked packages can" + \
6070 " be installed.") % portage.const.EAPI
6071 from textwrap import wrap
6072 for line in wrap(msg, 75):
6077 print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6079 # Show parent nodes and the argument that pulled them in.
6080 traversed_nodes = set()
6083 while node is not None:
6084 traversed_nodes.add(node)
6085 msg.append('(dependency required by "%s" [%s])' % \
6086 (colorize('INFORM', str(node.cpv)), node.type_name))
6087 # When traversing to parents, prefer arguments over packages
6088 # since arguments are root nodes. Never traverse the same
6089 # package twice, in order to prevent an infinite loop.
6090 selected_parent = None
6091 for parent in self.digraph.parent_nodes(node):
6092 if isinstance(parent, DependencyArg):
6093 msg.append('(dependency required by "%s" [argument])' % \
6094 (colorize('INFORM', str(parent))))
6095 selected_parent = None
6097 if parent not in traversed_nodes:
6098 selected_parent = parent
6099 node = selected_parent
6105 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6106 cache_key = (root, atom, onlydeps)
6107 ret = self._highest_pkg_cache.get(cache_key)
6110 if pkg and not existing:
6111 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6112 if existing and existing == pkg:
6113 # Update the cache to reflect that the
6114 # package has been added to the graph.
6116 self._highest_pkg_cache[cache_key] = ret
6118 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6119 self._highest_pkg_cache[cache_key] = ret
6122 settings = pkg.root_config.settings
6123 if visible(settings, pkg) and not (pkg.installed and \
6124 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6125 pkg.root_config.visible_pkgs.cpv_inject(pkg)
6128 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6129 root_config = self.roots[root]
6130 pkgsettings = self.pkgsettings[root]
6131 dbs = self._filtered_trees[root]["dbs"]
6132 vardb = self.roots[root].trees["vartree"].dbapi
6133 portdb = self.roots[root].trees["porttree"].dbapi
6134 # List of acceptable packages, ordered by type preference.
6135 matched_packages = []
6136 highest_version = None
6137 if not isinstance(atom, portage.dep.Atom):
6138 atom = portage.dep.Atom(atom)
6140 atom_set = InternalPackageSet(initial_atoms=(atom,))
6141 existing_node = None
6143 usepkgonly = "--usepkgonly" in self.myopts
6144 empty = "empty" in self.myparams
6145 selective = "selective" in self.myparams
6147 noreplace = "--noreplace" in self.myopts
6148 # Behavior of the "selective" parameter depends on
6149 # whether or not a package matches an argument atom.
6150 # If an installed package provides an old-style
6151 # virtual that is no longer provided by an available
6152 # package, the installed package may match an argument
6153 # atom even though none of the available packages do.
6154 # Therefore, "selective" logic does not consider
6155 # whether or not an installed package matches an
6156 # argument atom. It only considers whether or not
6157 # available packages match argument atoms, which is
6158 # represented by the found_available_arg flag.
6159 found_available_arg = False
6160 for find_existing_node in True, False:
6163 for db, pkg_type, built, installed, db_keys in dbs:
6166 if installed and not find_existing_node:
6167 want_reinstall = reinstall or empty or \
6168 (found_available_arg and not selective)
6169 if want_reinstall and matched_packages:
6171 if hasattr(db, "xmatch"):
6172 cpv_list = db.xmatch("match-all", atom)
6174 cpv_list = db.match(atom)
6176 # USE=multislot can make an installed package appear as if
6177 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6178 # won't do any good as long as USE=multislot is enabled since
6179 # the newly built package still won't have the expected slot.
6180 # Therefore, assume that such SLOT dependencies are already
6181 # satisfied rather than forcing a rebuild.
6182 if installed and not cpv_list and atom.slot:
6183 for cpv in db.match(atom.cp):
6184 slot_available = False
6185 for other_db, other_type, other_built, \
6186 other_installed, other_keys in dbs:
6189 other_db.aux_get(cpv, ["SLOT"])[0]:
6190 slot_available = True
6194 if not slot_available:
6196 inst_pkg = self._pkg(cpv, "installed",
6197 root_config, installed=installed)
6198 # Remove the slot from the atom and verify that
6199 # the package matches the resulting atom.
6200 atom_without_slot = portage.dep.remove_slot(atom)
6202 atom_without_slot += str(atom.use)
6203 atom_without_slot = portage.dep.Atom(atom_without_slot)
6204 if portage.match_from_list(
6205 atom_without_slot, [inst_pkg]):
6206 cpv_list = [inst_pkg.cpv]
6211 pkg_status = "merge"
6212 if installed or onlydeps:
6213 pkg_status = "nomerge"
6216 for cpv in cpv_list:
6217 # Make --noreplace take precedence over --newuse.
6218 if not installed and noreplace and \
6219 cpv in vardb.match(atom):
6220 # If the installed version is masked, it may
6221 # be necessary to look at lower versions,
6222 # in case there is a visible downgrade.
6224 reinstall_for_flags = None
6225 cache_key = (pkg_type, root, cpv, pkg_status)
6226 calculated_use = True
6227 pkg = self._pkg_cache.get(cache_key)
6229 calculated_use = False
6231 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6234 pkg = Package(built=built, cpv=cpv,
6235 installed=installed, metadata=metadata,
6236 onlydeps=onlydeps, root_config=root_config,
6238 metadata = pkg.metadata
6240 metadata['CHOST'] = pkgsettings.get('CHOST', '')
6241 if not built and ("?" in metadata["LICENSE"] or \
6242 "?" in metadata["PROVIDE"]):
6243 # This is avoided whenever possible because
6244 # it's expensive. It only needs to be done here
6245 # if it has an effect on visibility.
6246 pkgsettings.setcpv(pkg)
6247 metadata["USE"] = pkgsettings["PORTAGE_USE"]
6248 calculated_use = True
6249 self._pkg_cache[pkg] = pkg
6251 if not installed or (built and matched_packages):
6252 # Only enforce visibility on installed packages
6253 # if there is at least one other visible package
6254 # available. By filtering installed masked packages
6255 # here, packages that have been masked since they
6256 # were installed can be automatically downgraded
6257 # to an unmasked version.
6259 if not visible(pkgsettings, pkg):
6261 except portage.exception.InvalidDependString:
6265 # Enable upgrade or downgrade to a version
6266 # with visible KEYWORDS when the installed
6267 # version is masked by KEYWORDS, but never
6268 # reinstall the same exact version only due
6269 # to a KEYWORDS mask.
6270 if built and matched_packages:
6272 different_version = None
6273 for avail_pkg in matched_packages:
6274 if not portage.dep.cpvequal(
6275 pkg.cpv, avail_pkg.cpv):
6276 different_version = avail_pkg
6278 if different_version is not None:
6281 pkgsettings._getMissingKeywords(
6282 pkg.cpv, pkg.metadata):
6285 # If the ebuild no longer exists or it's
6286 # keywords have been dropped, reject built
6287 # instances (installed or binary).
6288 # If --usepkgonly is enabled, assume that
6289 # the ebuild status should be ignored.
6293 pkg.cpv, "ebuild", root_config)
6294 except portage.exception.PackageNotFound:
6297 if not visible(pkgsettings, pkg_eb):
6300 if not pkg.built and not calculated_use:
6301 # This is avoided whenever possible because
6303 pkgsettings.setcpv(pkg)
6304 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6306 if pkg.cp != atom.cp:
6307 # A cpv can be returned from dbapi.match() as an
6308 # old-style virtual match even in cases when the
6309 # package does not actually PROVIDE the virtual.
6310 # Filter out any such false matches here.
6311 if not atom_set.findAtomForPackage(pkg):
6315 if root == self.target_root:
6317 # Ebuild USE must have been calculated prior
6318 # to this point, in case atoms have USE deps.
6319 myarg = self._iter_atoms_for_pkg(pkg).next()
6320 except StopIteration:
6322 except portage.exception.InvalidDependString:
6324 # masked by corruption
6326 if not installed and myarg:
6327 found_available_arg = True
6329 if atom.use and not pkg.built:
6330 use = pkg.use.enabled
6331 if atom.use.enabled.difference(use):
6333 if atom.use.disabled.intersection(use):
6335 if pkg.cp == atom_cp:
6336 if highest_version is None:
6337 highest_version = pkg
6338 elif pkg > highest_version:
6339 highest_version = pkg
6340 # At this point, we've found the highest visible
6341 # match from the current repo. Any lower versions
6342 # from this repo are ignored, so this so the loop
6343 # will always end with a break statement below
6345 if find_existing_node:
6346 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6349 if portage.dep.match_from_list(atom, [e_pkg]):
6350 if highest_version and \
6351 e_pkg.cp == atom_cp and \
6352 e_pkg < highest_version and \
6353 e_pkg.slot_atom != highest_version.slot_atom:
6354 # There is a higher version available in a
6355 # different slot, so this existing node is
6359 matched_packages.append(e_pkg)
6360 existing_node = e_pkg
6362 # Compare built package to current config and
6363 # reject the built package if necessary.
6364 if built and not installed and \
6365 ("--newuse" in self.myopts or \
6366 "--reinstall" in self.myopts):
6367 iuses = pkg.iuse.all
6368 old_use = pkg.use.enabled
6370 pkgsettings.setcpv(myeb)
6372 pkgsettings.setcpv(pkg)
6373 now_use = pkgsettings["PORTAGE_USE"].split()
6374 forced_flags = set()
6375 forced_flags.update(pkgsettings.useforce)
6376 forced_flags.update(pkgsettings.usemask)
6378 if myeb and not usepkgonly:
6379 cur_iuse = myeb.iuse.all
6380 if self._reinstall_for_flags(forced_flags,
6384 # Compare current config to installed package
6385 # and do not reinstall if possible.
6386 if not installed and \
6387 ("--newuse" in self.myopts or \
6388 "--reinstall" in self.myopts) and \
6389 cpv in vardb.match(atom):
6390 pkgsettings.setcpv(pkg)
6391 forced_flags = set()
6392 forced_flags.update(pkgsettings.useforce)
6393 forced_flags.update(pkgsettings.usemask)
6394 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6395 old_iuse = set(filter_iuse_defaults(
6396 vardb.aux_get(cpv, ["IUSE"])[0].split()))
6397 cur_use = pkgsettings["PORTAGE_USE"].split()
6398 cur_iuse = pkg.iuse.all
6399 reinstall_for_flags = \
6400 self._reinstall_for_flags(
6401 forced_flags, old_use, old_iuse,
6403 if reinstall_for_flags:
6407 matched_packages.append(pkg)
6408 if reinstall_for_flags:
6409 self._reinstall_nodes[pkg] = \
6413 if not matched_packages:
6416 if "--debug" in self.myopts:
6417 for pkg in matched_packages:
6418 portage.writemsg("%s %s\n" % \
6419 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6421 # Filter out any old-style virtual matches if they are
6422 # mixed with new-style virtual matches.
6423 cp = portage.dep_getkey(atom)
6424 if len(matched_packages) > 1 and \
6425 "virtual" == portage.catsplit(cp)[0]:
6426 for pkg in matched_packages:
6429 # Got a new-style virtual, so filter
6430 # out any old-style virtuals.
6431 matched_packages = [pkg for pkg in matched_packages \
6435 if len(matched_packages) > 1:
6436 bestmatch = portage.best(
6437 [pkg.cpv for pkg in matched_packages])
6438 matched_packages = [pkg for pkg in matched_packages \
6439 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6441 # ordered by type preference ("ebuild" type is the last resort)
6442 return matched_packages[-1], existing_node
6444 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6446 Select packages that have already been added to the graph or
6447 those that are installed and have not been scheduled for
6450 graph_db = self._graph_trees[root]["porttree"].dbapi
6451 matches = graph_db.match_pkgs(atom)
6454 pkg = matches[-1] # highest match
6455 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6456 return pkg, in_graph
6458 def _complete_graph(self):
6460 Add any deep dependencies of required sets (args, system, world) that
6461 have not been pulled into the graph yet. This ensures that the graph
6462 is consistent such that initially satisfied deep dependencies are not
6463 broken in the new graph. Initially unsatisfied dependencies are
6464 irrelevant since we only want to avoid breaking dependencies that are
6467 Since this method can consume enough time to disturb users, it is
6468 currently only enabled by the --complete-graph option.
6470 if "--buildpkgonly" in self.myopts or \
6471 "recurse" not in self.myparams:
6474 if "complete" not in self.myparams:
6475 # Skip this to avoid consuming enough time to disturb users.
6478 # Put the depgraph into a mode that causes it to only
6479 # select packages that have already been added to the
6480 # graph or those that are installed and have not been
6481 # scheduled for replacement. Also, toggle the "deep"
6482 # parameter so that all dependencies are traversed and
6484 self._select_atoms = self._select_atoms_from_graph
6485 self._select_package = self._select_pkg_from_graph
6486 already_deep = "deep" in self.myparams
6487 if not already_deep:
6488 self.myparams.add("deep")
6490 for root in self.roots:
6491 required_set_names = self._required_set_names.copy()
6492 if root == self.target_root and \
6493 (already_deep or "empty" in self.myparams):
6494 required_set_names.difference_update(self._sets)
6495 if not required_set_names and not self._ignored_deps:
6497 root_config = self.roots[root]
6498 setconfig = root_config.setconfig
6500 # Reuse existing SetArg instances when available.
6501 for arg in self.digraph.root_nodes():
6502 if not isinstance(arg, SetArg):
6504 if arg.root_config != root_config:
6506 if arg.name in required_set_names:
6508 required_set_names.remove(arg.name)
6509 # Create new SetArg instances only when necessary.
6510 for s in required_set_names:
6511 expanded_set = InternalPackageSet(
6512 initial_atoms=setconfig.getSetAtoms(s))
6513 atom = SETPREFIX + s
6514 args.append(SetArg(arg=atom, set=expanded_set,
6515 root_config=root_config))
6516 vardb = root_config.trees["vartree"].dbapi
6518 for atom in arg.set:
6519 self._dep_stack.append(
6520 Dependency(atom=atom, root=root, parent=arg))
6521 if self._ignored_deps:
6522 self._dep_stack.extend(self._ignored_deps)
6523 self._ignored_deps = []
6524 if not self._create_graph(allow_unsatisfied=True):
6526 # Check the unsatisfied deps to see if any initially satisfied deps
6527 # will become unsatisfied due to an upgrade. Initially unsatisfied
6528 # deps are irrelevant since we only want to avoid breaking deps
6529 # that are initially satisfied.
6530 while self._unsatisfied_deps:
6531 dep = self._unsatisfied_deps.pop()
6532 matches = vardb.match_pkgs(dep.atom)
6534 self._initially_unsatisfied_deps.append(dep)
6536 # An scheduled installation broke a deep dependency.
6537 # Add the installed package to the graph so that it
6538 # will be appropriately reported as a slot collision
6539 # (possibly solvable via backtracking).
6540 pkg = matches[-1] # highest match
6541 if not self._add_pkg(pkg, dep):
6543 if not self._create_graph(allow_unsatisfied=True):
6547 def _pkg(self, cpv, type_name, root_config, installed=False):
6549 Get a package instance from the cache, or create a new
6550 one if necessary. Raises KeyError from aux_get if it
6551 failures for some reason (package does not exist or is
6556 operation = "nomerge"
6557 pkg = self._pkg_cache.get(
6558 (type_name, root_config.root, cpv, operation))
6560 tree_type = self.pkg_tree_map[type_name]
6561 db = root_config.trees[tree_type].dbapi
6562 db_keys = list(self._trees_orig[root_config.root][
6563 tree_type].dbapi._aux_cache_keys)
6565 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6567 raise portage.exception.PackageNotFound(cpv)
6568 pkg = Package(cpv=cpv, metadata=metadata,
6569 root_config=root_config, installed=installed)
6570 if type_name == "ebuild":
6571 settings = self.pkgsettings[root_config.root]
6572 settings.setcpv(pkg)
6573 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6574 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6575 self._pkg_cache[pkg] = pkg
6578 def validate_blockers(self):
6579 """Remove any blockers from the digraph that do not match any of the
6580 packages within the graph. If necessary, create hard deps to ensure
6581 correct merge order such that mutually blocking packages are never
6582 installed simultaneously."""
6584 if "--buildpkgonly" in self.myopts or \
6585 "--nodeps" in self.myopts:
6588 #if "deep" in self.myparams:
6590 # Pull in blockers from all installed packages that haven't already
6591 # been pulled into the depgraph. This is not enabled by default
6592 # due to the performance penalty that is incurred by all the
6593 # additional dep_check calls that are required.
6595 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6596 for myroot in self.trees:
6597 vardb = self.trees[myroot]["vartree"].dbapi
6598 portdb = self.trees[myroot]["porttree"].dbapi
6599 pkgsettings = self.pkgsettings[myroot]
6600 final_db = self.mydbapi[myroot]
6602 blocker_cache = BlockerCache(myroot, vardb)
6603 stale_cache = set(blocker_cache)
6606 stale_cache.discard(cpv)
6607 pkg_in_graph = self.digraph.contains(pkg)
6609 # Check for masked installed packages. Only warn about
6610 # packages that are in the graph in order to avoid warning
6611 # about those that will be automatically uninstalled during
6612 # the merge process or by --depclean.
6614 if pkg_in_graph and not visible(pkgsettings, pkg):
6615 self._masked_installed.add(pkg)
6617 blocker_atoms = None
6623 self._blocker_parents.child_nodes(pkg))
6628 self._irrelevant_blockers.child_nodes(pkg))
6631 if blockers is not None:
6632 blockers = set(str(blocker.atom) \
6633 for blocker in blockers)
6635 # If this node has any blockers, create a "nomerge"
6636 # node for it so that they can be enforced.
6637 self.spinner.update()
6638 blocker_data = blocker_cache.get(cpv)
6639 if blocker_data is not None and \
6640 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6643 # If blocker data from the graph is available, use
6644 # it to validate the cache and update the cache if
6646 if blocker_data is not None and \
6647 blockers is not None:
6648 if not blockers.symmetric_difference(
6649 blocker_data.atoms):
6653 if blocker_data is None and \
6654 blockers is not None:
6655 # Re-use the blockers from the graph.
6656 blocker_atoms = sorted(blockers)
6657 counter = long(pkg.metadata["COUNTER"])
6659 blocker_cache.BlockerData(counter, blocker_atoms)
6660 blocker_cache[pkg.cpv] = blocker_data
6664 blocker_atoms = blocker_data.atoms
6666 # Use aux_get() to trigger FakeVartree global
6667 # updates on *DEPEND when appropriate.
6668 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6669 # It is crucial to pass in final_db here in order to
6670 # optimize dep_check calls by eliminating atoms via
6671 # dep_wordreduce and dep_eval calls.
6673 portage.dep._dep_check_strict = False
6675 success, atoms = portage.dep_check(depstr,
6676 final_db, pkgsettings, myuse=pkg.use.enabled,
6677 trees=self._graph_trees, myroot=myroot)
6678 except Exception, e:
6679 if isinstance(e, SystemExit):
6681 # This is helpful, for example, if a ValueError
6682 # is thrown from cpv_expand due to multiple
6683 # matches (this can happen if an atom lacks a
6685 show_invalid_depstring_notice(
6686 pkg, depstr, str(e))
6690 portage.dep._dep_check_strict = True
6692 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6693 if replacement_pkg and \
6694 replacement_pkg[0].operation == "merge":
6695 # This package is being replaced anyway, so
6696 # ignore invalid dependencies so as not to
6697 # annoy the user too much (otherwise they'd be
6698 # forced to manually unmerge it first).
6700 show_invalid_depstring_notice(pkg, depstr, atoms)
6702 blocker_atoms = [myatom for myatom in atoms \
6703 if myatom.startswith("!")]
6704 blocker_atoms.sort()
6705 counter = long(pkg.metadata["COUNTER"])
6706 blocker_cache[cpv] = \
6707 blocker_cache.BlockerData(counter, blocker_atoms)
6710 for atom in blocker_atoms:
6711 blocker = Blocker(atom=portage.dep.Atom(atom),
6712 eapi=pkg.metadata["EAPI"], root=myroot)
6713 self._blocker_parents.add(blocker, pkg)
6714 except portage.exception.InvalidAtom, e:
6715 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6716 show_invalid_depstring_notice(
6717 pkg, depstr, "Invalid Atom: %s" % (e,))
6719 for cpv in stale_cache:
6720 del blocker_cache[cpv]
6721 blocker_cache.flush()
6724 # Discard any "uninstall" tasks scheduled by previous calls
6725 # to this method, since those tasks may not make sense given
6726 # the current graph state.
6727 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6728 if previous_uninstall_tasks:
6729 self._blocker_uninstalls = digraph()
6730 self.digraph.difference_update(previous_uninstall_tasks)
6732 for blocker in self._blocker_parents.leaf_nodes():
6733 self.spinner.update()
6734 root_config = self.roots[blocker.root]
6735 virtuals = root_config.settings.getvirtuals()
6736 myroot = blocker.root
6737 initial_db = self.trees[myroot]["vartree"].dbapi
6738 final_db = self.mydbapi[myroot]
6740 provider_virtual = False
6741 if blocker.cp in virtuals and \
6742 not self._have_new_virt(blocker.root, blocker.cp):
6743 provider_virtual = True
6745 if provider_virtual:
6747 for provider_entry in virtuals[blocker.cp]:
6749 portage.dep_getkey(provider_entry)
6750 atoms.append(blocker.atom.replace(
6751 blocker.cp, provider_cp))
6753 atoms = [blocker.atom]
6755 blocked_initial = []
6757 blocked_initial.extend(initial_db.match_pkgs(atom))
6761 blocked_final.extend(final_db.match_pkgs(atom))
6763 if not blocked_initial and not blocked_final:
6764 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6765 self._blocker_parents.remove(blocker)
6766 # Discard any parents that don't have any more blockers.
6767 for pkg in parent_pkgs:
6768 self._irrelevant_blockers.add(blocker, pkg)
6769 if not self._blocker_parents.child_nodes(pkg):
6770 self._blocker_parents.remove(pkg)
6772 for parent in self._blocker_parents.parent_nodes(blocker):
6773 unresolved_blocks = False
6774 depends_on_order = set()
6775 for pkg in blocked_initial:
6776 if pkg.slot_atom == parent.slot_atom:
6777 # TODO: Support blocks within slots in cases where it
6778 # might make sense. For example, a new version might
6779 # require that the old version be uninstalled at build
6782 if parent.installed:
6783 # Two currently installed packages conflict with
6784 # eachother. Ignore this case since the damage
6785 # is already done and this would be likely to
6786 # confuse users if displayed like a normal blocker.
6789 self._blocked_pkgs.add(pkg, blocker)
6791 if parent.operation == "merge":
6792 # Maybe the blocked package can be replaced or simply
6793 # unmerged to resolve this block.
6794 depends_on_order.add((pkg, parent))
6796 # None of the above blocker resolutions techniques apply,
6797 # so apparently this one is unresolvable.
6798 unresolved_blocks = True
6799 for pkg in blocked_final:
6800 if pkg.slot_atom == parent.slot_atom:
6801 # TODO: Support blocks within slots.
6803 if parent.operation == "nomerge" and \
6804 pkg.operation == "nomerge":
6805 # This blocker will be handled the next time that a
6806 # merge of either package is triggered.
6809 self._blocked_pkgs.add(pkg, blocker)
6811 # Maybe the blocking package can be
6812 # unmerged to resolve this block.
6813 if parent.operation == "merge" and pkg.installed:
6814 depends_on_order.add((pkg, parent))
6816 elif parent.operation == "nomerge":
6817 depends_on_order.add((parent, pkg))
6819 # None of the above blocker resolutions techniques apply,
6820 # so apparently this one is unresolvable.
6821 unresolved_blocks = True
6823 # Make sure we don't unmerge any package that have been pulled
6825 if not unresolved_blocks and depends_on_order:
6826 for inst_pkg, inst_task in depends_on_order:
6827 if self.digraph.contains(inst_pkg) and \
6828 self.digraph.parent_nodes(inst_pkg):
6829 unresolved_blocks = True
6832 if not unresolved_blocks and depends_on_order:
6833 for inst_pkg, inst_task in depends_on_order:
6834 uninst_task = Package(built=inst_pkg.built,
6835 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6836 metadata=inst_pkg.metadata,
6837 operation="uninstall",
6838 root_config=inst_pkg.root_config,
6839 type_name=inst_pkg.type_name)
6840 self._pkg_cache[uninst_task] = uninst_task
6841 # Enforce correct merge order with a hard dep.
6842 self.digraph.addnode(uninst_task, inst_task,
6843 priority=BlockerDepPriority.instance)
6844 # Count references to this blocker so that it can be
6845 # invalidated after nodes referencing it have been
6847 self._blocker_uninstalls.addnode(uninst_task, blocker)
6848 if not unresolved_blocks and not depends_on_order:
6849 self._irrelevant_blockers.add(blocker, parent)
6850 self._blocker_parents.remove_edge(blocker, parent)
6851 if not self._blocker_parents.parent_nodes(blocker):
6852 self._blocker_parents.remove(blocker)
6853 if not self._blocker_parents.child_nodes(parent):
6854 self._blocker_parents.remove(parent)
6855 if unresolved_blocks:
6856 self._unsolvable_blockers.add(blocker, parent)
6860 def _accept_blocker_conflicts(self):
6862 for x in ("--buildpkgonly", "--fetchonly",
6863 "--fetch-all-uri", "--nodeps"):
6864 if x in self.myopts:
6869 def _merge_order_bias(self, mygraph):
6871 For optimal leaf node selection, promote deep system runtime deps and
6872 order nodes from highest to lowest overall reference count.
6876 for node in mygraph.order:
6877 node_info[node] = len(mygraph.parent_nodes(node))
6878 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
6880 def cmp_merge_preference(node1, node2):
6882 if node1.operation == 'uninstall':
6883 if node2.operation == 'uninstall':
6887 if node2.operation == 'uninstall':
6888 if node1.operation == 'uninstall':
6892 node1_sys = node1 in deep_system_deps
6893 node2_sys = node2 in deep_system_deps
6894 if node1_sys != node2_sys:
6899 return node_info[node2] - node_info[node1]
6901 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
6903 def altlist(self, reversed=False):
6905 while self._serialized_tasks_cache is None:
6906 self._resolve_conflicts()
6908 self._serialized_tasks_cache, self._scheduler_graph = \
6909 self._serialize_tasks()
6910 except self._serialize_tasks_retry:
6913 retlist = self._serialized_tasks_cache[:]
6918 def schedulerGraph(self):
6920 The scheduler graph is identical to the normal one except that
6921 uninstall edges are reversed in specific cases that require
6922 conflicting packages to be temporarily installed simultaneously.
6923 This is intended for use by the Scheduler in it's parallelization
6924 logic. It ensures that temporary simultaneous installation of
6925 conflicting packages is avoided when appropriate (especially for
6926 !!atom blockers), but allowed in specific cases that require it.
6928 Note that this method calls break_refs() which alters the state of
6929 internal Package instances such that this depgraph instance should
6930 not be used to perform any more calculations.
6932 if self._scheduler_graph is None:
6934 self.break_refs(self._scheduler_graph.order)
6935 return self._scheduler_graph
6937 def break_refs(self, nodes):
6939 Take a mergelist like that returned from self.altlist() and
6940 break any references that lead back to the depgraph. This is
6941 useful if you want to hold references to packages without
6942 also holding the depgraph on the heap.
6945 if hasattr(node, "root_config"):
6946 # The FakeVartree references the _package_cache which
6947 # references the depgraph. So that Package instances don't
6948 # hold the depgraph and FakeVartree on the heap, replace
6949 # the RootConfig that references the FakeVartree with the
6950 # original RootConfig instance which references the actual
6952 node.root_config = \
6953 self._trees_orig[node.root_config.root]["root_config"]
6955 def _resolve_conflicts(self):
6956 if not self._complete_graph():
6957 raise self._unknown_internal_error()
6959 if not self.validate_blockers():
6960 raise self._unknown_internal_error()
6962 if self._slot_collision_info:
6963 self._process_slot_conflicts()
6965 def _serialize_tasks(self):
6967 if "--debug" in self.myopts:
6968 writemsg("\ndigraph:\n\n", noiselevel=-1)
6969 self.digraph.debug_print()
6970 writemsg("\n", noiselevel=-1)
6972 scheduler_graph = self.digraph.copy()
6973 mygraph=self.digraph.copy()
6974 # Prune "nomerge" root nodes if nothing depends on them, since
6975 # otherwise they slow down merge order calculation. Don't remove
6976 # non-root nodes since they help optimize merge order in some cases
6977 # such as revdep-rebuild.
6978 removed_nodes = set()
6980 for node in mygraph.root_nodes():
6981 if not isinstance(node, Package) or \
6982 node.installed or node.onlydeps:
6983 removed_nodes.add(node)
6985 self.spinner.update()
6986 mygraph.difference_update(removed_nodes)
6987 if not removed_nodes:
6989 removed_nodes.clear()
6990 self._merge_order_bias(mygraph)
6991 def cmp_circular_bias(n1, n2):
6993 RDEPEND is stronger than PDEPEND and this function
6994 measures such a strength bias within a circular
6995 dependency relationship.
6997 n1_n2_medium = n2 in mygraph.child_nodes(n1,
6998 ignore_priority=priority_range.ignore_medium_soft)
6999 n2_n1_medium = n1 in mygraph.child_nodes(n2,
7000 ignore_priority=priority_range.ignore_medium_soft)
7001 if n1_n2_medium == n2_n1_medium:
7006 myblocker_uninstalls = self._blocker_uninstalls.copy()
7008 # Contains uninstall tasks that have been scheduled to
7009 # occur after overlapping blockers have been installed.
7010 scheduled_uninstalls = set()
7011 # Contains any Uninstall tasks that have been ignored
7012 # in order to avoid the circular deps code path. These
7013 # correspond to blocker conflicts that could not be
7015 ignored_uninstall_tasks = set()
7016 have_uninstall_task = False
7017 complete = "complete" in self.myparams
7020 def get_nodes(**kwargs):
7022 Returns leaf nodes excluding Uninstall instances
7023 since those should be executed as late as possible.
7025 return [node for node in mygraph.leaf_nodes(**kwargs) \
7026 if isinstance(node, Package) and \
7027 (node.operation != "uninstall" or \
7028 node in scheduled_uninstalls)]
7030 # sys-apps/portage needs special treatment if ROOT="/"
7031 running_root = self._running_root.root
7032 from portage.const import PORTAGE_PACKAGE_ATOM
7033 runtime_deps = InternalPackageSet(
7034 initial_atoms=[PORTAGE_PACKAGE_ATOM])
7035 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
7036 PORTAGE_PACKAGE_ATOM)
7037 replacement_portage = self.mydbapi[running_root].match_pkgs(
7038 PORTAGE_PACKAGE_ATOM)
7041 running_portage = running_portage[0]
7043 running_portage = None
7045 if replacement_portage:
7046 replacement_portage = replacement_portage[0]
7048 replacement_portage = None
7050 if replacement_portage == running_portage:
7051 replacement_portage = None
7053 if replacement_portage is not None:
7054 # update from running_portage to replacement_portage asap
7055 asap_nodes.append(replacement_portage)
7057 if running_portage is not None:
7059 portage_rdepend = self._select_atoms_highest_available(
7060 running_root, running_portage.metadata["RDEPEND"],
7061 myuse=running_portage.use.enabled,
7062 parent=running_portage, strict=False)
7063 except portage.exception.InvalidDependString, e:
7064 portage.writemsg("!!! Invalid RDEPEND in " + \
7065 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7066 (running_root, running_portage.cpv, e), noiselevel=-1)
7068 portage_rdepend = []
7069 runtime_deps.update(atom for atom in portage_rdepend \
7070 if not atom.startswith("!"))
7072 def gather_deps(ignore_priority, mergeable_nodes,
7073 selected_nodes, node):
7075 Recursively gather a group of nodes that RDEPEND on
7076 eachother. This ensures that they are merged as a group
7077 and get their RDEPENDs satisfied as soon as possible.
7079 if node in selected_nodes:
7081 if node not in mergeable_nodes:
7083 if node == replacement_portage and \
7084 mygraph.child_nodes(node,
7085 ignore_priority=priority_range.ignore_medium_soft):
7086 # Make sure that portage always has all of it's
7087 # RDEPENDs installed first.
7089 selected_nodes.add(node)
7090 for child in mygraph.child_nodes(node,
7091 ignore_priority=ignore_priority):
7092 if not gather_deps(ignore_priority,
7093 mergeable_nodes, selected_nodes, child):
7097 def ignore_uninst_or_med(priority):
7098 if priority is BlockerDepPriority.instance:
7100 return priority_range.ignore_medium(priority)
7102 def ignore_uninst_or_med_soft(priority):
7103 if priority is BlockerDepPriority.instance:
7105 return priority_range.ignore_medium_soft(priority)
7107 tree_mode = "--tree" in self.myopts
7108 # Tracks whether or not the current iteration should prefer asap_nodes
7109 # if available. This is set to False when the previous iteration
7110 # failed to select any nodes. It is reset whenever nodes are
7111 # successfully selected.
7114 # Controls whether or not the current iteration should drop edges that
7115 # are "satisfied" by installed packages, in order to solve circular
7116 # dependencies. The deep runtime dependencies of installed packages are
7117 # not checked in this case (bug #199856), so it must be avoided
7118 # whenever possible.
7119 drop_satisfied = False
7121 # State of variables for successive iterations that loosen the
7122 # criteria for node selection.
7124 # iteration prefer_asap drop_satisfied
7129 # If no nodes are selected on the last iteration, it is due to
7130 # unresolved blockers or circular dependencies.
7132 while not mygraph.empty():
7133 self.spinner.update()
7134 selected_nodes = None
7135 ignore_priority = None
7136 if drop_satisfied or (prefer_asap and asap_nodes):
7137 priority_range = DepPrioritySatisfiedRange
7139 priority_range = DepPriorityNormalRange
7140 if prefer_asap and asap_nodes:
7141 # ASAP nodes are merged before their soft deps. Go ahead and
7142 # select root nodes here if necessary, since it's typical for
7143 # the parent to have been removed from the graph already.
7144 asap_nodes = [node for node in asap_nodes \
7145 if mygraph.contains(node)]
7146 for node in asap_nodes:
7147 if not mygraph.child_nodes(node,
7148 ignore_priority=priority_range.ignore_soft):
7149 selected_nodes = [node]
7150 asap_nodes.remove(node)
7152 if not selected_nodes and \
7153 not (prefer_asap and asap_nodes):
7154 for i in xrange(priority_range.NONE,
7155 priority_range.MEDIUM_SOFT + 1):
7156 ignore_priority = priority_range.ignore_priority[i]
7157 nodes = get_nodes(ignore_priority=ignore_priority)
7159 # If there is a mix of uninstall nodes with other
7160 # types, save the uninstall nodes for later since
7161 # sometimes a merge node will render an uninstall
7162 # node unnecessary (due to occupying the same slot),
7163 # and we want to avoid executing a separate uninstall
7164 # task in that case.
7166 good_uninstalls = []
7167 with_some_uninstalls_excluded = []
7169 if node.operation == "uninstall":
7170 slot_node = self.mydbapi[node.root
7171 ].match_pkgs(node.slot_atom)
7173 slot_node[0].operation == "merge":
7175 good_uninstalls.append(node)
7176 with_some_uninstalls_excluded.append(node)
7178 nodes = good_uninstalls
7179 elif with_some_uninstalls_excluded:
7180 nodes = with_some_uninstalls_excluded
7184 if ignore_priority is None and not tree_mode:
7185 # Greedily pop all of these nodes since no
7186 # relationship has been ignored. This optimization
7187 # destroys --tree output, so it's disabled in tree
7189 selected_nodes = nodes
7191 # For optimal merge order:
7192 # * Only pop one node.
7193 # * Removing a root node (node without a parent)
7194 # will not produce a leaf node, so avoid it.
7195 # * It's normal for a selected uninstall to be a
7196 # root node, so don't check them for parents.
7198 if node.operation == "uninstall" or \
7199 mygraph.parent_nodes(node):
7200 selected_nodes = [node]
7206 if not selected_nodes:
7207 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7209 mergeable_nodes = set(nodes)
7210 if prefer_asap and asap_nodes:
7212 for i in xrange(priority_range.SOFT,
7213 priority_range.MEDIUM_SOFT + 1):
7214 ignore_priority = priority_range.ignore_priority[i]
7216 if not mygraph.parent_nodes(node):
7218 selected_nodes = set()
7219 if gather_deps(ignore_priority,
7220 mergeable_nodes, selected_nodes, node):
7223 selected_nodes = None
7227 if prefer_asap and asap_nodes and not selected_nodes:
7228 # We failed to find any asap nodes to merge, so ignore
7229 # them for the next iteration.
7233 if selected_nodes and ignore_priority is not None:
7234 # Try to merge ignored medium_soft deps as soon as possible
7235 # if they're not satisfied by installed packages.
7236 for node in selected_nodes:
7237 children = set(mygraph.child_nodes(node))
7238 soft = children.difference(
7239 mygraph.child_nodes(node,
7240 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7241 medium_soft = children.difference(
7242 mygraph.child_nodes(node,
7244 DepPrioritySatisfiedRange.ignore_medium_soft))
7245 medium_soft.difference_update(soft)
7246 for child in medium_soft:
7247 if child in selected_nodes:
7249 if child in asap_nodes:
7251 asap_nodes.append(child)
7253 if selected_nodes and len(selected_nodes) > 1:
7254 if not isinstance(selected_nodes, list):
7255 selected_nodes = list(selected_nodes)
7256 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7258 if not selected_nodes and not myblocker_uninstalls.is_empty():
7259 # An Uninstall task needs to be executed in order to
7260 # avoid conflict if possible.
7263 priority_range = DepPrioritySatisfiedRange
7265 priority_range = DepPriorityNormalRange
7267 mergeable_nodes = get_nodes(
7268 ignore_priority=ignore_uninst_or_med)
7270 min_parent_deps = None
7272 for task in myblocker_uninstalls.leaf_nodes():
7273 # Do some sanity checks so that system or world packages
7274 # don't get uninstalled inappropriately here (only really
7275 # necessary when --complete-graph has not been enabled).
7277 if task in ignored_uninstall_tasks:
7280 if task in scheduled_uninstalls:
7281 # It's been scheduled but it hasn't
7282 # been executed yet due to dependence
7283 # on installation of blocking packages.
7286 root_config = self.roots[task.root]
7287 inst_pkg = self._pkg_cache[
7288 ("installed", task.root, task.cpv, "nomerge")]
7290 if self.digraph.contains(inst_pkg):
7293 forbid_overlap = False
7294 heuristic_overlap = False
7295 for blocker in myblocker_uninstalls.parent_nodes(task):
7296 if blocker.eapi in ("0", "1"):
7297 heuristic_overlap = True
7298 elif blocker.atom.blocker.overlap.forbid:
7299 forbid_overlap = True
7301 if forbid_overlap and running_root == task.root:
7304 if heuristic_overlap and running_root == task.root:
7305 # Never uninstall sys-apps/portage or it's essential
7306 # dependencies, except through replacement.
7308 runtime_dep_atoms = \
7309 list(runtime_deps.iterAtomsForPackage(task))
7310 except portage.exception.InvalidDependString, e:
7311 portage.writemsg("!!! Invalid PROVIDE in " + \
7312 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7313 (task.root, task.cpv, e), noiselevel=-1)
7317 # Don't uninstall a runtime dep if it appears
7318 # to be the only suitable one installed.
7320 vardb = root_config.trees["vartree"].dbapi
7321 for atom in runtime_dep_atoms:
7322 other_version = None
7323 for pkg in vardb.match_pkgs(atom):
7324 if pkg.cpv == task.cpv and \
7325 pkg.metadata["COUNTER"] == \
7326 task.metadata["COUNTER"]:
7330 if other_version is None:
7336 # For packages in the system set, don't take
7337 # any chances. If the conflict can't be resolved
7338 # by a normal replacement operation then abort.
7341 for atom in root_config.sets[
7342 "system"].iterAtomsForPackage(task):
7345 except portage.exception.InvalidDependString, e:
7346 portage.writemsg("!!! Invalid PROVIDE in " + \
7347 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7348 (task.root, task.cpv, e), noiselevel=-1)
7354 # Note that the world check isn't always
7355 # necessary since self._complete_graph() will
7356 # add all packages from the system and world sets to the
7357 # graph. This just allows unresolved conflicts to be
7358 # detected as early as possible, which makes it possible
7359 # to avoid calling self._complete_graph() when it is
7360 # unnecessary due to blockers triggering an abortion.
7362 # For packages in the world set, go ahead an uninstall
7363 # when necessary, as long as the atom will be satisfied
7364 # in the final state.
7365 graph_db = self.mydbapi[task.root]
7368 for atom in root_config.sets[
7369 "world"].iterAtomsForPackage(task):
7371 for pkg in graph_db.match_pkgs(atom):
7378 self._blocked_world_pkgs[inst_pkg] = atom
7380 except portage.exception.InvalidDependString, e:
7381 portage.writemsg("!!! Invalid PROVIDE in " + \
7382 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7383 (task.root, task.cpv, e), noiselevel=-1)
7389 # Check the deps of parent nodes to ensure that
7390 # the chosen task produces a leaf node. Maybe
7391 # this can be optimized some more to make the
7392 # best possible choice, but the current algorithm
7393 # is simple and should be near optimal for most
7395 mergeable_parent = False
7397 for parent in mygraph.parent_nodes(task):
7398 parent_deps.update(mygraph.child_nodes(parent,
7399 ignore_priority=priority_range.ignore_medium_soft))
7400 if parent in mergeable_nodes and \
7401 gather_deps(ignore_uninst_or_med_soft,
7402 mergeable_nodes, set(), parent):
7403 mergeable_parent = True
7405 if not mergeable_parent:
7408 parent_deps.remove(task)
7409 if min_parent_deps is None or \
7410 len(parent_deps) < min_parent_deps:
7411 min_parent_deps = len(parent_deps)
7414 if uninst_task is not None:
7415 # The uninstall is performed only after blocking
7416 # packages have been merged on top of it. File
7417 # collisions between blocking packages are detected
7418 # and removed from the list of files to be uninstalled.
7419 scheduled_uninstalls.add(uninst_task)
7420 parent_nodes = mygraph.parent_nodes(uninst_task)
7422 # Reverse the parent -> uninstall edges since we want
7423 # to do the uninstall after blocking packages have
7424 # been merged on top of it.
7425 mygraph.remove(uninst_task)
7426 for blocked_pkg in parent_nodes:
7427 mygraph.add(blocked_pkg, uninst_task,
7428 priority=BlockerDepPriority.instance)
7429 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7430 scheduler_graph.add(blocked_pkg, uninst_task,
7431 priority=BlockerDepPriority.instance)
7433 # Reset the state variables for leaf node selection and
7434 # continue trying to select leaf nodes.
7436 drop_satisfied = False
7439 if not selected_nodes:
7440 # Only select root nodes as a last resort. This case should
7441 # only trigger when the graph is nearly empty and the only
7442 # remaining nodes are isolated (no parents or children). Since
7443 # the nodes must be isolated, ignore_priority is not needed.
7444 selected_nodes = get_nodes()
7446 if not selected_nodes and not drop_satisfied:
7447 drop_satisfied = True
7450 if not selected_nodes and not myblocker_uninstalls.is_empty():
7451 # If possible, drop an uninstall task here in order to avoid
7452 # the circular deps code path. The corresponding blocker will
7453 # still be counted as an unresolved conflict.
7455 for node in myblocker_uninstalls.leaf_nodes():
7457 mygraph.remove(node)
7462 ignored_uninstall_tasks.add(node)
7465 if uninst_task is not None:
7466 # Reset the state variables for leaf node selection and
7467 # continue trying to select leaf nodes.
7469 drop_satisfied = False
7472 if not selected_nodes:
7473 self._circular_deps_for_display = mygraph
7474 raise self._unknown_internal_error()
7476 # At this point, we've succeeded in selecting one or more nodes, so
7477 # reset state variables for leaf node selection.
7479 drop_satisfied = False
7481 mygraph.difference_update(selected_nodes)
7483 for node in selected_nodes:
7484 if isinstance(node, Package) and \
7485 node.operation == "nomerge":
7488 # Handle interactions between blockers
7489 # and uninstallation tasks.
7490 solved_blockers = set()
7492 if isinstance(node, Package) and \
7493 "uninstall" == node.operation:
7494 have_uninstall_task = True
7497 vardb = self.trees[node.root]["vartree"].dbapi
7498 previous_cpv = vardb.match(node.slot_atom)
7500 # The package will be replaced by this one, so remove
7501 # the corresponding Uninstall task if necessary.
7502 previous_cpv = previous_cpv[0]
7504 ("installed", node.root, previous_cpv, "uninstall")
7506 mygraph.remove(uninst_task)
7510 if uninst_task is not None and \
7511 uninst_task not in ignored_uninstall_tasks and \
7512 myblocker_uninstalls.contains(uninst_task):
7513 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7514 myblocker_uninstalls.remove(uninst_task)
7515 # Discard any blockers that this Uninstall solves.
7516 for blocker in blocker_nodes:
7517 if not myblocker_uninstalls.child_nodes(blocker):
7518 myblocker_uninstalls.remove(blocker)
7519 solved_blockers.add(blocker)
7521 retlist.append(node)
7523 if (isinstance(node, Package) and \
7524 "uninstall" == node.operation) or \
7525 (uninst_task is not None and \
7526 uninst_task in scheduled_uninstalls):
7527 # Include satisfied blockers in the merge list
7528 # since the user might be interested and also
7529 # it serves as an indicator that blocking packages
7530 # will be temporarily installed simultaneously.
7531 for blocker in solved_blockers:
7532 retlist.append(Blocker(atom=blocker.atom,
7533 root=blocker.root, eapi=blocker.eapi,
7536 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7537 for node in myblocker_uninstalls.root_nodes():
7538 unsolvable_blockers.add(node)
7540 for blocker in unsolvable_blockers:
7541 retlist.append(blocker)
7543 # If any Uninstall tasks need to be executed in order
7544 # to avoid a conflict, complete the graph with any
7545 # dependencies that may have been initially
7546 # neglected (to ensure that unsafe Uninstall tasks
7547 # are properly identified and blocked from execution).
7548 if have_uninstall_task and \
7550 not unsolvable_blockers:
7551 self.myparams.add("complete")
7552 raise self._serialize_tasks_retry("")
7554 if unsolvable_blockers and \
7555 not self._accept_blocker_conflicts():
7556 self._unsatisfied_blockers_for_display = unsolvable_blockers
7557 self._serialized_tasks_cache = retlist[:]
7558 self._scheduler_graph = scheduler_graph
7559 raise self._unknown_internal_error()
7561 if self._slot_collision_info and \
7562 not self._accept_blocker_conflicts():
7563 self._serialized_tasks_cache = retlist[:]
7564 self._scheduler_graph = scheduler_graph
7565 raise self._unknown_internal_error()
7567 return retlist, scheduler_graph
7569 def _show_circular_deps(self, mygraph):
7570 # No leaf nodes are available, so we have a circular
7571 # dependency panic situation. Reduce the noise level to a
7572 # minimum via repeated elimination of root nodes since they
7573 # have no parents and thus can not be part of a cycle.
7575 root_nodes = mygraph.root_nodes(
7576 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7579 mygraph.difference_update(root_nodes)
7580 # Display the USE flags that are enabled on nodes that are part
7581 # of dependency cycles in case that helps the user decide to
7582 # disable some of them.
7584 tempgraph = mygraph.copy()
7585 while not tempgraph.empty():
7586 nodes = tempgraph.leaf_nodes()
7588 node = tempgraph.order[0]
7591 display_order.append(node)
7592 tempgraph.remove(node)
7593 display_order.reverse()
7594 self.myopts.pop("--quiet", None)
7595 self.myopts.pop("--verbose", None)
7596 self.myopts["--tree"] = True
7597 portage.writemsg("\n\n", noiselevel=-1)
7598 self.display(display_order)
7599 prefix = colorize("BAD", " * ")
7600 portage.writemsg("\n", noiselevel=-1)
7601 portage.writemsg(prefix + "Error: circular dependencies:\n",
7603 portage.writemsg("\n", noiselevel=-1)
7604 mygraph.debug_print()
7605 portage.writemsg("\n", noiselevel=-1)
7606 portage.writemsg(prefix + "Note that circular dependencies " + \
7607 "can often be avoided by temporarily\n", noiselevel=-1)
7608 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7609 "optional dependencies.\n", noiselevel=-1)
7611 def _show_merge_list(self):
7612 if self._serialized_tasks_cache is not None and \
7613 not (self._displayed_list and \
7614 (self._displayed_list == self._serialized_tasks_cache or \
7615 self._displayed_list == \
7616 list(reversed(self._serialized_tasks_cache)))):
7617 display_list = self._serialized_tasks_cache[:]
7618 if "--tree" in self.myopts:
7619 display_list.reverse()
7620 self.display(display_list)
7622 def _show_unsatisfied_blockers(self, blockers):
7623 self._show_merge_list()
7624 msg = "Error: The above package list contains " + \
7625 "packages which cannot be installed " + \
7626 "at the same time on the same system."
7627 prefix = colorize("BAD", " * ")
7628 from textwrap import wrap
7629 portage.writemsg("\n", noiselevel=-1)
7630 for line in wrap(msg, 70):
7631 portage.writemsg(prefix + line + "\n", noiselevel=-1)
7633 # Display the conflicting packages along with the packages
7634 # that pulled them in. This is helpful for troubleshooting
7635 # cases in which blockers don't solve automatically and
7636 # the reasons are not apparent from the normal merge list
7640 for blocker in blockers:
7641 for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7642 self._blocker_parents.parent_nodes(blocker)):
7643 parent_atoms = self._parent_atoms.get(pkg)
7644 if not parent_atoms:
7645 atom = self._blocked_world_pkgs.get(pkg)
7646 if atom is not None:
7647 parent_atoms = set([("@world", atom)])
7649 conflict_pkgs[pkg] = parent_atoms
7652 # Reduce noise by pruning packages that are only
7653 # pulled in by other conflict packages.
7655 for pkg, parent_atoms in conflict_pkgs.iteritems():
7656 relevant_parent = False
7657 for parent, atom in parent_atoms:
7658 if parent not in conflict_pkgs:
7659 relevant_parent = True
7661 if not relevant_parent:
7662 pruned_pkgs.add(pkg)
7663 for pkg in pruned_pkgs:
7664 del conflict_pkgs[pkg]
7670 # Max number of parents shown, to avoid flooding the display.
7672 for pkg, parent_atoms in conflict_pkgs.iteritems():
7676 # Prefer packages that are not directly involved in a conflict.
7677 for parent_atom in parent_atoms:
7678 if len(pruned_list) >= max_parents:
7680 parent, atom = parent_atom
7681 if parent not in conflict_pkgs:
7682 pruned_list.add(parent_atom)
7684 for parent_atom in parent_atoms:
7685 if len(pruned_list) >= max_parents:
7687 pruned_list.add(parent_atom)
7689 omitted_parents = len(parent_atoms) - len(pruned_list)
7690 msg.append(indent + "%s pulled in by\n" % pkg)
7692 for parent_atom in pruned_list:
7693 parent, atom = parent_atom
7694 msg.append(2*indent)
7695 if isinstance(parent,
7696 (PackageArg, AtomArg)):
7697 # For PackageArg and AtomArg types, it's
7698 # redundant to display the atom attribute.
7699 msg.append(str(parent))
7701 # Display the specific atom from SetArg or
7703 msg.append("%s required by %s" % (atom, parent))
7707 msg.append(2*indent)
7708 msg.append("(and %d more)\n" % omitted_parents)
7712 sys.stderr.write("".join(msg))
7715 if "--quiet" not in self.myopts:
7716 show_blocker_docs_link()
7718 def display(self, mylist, favorites=[], verbosity=None):
7720 # This is used to prevent display_problems() from
7721 # redundantly displaying this exact same merge list
7722 # again via _show_merge_list().
7723 self._displayed_list = mylist
7725 if verbosity is None:
7726 verbosity = ("--quiet" in self.myopts and 1 or \
7727 "--verbose" in self.myopts and 3 or 2)
7728 favorites_set = InternalPackageSet(favorites)
7729 oneshot = "--oneshot" in self.myopts or \
7730 "--onlydeps" in self.myopts
7731 columns = "--columns" in self.myopts
7736 counters = PackageCounters()
7738 if verbosity == 1 and "--verbose" not in self.myopts:
7739 def create_use_string(*args):
7742 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7744 is_new, reinst_flags,
7745 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7746 alphabetical=("--alphabetical" in self.myopts)):
7754 cur_iuse = set(cur_iuse)
7755 enabled_flags = cur_iuse.intersection(cur_use)
7756 removed_iuse = set(old_iuse).difference(cur_iuse)
7757 any_iuse = cur_iuse.union(old_iuse)
7758 any_iuse = list(any_iuse)
7760 for flag in any_iuse:
7763 reinst_flag = reinst_flags and flag in reinst_flags
7764 if flag in enabled_flags:
7766 if is_new or flag in old_use and \
7767 (all_flags or reinst_flag):
7768 flag_str = red(flag)
7769 elif flag not in old_iuse:
7770 flag_str = yellow(flag) + "%*"
7771 elif flag not in old_use:
7772 flag_str = green(flag) + "*"
7773 elif flag in removed_iuse:
7774 if all_flags or reinst_flag:
7775 flag_str = yellow("-" + flag) + "%"
7778 flag_str = "(" + flag_str + ")"
7779 removed.append(flag_str)
7782 if is_new or flag in old_iuse and \
7783 flag not in old_use and \
7784 (all_flags or reinst_flag):
7785 flag_str = blue("-" + flag)
7786 elif flag not in old_iuse:
7787 flag_str = yellow("-" + flag)
7788 if flag not in iuse_forced:
7790 elif flag in old_use:
7791 flag_str = green("-" + flag) + "*"
7793 if flag in iuse_forced:
7794 flag_str = "(" + flag_str + ")"
7796 enabled.append(flag_str)
7798 disabled.append(flag_str)
7801 ret = " ".join(enabled)
7803 ret = " ".join(enabled + disabled + removed)
7805 ret = '%s="%s" ' % (name, ret)
7808 repo_display = RepoDisplay(self.roots)
7812 mygraph = self.digraph.copy()
7814 # If there are any Uninstall instances, add the corresponding
7815 # blockers to the digraph (useful for --tree display).
7817 executed_uninstalls = set(node for node in mylist \
7818 if isinstance(node, Package) and node.operation == "unmerge")
7820 for uninstall in self._blocker_uninstalls.leaf_nodes():
7821 uninstall_parents = \
7822 self._blocker_uninstalls.parent_nodes(uninstall)
7823 if not uninstall_parents:
7826 # Remove the corresponding "nomerge" node and substitute
7827 # the Uninstall node.
7828 inst_pkg = self._pkg_cache[
7829 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7831 mygraph.remove(inst_pkg)
7836 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7838 inst_pkg_blockers = []
7840 # Break the Package -> Uninstall edges.
7841 mygraph.remove(uninstall)
7843 # Resolution of a package's blockers
7844 # depend on it's own uninstallation.
7845 for blocker in inst_pkg_blockers:
7846 mygraph.add(uninstall, blocker)
7848 # Expand Package -> Uninstall edges into
7849 # Package -> Blocker -> Uninstall edges.
7850 for blocker in uninstall_parents:
7851 mygraph.add(uninstall, blocker)
7852 for parent in self._blocker_parents.parent_nodes(blocker):
7853 if parent != inst_pkg:
7854 mygraph.add(blocker, parent)
7856 # If the uninstall task did not need to be executed because
7857 # of an upgrade, display Blocker -> Upgrade edges since the
7858 # corresponding Blocker -> Uninstall edges will not be shown.
7860 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7861 if upgrade_node is not None and \
7862 uninstall not in executed_uninstalls:
7863 for blocker in uninstall_parents:
7864 mygraph.add(upgrade_node, blocker)
7866 unsatisfied_blockers = []
7871 if isinstance(x, Blocker) and not x.satisfied:
7872 unsatisfied_blockers.append(x)
7875 if "--tree" in self.myopts:
7876 depth = len(tree_nodes)
7877 while depth and graph_key not in \
7878 mygraph.child_nodes(tree_nodes[depth-1]):
7881 tree_nodes = tree_nodes[:depth]
7882 tree_nodes.append(graph_key)
7883 display_list.append((x, depth, True))
7884 shown_edges.add((graph_key, tree_nodes[depth-1]))
7886 traversed_nodes = set() # prevent endless circles
7887 traversed_nodes.add(graph_key)
7888 def add_parents(current_node, ordered):
7890 # Do not traverse to parents if this node is an
7891 # an argument or a direct member of a set that has
7892 # been specified as an argument (system or world).
7893 if current_node not in self._set_nodes:
7894 parent_nodes = mygraph.parent_nodes(current_node)
7896 child_nodes = set(mygraph.child_nodes(current_node))
7897 selected_parent = None
7898 # First, try to avoid a direct cycle.
7899 for node in parent_nodes:
7900 if not isinstance(node, (Blocker, Package)):
7902 if node not in traversed_nodes and \
7903 node not in child_nodes:
7904 edge = (current_node, node)
7905 if edge in shown_edges:
7907 selected_parent = node
7909 if not selected_parent:
7910 # A direct cycle is unavoidable.
7911 for node in parent_nodes:
7912 if not isinstance(node, (Blocker, Package)):
7914 if node not in traversed_nodes:
7915 edge = (current_node, node)
7916 if edge in shown_edges:
7918 selected_parent = node
7921 shown_edges.add((current_node, selected_parent))
7922 traversed_nodes.add(selected_parent)
7923 add_parents(selected_parent, False)
7924 display_list.append((current_node,
7925 len(tree_nodes), ordered))
7926 tree_nodes.append(current_node)
7928 add_parents(graph_key, True)
7930 display_list.append((x, depth, True))
7931 mylist = display_list
7932 for x in unsatisfied_blockers:
7933 mylist.append((x, 0, True))
7935 last_merge_depth = 0
7936 for i in xrange(len(mylist)-1,-1,-1):
7937 graph_key, depth, ordered = mylist[i]
7938 if not ordered and depth == 0 and i > 0 \
7939 and graph_key == mylist[i-1][0] and \
7940 mylist[i-1][1] == 0:
7941 # An ordered node got a consecutive duplicate when the tree was
7945 if ordered and graph_key[-1] != "nomerge":
7946 last_merge_depth = depth
7948 if depth >= last_merge_depth or \
7949 i < len(mylist) - 1 and \
7950 depth >= mylist[i+1][1]:
7953 from portage import flatten
7954 from portage.dep import use_reduce, paren_reduce
7955 # files to fetch list - avoids counting a same file twice
7956 # in size display (verbose mode)
7959 # Use this set to detect when all the "repoadd" strings are "[0]"
7960 # and disable the entire repo display in this case.
7963 for mylist_index in xrange(len(mylist)):
7964 x, depth, ordered = mylist[mylist_index]
7968 portdb = self.trees[myroot]["porttree"].dbapi
7969 bindb = self.trees[myroot]["bintree"].dbapi
7970 vardb = self.trees[myroot]["vartree"].dbapi
7971 vartree = self.trees[myroot]["vartree"]
7972 pkgsettings = self.pkgsettings[myroot]
7975 indent = " " * depth
7977 if isinstance(x, Blocker):
7979 blocker_style = "PKG_BLOCKER_SATISFIED"
7980 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
7982 blocker_style = "PKG_BLOCKER"
7983 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
7985 counters.blocks += 1
7987 counters.blocks_satisfied += 1
7988 resolved = portage.key_expand(
7989 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7990 if "--columns" in self.myopts and "--quiet" in self.myopts:
7991 addl += " " + colorize(blocker_style, resolved)
7993 addl = "[%s %s] %s%s" % \
7994 (colorize(blocker_style, "blocks"),
7995 addl, indent, colorize(blocker_style, resolved))
7996 block_parents = self._blocker_parents.parent_nodes(x)
7997 block_parents = set([pnode[2] for pnode in block_parents])
7998 block_parents = ", ".join(block_parents)
8000 addl += colorize(blocker_style,
8001 " (\"%s\" is blocking %s)") % \
8002 (str(x.atom).lstrip("!"), block_parents)
8004 addl += colorize(blocker_style,
8005 " (is blocking %s)") % block_parents
8006 if isinstance(x, Blocker) and x.satisfied:
8011 blockers.append(addl)
8014 pkg_merge = ordered and pkg_status == "merge"
8015 if not pkg_merge and pkg_status == "merge":
8016 pkg_status = "nomerge"
8017 built = pkg_type != "ebuild"
8018 installed = pkg_type == "installed"
8020 metadata = pkg.metadata
8022 repo_name = metadata["repository"]
8023 if pkg_type == "ebuild":
8024 ebuild_path = portdb.findname(pkg_key)
8025 if not ebuild_path: # shouldn't happen
8026 raise portage.exception.PackageNotFound(pkg_key)
8027 repo_path_real = os.path.dirname(os.path.dirname(
8028 os.path.dirname(ebuild_path)))
8030 repo_path_real = portdb.getRepositoryPath(repo_name)
8031 pkg_use = list(pkg.use.enabled)
8033 restrict = flatten(use_reduce(paren_reduce(
8034 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8035 except portage.exception.InvalidDependString, e:
8036 if not pkg.installed:
8037 show_invalid_depstring_notice(x,
8038 pkg.metadata["RESTRICT"], str(e))
8042 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8043 "fetch" in restrict:
8046 counters.restrict_fetch += 1
8047 if portdb.fetch_check(pkg_key, pkg_use):
8050 counters.restrict_fetch_satisfied += 1
8052 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
8053 #param is used for -u, where you still *do* want to see when something is being upgraded.
8056 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8057 if vardb.cpv_exists(pkg_key):
8058 addl=" "+yellow("R")+fetch+" "
8061 counters.reinst += 1
8062 elif pkg_status == "uninstall":
8063 counters.uninst += 1
8064 # filter out old-style virtual matches
8065 elif installed_versions and \
8066 portage.cpv_getkey(installed_versions[0]) == \
8067 portage.cpv_getkey(pkg_key):
8068 myinslotlist = vardb.match(pkg.slot_atom)
8069 # If this is the first install of a new-style virtual, we
8070 # need to filter out old-style virtual matches.
8071 if myinslotlist and \
8072 portage.cpv_getkey(myinslotlist[0]) != \
8073 portage.cpv_getkey(pkg_key):
8076 myoldbest = myinslotlist[:]
8078 if not portage.dep.cpvequal(pkg_key,
8079 portage.best([pkg_key] + myoldbest)):
8081 addl += turquoise("U")+blue("D")
8083 counters.downgrades += 1
8086 addl += turquoise("U") + " "
8088 counters.upgrades += 1
8090 # New slot, mark it new.
8091 addl = " " + green("NS") + fetch + " "
8092 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8094 counters.newslot += 1
8096 if "--changelog" in self.myopts:
8097 inst_matches = vardb.match(pkg.slot_atom)
8099 changelogs.extend(self.calc_changelog(
8100 portdb.findname(pkg_key),
8101 inst_matches[0], pkg_key))
8103 addl = " " + green("N") + " " + fetch + " "
8112 forced_flags = set()
8113 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8114 forced_flags.update(pkgsettings.useforce)
8115 forced_flags.update(pkgsettings.usemask)
8117 cur_use = [flag for flag in pkg.use.enabled \
8118 if flag in pkg.iuse.all]
8119 cur_iuse = sorted(pkg.iuse.all)
8121 if myoldbest and myinslotlist:
8122 previous_cpv = myoldbest[0]
8124 previous_cpv = pkg.cpv
8125 if vardb.cpv_exists(previous_cpv):
8126 old_iuse, old_use = vardb.aux_get(
8127 previous_cpv, ["IUSE", "USE"])
8128 old_iuse = list(set(
8129 filter_iuse_defaults(old_iuse.split())))
8131 old_use = old_use.split()
8138 old_use = [flag for flag in old_use if flag in old_iuse]
8140 use_expand = pkgsettings["USE_EXPAND"].lower().split()
8142 use_expand.reverse()
8143 use_expand_hidden = \
8144 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8146 def map_to_use_expand(myvals, forcedFlags=False,
8150 for exp in use_expand:
8153 for val in myvals[:]:
8154 if val.startswith(exp.lower()+"_"):
8155 if val in forced_flags:
8156 forced[exp].add(val[len(exp)+1:])
8157 ret[exp].append(val[len(exp)+1:])
8160 forced["USE"] = [val for val in myvals \
8161 if val in forced_flags]
8163 for exp in use_expand_hidden:
8169 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8170 # are the only thing that triggered reinstallation.
8171 reinst_flags_map = {}
8172 reinstall_for_flags = self._reinstall_nodes.get(pkg)
8173 reinst_expand_map = None
8174 if reinstall_for_flags:
8175 reinst_flags_map = map_to_use_expand(
8176 list(reinstall_for_flags), removeHidden=False)
8177 for k in list(reinst_flags_map):
8178 if not reinst_flags_map[k]:
8179 del reinst_flags_map[k]
8180 if not reinst_flags_map.get("USE"):
8181 reinst_expand_map = reinst_flags_map.copy()
8182 reinst_expand_map.pop("USE", None)
8183 if reinst_expand_map and \
8184 not set(reinst_expand_map).difference(
8186 use_expand_hidden = \
8187 set(use_expand_hidden).difference(
8190 cur_iuse_map, iuse_forced = \
8191 map_to_use_expand(cur_iuse, forcedFlags=True)
8192 cur_use_map = map_to_use_expand(cur_use)
8193 old_iuse_map = map_to_use_expand(old_iuse)
8194 old_use_map = map_to_use_expand(old_use)
8197 use_expand.insert(0, "USE")
8199 for key in use_expand:
8200 if key in use_expand_hidden:
8202 verboseadd += create_use_string(key.upper(),
8203 cur_iuse_map[key], iuse_forced[key],
8204 cur_use_map[key], old_iuse_map[key],
8205 old_use_map[key], is_new,
8206 reinst_flags_map.get(key))
8211 if pkg_type == "ebuild" and pkg_merge:
8213 myfilesdict = portdb.getfetchsizes(pkg_key,
8214 useflags=pkg_use, debug=self.edebug)
8215 except portage.exception.InvalidDependString, e:
8216 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8217 show_invalid_depstring_notice(x, src_uri, str(e))
8220 if myfilesdict is None:
8221 myfilesdict="[empty/missing/bad digest]"
8223 for myfetchfile in myfilesdict:
8224 if myfetchfile not in myfetchlist:
8225 mysize+=myfilesdict[myfetchfile]
8226 myfetchlist.append(myfetchfile)
8228 counters.totalsize += mysize
8229 verboseadd += format_size(mysize)
8232 # assign index for a previous version in the same slot
8233 has_previous = False
8234 repo_name_prev = None
8235 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8237 slot_matches = vardb.match(slot_atom)
8240 repo_name_prev = vardb.aux_get(slot_matches[0],
8243 # now use the data to generate output
8244 if pkg.installed or not has_previous:
8245 repoadd = repo_display.repoStr(repo_path_real)
8247 repo_path_prev = None
8249 repo_path_prev = portdb.getRepositoryPath(
8251 if repo_path_prev == repo_path_real:
8252 repoadd = repo_display.repoStr(repo_path_real)
8254 repoadd = "%s=>%s" % (
8255 repo_display.repoStr(repo_path_prev),
8256 repo_display.repoStr(repo_path_real))
8258 repoadd_set.add(repoadd)
8260 xs = [portage.cpv_getkey(pkg_key)] + \
8261 list(portage.catpkgsplit(pkg_key)[2:])
8268 if "COLUMNWIDTH" in self.settings:
8270 mywidth = int(self.settings["COLUMNWIDTH"])
8271 except ValueError, e:
8272 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8274 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8275 self.settings["COLUMNWIDTH"], noiselevel=-1)
8277 oldlp = mywidth - 30
8280 # Convert myoldbest from a list to a string.
8284 for pos, key in enumerate(myoldbest):
8285 key = portage.catpkgsplit(key)[2] + \
8286 "-" + portage.catpkgsplit(key)[3]
8287 if key[-3:] == "-r0":
8289 myoldbest[pos] = key
8290 myoldbest = blue("["+", ".join(myoldbest)+"]")
8293 root_config = self.roots[myroot]
8294 system_set = root_config.sets["system"]
8295 world_set = root_config.sets["world"]
8300 pkg_system = system_set.findAtomForPackage(pkg)
8301 pkg_world = world_set.findAtomForPackage(pkg)
8302 if not (oneshot or pkg_world) and \
8303 myroot == self.target_root and \
8304 favorites_set.findAtomForPackage(pkg):
8305 # Maybe it will be added to world now.
8306 if create_world_atom(pkg, favorites_set, root_config):
8308 except portage.exception.InvalidDependString:
8309 # This is reported elsewhere if relevant.
8312 def pkgprint(pkg_str):
8315 return colorize("PKG_MERGE_SYSTEM", pkg_str)
8317 return colorize("PKG_MERGE_WORLD", pkg_str)
8319 return colorize("PKG_MERGE", pkg_str)
8320 elif pkg_status == "uninstall":
8321 return colorize("PKG_UNINSTALL", pkg_str)
8324 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8326 return colorize("PKG_NOMERGE_WORLD", pkg_str)
8328 return colorize("PKG_NOMERGE", pkg_str)
8331 properties = flatten(use_reduce(paren_reduce(
8332 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8333 except portage.exception.InvalidDependString, e:
8334 if not pkg.installed:
8335 show_invalid_depstring_notice(pkg,
8336 pkg.metadata["PROPERTIES"], str(e))
8340 interactive = "interactive" in properties
8341 if interactive and pkg.operation == "merge":
8342 addl = colorize("WARN", "I") + addl[1:]
8344 counters.interactive += 1
8349 if "--columns" in self.myopts:
8350 if "--quiet" in self.myopts:
8351 myprint=addl+" "+indent+pkgprint(pkg_cp)
8352 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8353 myprint=myprint+myoldbest
8354 myprint=myprint+darkgreen("to "+x[1])
8358 myprint = "[%s] %s%s" % \
8359 (pkgprint(pkg_status.ljust(13)),
8360 indent, pkgprint(pkg.cp))
8362 myprint = "[%s %s] %s%s" % \
8363 (pkgprint(pkg.type_name), addl,
8364 indent, pkgprint(pkg.cp))
8365 if (newlp-nc_len(myprint)) > 0:
8366 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8367 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8368 if (oldlp-nc_len(myprint)) > 0:
8369 myprint=myprint+" "*(oldlp-nc_len(myprint))
8370 myprint=myprint+myoldbest
8371 myprint += darkgreen("to " + pkg.root)
8374 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8376 myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8377 myprint += indent + pkgprint(pkg_key) + " " + \
8378 myoldbest + darkgreen("to " + myroot)
8380 if "--columns" in self.myopts:
8381 if "--quiet" in self.myopts:
8382 myprint=addl+" "+indent+pkgprint(pkg_cp)
8383 myprint=myprint+" "+green(xs[1]+xs[2])+" "
8384 myprint=myprint+myoldbest
8388 myprint = "[%s] %s%s" % \
8389 (pkgprint(pkg_status.ljust(13)),
8390 indent, pkgprint(pkg.cp))
8392 myprint = "[%s %s] %s%s" % \
8393 (pkgprint(pkg.type_name), addl,
8394 indent, pkgprint(pkg.cp))
8395 if (newlp-nc_len(myprint)) > 0:
8396 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8397 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8398 if (oldlp-nc_len(myprint)) > 0:
8399 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8400 myprint += myoldbest
8403 myprint = "[%s] %s%s %s" % \
8404 (pkgprint(pkg_status.ljust(13)),
8405 indent, pkgprint(pkg.cpv),
8408 myprint = "[%s %s] %s%s %s" % \
8409 (pkgprint(pkg_type), addl, indent,
8410 pkgprint(pkg.cpv), myoldbest)
8412 if columns and pkg.operation == "uninstall":
8414 p.append((myprint, verboseadd, repoadd))
8416 if "--tree" not in self.myopts and \
8417 "--quiet" not in self.myopts and \
8418 not self._opts_no_restart.intersection(self.myopts) and \
8419 pkg.root == self._running_root.root and \
8420 portage.match_from_list(
8421 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8422 not vardb.cpv_exists(pkg.cpv) and \
8423 "--quiet" not in self.myopts:
8424 if mylist_index < len(mylist) - 1:
8425 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8426 p.append(colorize("WARN", " then resume the merge."))
8429 show_repos = repoadd_set and repoadd_set != set(["0"])
8432 if isinstance(x, basestring):
8433 out.write("%s\n" % (x,))
8436 myprint, verboseadd, repoadd = x
8439 myprint += " " + verboseadd
8441 if show_repos and repoadd:
8442 myprint += " " + teal("[%s]" % repoadd)
8444 out.write("%s\n" % (myprint,))
8453 sys.stdout.write(str(repo_display))
8455 if "--changelog" in self.myopts:
8457 for revision,text in changelogs:
8458 print bold('*'+revision)
8459 sys.stdout.write(text)
8464 def display_problems(self):
8466 Display problems with the dependency graph such as slot collisions.
8467 This is called internally by display() to show the problems _after_
8468 the merge list where it is most likely to be seen, but if display()
8469 is not going to be called then this method should be called explicitly
8470 to ensure that the user is notified of problems with the graph.
8472 All output goes to stderr, except for unsatisfied dependencies which
8473 go to stdout for parsing by programs such as autounmask.
8476 # Note that show_masked_packages() sends it's output to
8477 # stdout, and some programs such as autounmask parse the
8478 # output in cases when emerge bails out. However, when
8479 # show_masked_packages() is called for installed packages
8480 # here, the message is a warning that is more appropriate
8481 # to send to stderr, so temporarily redirect stdout to
8482 # stderr. TODO: Fix output code so there's a cleaner way
8483 # to redirect everything to stderr.
8488 sys.stdout = sys.stderr
8489 self._display_problems()
8495 # This goes to stdout for parsing by programs like autounmask.
8496 for pargs, kwargs in self._unsatisfied_deps_for_display:
8497 self._show_unsatisfied_dep(*pargs, **kwargs)
8499 def _display_problems(self):
8500 if self._circular_deps_for_display is not None:
8501 self._show_circular_deps(
8502 self._circular_deps_for_display)
8504 # The user is only notified of a slot conflict if
8505 # there are no unresolvable blocker conflicts.
8506 if self._unsatisfied_blockers_for_display is not None:
8507 self._show_unsatisfied_blockers(
8508 self._unsatisfied_blockers_for_display)
8510 self._show_slot_collision_notice()
8512 # TODO: Add generic support for "set problem" handlers so that
8513 # the below warnings aren't special cases for world only.
8515 if self._missing_args:
8516 world_problems = False
8517 if "world" in self._sets:
8518 # Filter out indirect members of world (from nested sets)
8519 # since only direct members of world are desired here.
8520 world_set = self.roots[self.target_root].sets["world"]
8521 for arg, atom in self._missing_args:
8522 if arg.name == "world" and atom in world_set:
8523 world_problems = True
8527 sys.stderr.write("\n!!! Problems have been " + \
8528 "detected with your world file\n")
8529 sys.stderr.write("!!! Please run " + \
8530 green("emaint --check world")+"\n\n")
8532 if self._missing_args:
8533 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8534 " Ebuilds for the following packages are either all\n")
8535 sys.stderr.write(colorize("BAD", "!!!") + \
8536 " masked or don't exist:\n")
8537 sys.stderr.write(" ".join(str(atom) for arg, atom in \
8538 self._missing_args) + "\n")
8540 if self._pprovided_args:
8542 for arg, atom in self._pprovided_args:
8543 if isinstance(arg, SetArg):
8545 arg_atom = (atom, atom)
8548 arg_atom = (arg.arg, atom)
8549 refs = arg_refs.setdefault(arg_atom, [])
8550 if parent not in refs:
8553 msg.append(bad("\nWARNING: "))
8554 if len(self._pprovided_args) > 1:
8555 msg.append("Requested packages will not be " + \
8556 "merged because they are listed in\n")
8558 msg.append("A requested package will not be " + \
8559 "merged because it is listed in\n")
8560 msg.append("package.provided:\n\n")
8561 problems_sets = set()
8562 for (arg, atom), refs in arg_refs.iteritems():
8565 problems_sets.update(refs)
8567 ref_string = ", ".join(["'%s'" % name for name in refs])
8568 ref_string = " pulled in by " + ref_string
8569 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8571 if "world" in problems_sets:
8572 msg.append("This problem can be solved in one of the following ways:\n\n")
8573 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
8574 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
8575 msg.append(" C) Remove offending entries from package.provided.\n\n")
8576 msg.append("The best course of action depends on the reason that an offending\n")
8577 msg.append("package.provided entry exists.\n\n")
8578 sys.stderr.write("".join(msg))
8580 masked_packages = []
8581 for pkg in self._masked_installed:
8582 root_config = pkg.root_config
8583 pkgsettings = self.pkgsettings[pkg.root]
8584 mreasons = get_masking_status(pkg, pkgsettings, root_config)
8585 masked_packages.append((root_config, pkgsettings,
8586 pkg.cpv, pkg.metadata, mreasons))
8588 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8589 " The following installed packages are masked:\n")
8590 show_masked_packages(masked_packages)
8594 def calc_changelog(self,ebuildpath,current,next):
8595 if ebuildpath == None or not os.path.exists(ebuildpath):
8597 current = '-'.join(portage.catpkgsplit(current)[1:])
8598 if current.endswith('-r0'):
8599 current = current[:-3]
8600 next = '-'.join(portage.catpkgsplit(next)[1:])
8601 if next.endswith('-r0'):
8603 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8605 changelog = open(changelogpath).read()
8606 except SystemExit, e:
8607 raise # Needed else can't exit
8610 divisions = self.find_changelog_tags(changelog)
8611 #print 'XX from',current,'to',next
8612 #for div,text in divisions: print 'XX',div
8613 # skip entries for all revisions above the one we are about to emerge
8614 for i in range(len(divisions)):
8615 if divisions[i][0]==next:
8616 divisions = divisions[i:]
8618 # find out how many entries we are going to display
8619 for i in range(len(divisions)):
8620 if divisions[i][0]==current:
8621 divisions = divisions[:i]
8624 # couldnt find the current revision in the list. display nothing
8628 def find_changelog_tags(self,changelog):
8632 match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8634 if release is not None:
8635 divs.append((release,changelog))
8637 if release is not None:
8638 divs.append((release,changelog[:match.start()]))
8639 changelog = changelog[match.end():]
8640 release = match.group(1)
8641 if release.endswith('.ebuild'):
8642 release = release[:-7]
8643 if release.endswith('-r0'):
8644 release = release[:-3]
8646 def saveNomergeFavorites(self):
8647 """Find atoms in favorites that are not in the mergelist and add them
8648 to the world file if necessary."""
8649 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8650 "--oneshot", "--onlydeps", "--pretend"):
8651 if x in self.myopts:
8653 root_config = self.roots[self.target_root]
8654 world_set = root_config.sets["world"]
8656 world_locked = False
8657 if hasattr(world_set, "lock"):
8661 if hasattr(world_set, "load"):
8662 world_set.load() # maybe it's changed on disk
8664 args_set = self._sets["args"]
8665 portdb = self.trees[self.target_root]["porttree"].dbapi
8666 added_favorites = set()
8667 for x in self._set_nodes:
8668 pkg_type, root, pkg_key, pkg_status = x
8669 if pkg_status != "nomerge":
8673 myfavkey = create_world_atom(x, args_set, root_config)
8675 if myfavkey in added_favorites:
8677 added_favorites.add(myfavkey)
8678 except portage.exception.InvalidDependString, e:
8679 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8680 (pkg_key, str(e)), noiselevel=-1)
8681 writemsg("!!! see '%s'\n\n" % os.path.join(
8682 root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8685 for k in self._sets:
8686 if k in ("args", "world") or not root_config.sets[k].world_candidate:
8691 all_added.append(SETPREFIX + k)
8692 all_added.extend(added_favorites)
8695 print ">>> Recording %s in \"world\" favorites file..." % \
8696 colorize("INFORM", str(a))
8698 world_set.update(all_added)
8703 def loadResumeCommand(self, resume_data, skip_masked=False):
8705 Add a resume command to the graph and validate it in the process. This
8706 will raise a PackageNotFound exception if a package is not available.
8709 if not isinstance(resume_data, dict):
8712 mergelist = resume_data.get("mergelist")
8713 if not isinstance(mergelist, list):
8716 fakedb = self.mydbapi
8718 serialized_tasks = []
8721 if not (isinstance(x, list) and len(x) == 4):
8723 pkg_type, myroot, pkg_key, action = x
8724 if pkg_type not in self.pkg_tree_map:
8726 if action != "merge":
8728 tree_type = self.pkg_tree_map[pkg_type]
8729 mydb = trees[myroot][tree_type].dbapi
8730 db_keys = list(self._trees_orig[myroot][
8731 tree_type].dbapi._aux_cache_keys)
8733 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8735 # It does no exist or it is corrupt.
8736 if action == "uninstall":
8738 raise portage.exception.PackageNotFound(pkg_key)
8739 installed = action == "uninstall"
8740 built = pkg_type != "ebuild"
8741 root_config = self.roots[myroot]
8742 pkg = Package(built=built, cpv=pkg_key,
8743 installed=installed, metadata=metadata,
8744 operation=action, root_config=root_config,
8746 if pkg_type == "ebuild":
8747 pkgsettings = self.pkgsettings[myroot]
8748 pkgsettings.setcpv(pkg)
8749 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8750 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8751 self._pkg_cache[pkg] = pkg
8753 root_config = self.roots[pkg.root]
8754 if "merge" == pkg.operation and \
8755 not visible(root_config.settings, pkg):
8757 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8759 self._unsatisfied_deps_for_display.append(
8760 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8762 fakedb[myroot].cpv_inject(pkg)
8763 serialized_tasks.append(pkg)
8764 self.spinner.update()
8766 if self._unsatisfied_deps_for_display:
8769 if not serialized_tasks or "--nodeps" in self.myopts:
8770 self._serialized_tasks_cache = serialized_tasks
8771 self._scheduler_graph = self.digraph
8773 self._select_package = self._select_pkg_from_graph
8774 self.myparams.add("selective")
8775 # Always traverse deep dependencies in order to account for
8776 # potentially unsatisfied dependencies of installed packages.
8777 # This is necessary for correct --keep-going or --resume operation
8778 # in case a package from a group of circularly dependent packages
8779 # fails. In this case, a package which has recently been installed
8780 # may have an unsatisfied circular dependency (pulled in by
8781 # PDEPEND, for example). So, even though a package is already
8782 # installed, it may not have all of it's dependencies satisfied, so
8783 # it may not be usable. If such a package is in the subgraph of
8784 # deep depenedencies of a scheduled build, that build needs to
8785 # be cancelled. In order for this type of situation to be
8786 # recognized, deep traversal of dependencies is required.
8787 self.myparams.add("deep")
8789 favorites = resume_data.get("favorites")
8790 args_set = self._sets["args"]
8791 if isinstance(favorites, list):
8792 args = self._load_favorites(favorites)
8796 for task in serialized_tasks:
8797 if isinstance(task, Package) and \
8798 task.operation == "merge":
8799 if not self._add_pkg(task, None):
8802 # Packages for argument atoms need to be explicitly
8803 # added via _add_pkg() so that they are included in the
8804 # digraph (needed at least for --tree display).
8806 for atom in arg.set:
8807 pkg, existing_node = self._select_package(
8808 arg.root_config.root, atom)
8809 if existing_node is None and \
8811 if not self._add_pkg(pkg, Dependency(atom=atom,
8812 root=pkg.root, parent=arg)):
8815 # Allow unsatisfied deps here to avoid showing a masking
8816 # message for an unsatisfied dep that isn't necessarily
8818 if not self._create_graph(allow_unsatisfied=True):
8821 unsatisfied_deps = []
8822 for dep in self._unsatisfied_deps:
8823 if not isinstance(dep.parent, Package):
8825 if dep.parent.operation == "merge":
8826 unsatisfied_deps.append(dep)
8829 # For unsatisfied deps of installed packages, only account for
8830 # them if they are in the subgraph of dependencies of a package
8831 # which is scheduled to be installed.
8832 unsatisfied_install = False
8834 dep_stack = self.digraph.parent_nodes(dep.parent)
8836 node = dep_stack.pop()
8837 if not isinstance(node, Package):
8839 if node.operation == "merge":
8840 unsatisfied_install = True
8842 if node in traversed:
8845 dep_stack.extend(self.digraph.parent_nodes(node))
8847 if unsatisfied_install:
8848 unsatisfied_deps.append(dep)
8850 if masked_tasks or unsatisfied_deps:
8851 # This probably means that a required package
8852 # was dropped via --skipfirst. It makes the
8853 # resume list invalid, so convert it to a
8854 # UnsatisfiedResumeDep exception.
8855 raise self.UnsatisfiedResumeDep(self,
8856 masked_tasks + unsatisfied_deps)
8857 self._serialized_tasks_cache = None
8860 except self._unknown_internal_error:
8865 def _load_favorites(self, favorites):
8867 Use a list of favorites to resume state from a
8868 previous select_files() call. This creates similar
8869 DependencyArg instances to those that would have
8870 been created by the original select_files() call.
8871 This allows Package instances to be matched with
8872 DependencyArg instances during graph creation.
8874 root_config = self.roots[self.target_root]
8875 getSetAtoms = root_config.setconfig.getSetAtoms
8876 sets = root_config.sets
8879 if not isinstance(x, basestring):
8881 if x in ("system", "world"):
8883 if x.startswith(SETPREFIX):
8884 s = x[len(SETPREFIX):]
8889 # Recursively expand sets so that containment tests in
8890 # self._get_parent_sets() properly match atoms in nested
8891 # sets (like if world contains system).
8892 expanded_set = InternalPackageSet(
8893 initial_atoms=getSetAtoms(s))
8894 self._sets[s] = expanded_set
8895 args.append(SetArg(arg=x, set=expanded_set,
8896 root_config=root_config))
8898 if not portage.isvalidatom(x):
8900 args.append(AtomArg(arg=x, atom=x,
8901 root_config=root_config))
8903 self._set_args(args)
8906 class UnsatisfiedResumeDep(portage.exception.PortageException):
8908 A dependency of a resume list is not installed. This
8909 can occur when a required package is dropped from the
8910 merge list via --skipfirst.
8912 def __init__(self, depgraph, value):
8913 portage.exception.PortageException.__init__(self, value)
8914 self.depgraph = depgraph
8916 class _internal_exception(portage.exception.PortageException):
8917 def __init__(self, value=""):
8918 portage.exception.PortageException.__init__(self, value)
8920 class _unknown_internal_error(_internal_exception):
8922 Used by the depgraph internally to terminate graph creation.
8923 The specific reason for the failure should have been dumped
8924 to stderr, unfortunately, the exact reason for the failure
8928 class _serialize_tasks_retry(_internal_exception):
8930 This is raised by the _serialize_tasks() method when it needs to
8931 be called again for some reason. The only case that it's currently
8932 used for is when neglected dependencies need to be added to the
8933 graph in order to avoid making a potentially unsafe decision.
8936 class _dep_check_composite_db(portage.dbapi):
8938 A dbapi-like interface that is optimized for use in dep_check() calls.
8939 This is built on top of the existing depgraph package selection logic.
8940 Some packages that have been added to the graph may be masked from this
8941 view in order to influence the atom preference selection that occurs
8944 def __init__(self, depgraph, root):
8945 portage.dbapi.__init__(self)
8946 self._depgraph = depgraph
8948 self._match_cache = {}
8949 self._cpv_pkg_map = {}
8951 def _clear_cache(self):
8952 self._match_cache.clear()
8953 self._cpv_pkg_map.clear()
8955 def match(self, atom):
8956 ret = self._match_cache.get(atom)
8961 atom = self._dep_expand(atom)
8962 pkg, existing = self._depgraph._select_package(self._root, atom)
8966 # Return the highest available from select_package() as well as
8967 # any matching slots in the graph db.
8969 slots.add(pkg.metadata["SLOT"])
8970 atom_cp = portage.dep_getkey(atom)
8971 if pkg.cp.startswith("virtual/"):
8972 # For new-style virtual lookahead that occurs inside
8973 # dep_check(), examine all slots. This is needed
8974 # so that newer slots will not unnecessarily be pulled in
8975 # when a satisfying lower slot is already installed. For
8976 # example, if virtual/jdk-1.4 is satisfied via kaffe then
8977 # there's no need to pull in a newer slot to satisfy a
8978 # virtual/jdk dependency.
8979 for db, pkg_type, built, installed, db_keys in \
8980 self._depgraph._filtered_trees[self._root]["dbs"]:
8981 for cpv in db.match(atom):
8982 if portage.cpv_getkey(cpv) != pkg.cp:
8984 slots.add(db.aux_get(cpv, ["SLOT"])[0])
8986 if self._visible(pkg):
8987 self._cpv_pkg_map[pkg.cpv] = pkg
8989 slots.remove(pkg.metadata["SLOT"])
8991 slot_atom = "%s:%s" % (atom_cp, slots.pop())
8992 pkg, existing = self._depgraph._select_package(
8993 self._root, slot_atom)
8996 if not self._visible(pkg):
8998 self._cpv_pkg_map[pkg.cpv] = pkg
9001 self._cpv_sort_ascending(ret)
9002 self._match_cache[orig_atom] = ret
9005 def _visible(self, pkg):
9006 if pkg.installed and "selective" not in self._depgraph.myparams:
9008 arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
9009 except (StopIteration, portage.exception.InvalidDependString):
9016 self._depgraph.pkgsettings[pkg.root], pkg):
9018 except portage.exception.InvalidDependString:
9020 in_graph = self._depgraph._slot_pkg_map[
9021 self._root].get(pkg.slot_atom)
9022 if in_graph is None:
9023 # Mask choices for packages which are not the highest visible
9024 # version within their slot (since they usually trigger slot
9026 highest_visible, in_graph = self._depgraph._select_package(
9027 self._root, pkg.slot_atom)
9028 if pkg != highest_visible:
9030 elif in_graph != pkg:
9031 # Mask choices for packages that would trigger a slot
9032 # conflict with a previously selected package.
9036 def _dep_expand(self, atom):
9038 This is only needed for old installed packages that may
9039 contain atoms that are not fully qualified with a specific
9040 category. Emulate the cpv_expand() function that's used by
9041 dbapi.match() in cases like this. If there are multiple
9042 matches, it's often due to a new-style virtual that has
9043 been added, so try to filter those out to avoid raising
9046 root_config = self._depgraph.roots[self._root]
9048 expanded_atoms = self._depgraph._dep_expand(root_config, atom)
9049 if len(expanded_atoms) > 1:
9050 non_virtual_atoms = []
9051 for x in expanded_atoms:
9052 if not portage.dep_getkey(x).startswith("virtual/"):
9053 non_virtual_atoms.append(x)
9054 if len(non_virtual_atoms) == 1:
9055 expanded_atoms = non_virtual_atoms
9056 if len(expanded_atoms) > 1:
9057 # compatible with portage.cpv_expand()
9058 raise portage.exception.AmbiguousPackageName(
9059 [portage.dep_getkey(x) for x in expanded_atoms])
9061 atom = expanded_atoms[0]
9063 null_atom = insert_category_into_atom(atom, "null")
9064 null_cp = portage.dep_getkey(null_atom)
9065 cat, atom_pn = portage.catsplit(null_cp)
9066 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9068 # Allow the resolver to choose which virtual.
9069 atom = insert_category_into_atom(atom, "virtual")
9071 atom = insert_category_into_atom(atom, "null")
9074 def aux_get(self, cpv, wants):
9075 metadata = self._cpv_pkg_map[cpv].metadata
9076 return [metadata.get(x, "") for x in wants]
9078 class RepoDisplay(object):
9079 def __init__(self, roots):
9080 self._shown_repos = {}
9081 self._unknown_repo = False
9083 for root_config in roots.itervalues():
9084 portdir = root_config.settings.get("PORTDIR")
9086 repo_paths.add(portdir)
9087 overlays = root_config.settings.get("PORTDIR_OVERLAY")
9089 repo_paths.update(overlays.split())
9090 repo_paths = list(repo_paths)
9091 self._repo_paths = repo_paths
9092 self._repo_paths_real = [ os.path.realpath(repo_path) \
9093 for repo_path in repo_paths ]
9095 # pre-allocate index for PORTDIR so that it always has index 0.
9096 for root_config in roots.itervalues():
9097 portdb = root_config.trees["porttree"].dbapi
9098 portdir = portdb.porttree_root
9100 self.repoStr(portdir)
9102 def repoStr(self, repo_path_real):
9105 real_index = self._repo_paths_real.index(repo_path_real)
9106 if real_index == -1:
9108 self._unknown_repo = True
9110 shown_repos = self._shown_repos
9111 repo_paths = self._repo_paths
9112 repo_path = repo_paths[real_index]
9113 index = shown_repos.get(repo_path)
9115 index = len(shown_repos)
9116 shown_repos[repo_path] = index
9122 shown_repos = self._shown_repos
9123 unknown_repo = self._unknown_repo
9124 if shown_repos or self._unknown_repo:
9125 output.append("Portage tree and overlays:\n")
9126 show_repo_paths = list(shown_repos)
9127 for repo_path, repo_index in shown_repos.iteritems():
9128 show_repo_paths[repo_index] = repo_path
9130 for index, repo_path in enumerate(show_repo_paths):
9131 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9133 output.append(" "+teal("[?]") + \
9134 " indicates that the source repository could not be determined\n")
9135 return "".join(output)
9137 class PackageCounters(object):
9147 self.blocks_satisfied = 0
9149 self.restrict_fetch = 0
9150 self.restrict_fetch_satisfied = 0
9151 self.interactive = 0
9154 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9157 myoutput.append("Total: %s package" % total_installs)
9158 if total_installs != 1:
9159 myoutput.append("s")
9160 if total_installs != 0:
9161 myoutput.append(" (")
9162 if self.upgrades > 0:
9163 details.append("%s upgrade" % self.upgrades)
9164 if self.upgrades > 1:
9166 if self.downgrades > 0:
9167 details.append("%s downgrade" % self.downgrades)
9168 if self.downgrades > 1:
9171 details.append("%s new" % self.new)
9172 if self.newslot > 0:
9173 details.append("%s in new slot" % self.newslot)
9174 if self.newslot > 1:
9177 details.append("%s reinstall" % self.reinst)
9181 details.append("%s uninstall" % self.uninst)
9184 if self.interactive > 0:
9185 details.append("%s %s" % (self.interactive,
9186 colorize("WARN", "interactive")))
9187 myoutput.append(", ".join(details))
9188 if total_installs != 0:
9189 myoutput.append(")")
9190 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
9191 if self.restrict_fetch:
9192 myoutput.append("\nFetch Restriction: %s package" % \
9193 self.restrict_fetch)
9194 if self.restrict_fetch > 1:
9195 myoutput.append("s")
9196 if self.restrict_fetch_satisfied < self.restrict_fetch:
9197 myoutput.append(bad(" (%s unsatisfied)") % \
9198 (self.restrict_fetch - self.restrict_fetch_satisfied))
9200 myoutput.append("\nConflict: %s block" % \
9203 myoutput.append("s")
9204 if self.blocks_satisfied < self.blocks:
9205 myoutput.append(bad(" (%s unsatisfied)") % \
9206 (self.blocks - self.blocks_satisfied))
9207 return "".join(myoutput)
9209 class PollSelectAdapter(PollConstants):
9212 Use select to emulate a poll object, for
9213 systems that don't support poll().
9217 self._registered = {}
9218 self._select_args = [[], [], []]
9220 def register(self, fd, *args):
9222 Only POLLIN is currently supported!
9226 "register expected at most 2 arguments, got " + \
9227 repr(1 + len(args)))
9229 eventmask = PollConstants.POLLIN | \
9230 PollConstants.POLLPRI | PollConstants.POLLOUT
9234 self._registered[fd] = eventmask
9235 self._select_args = None
9237 def unregister(self, fd):
9238 self._select_args = None
9239 del self._registered[fd]
9241 def poll(self, *args):
9244 "poll expected at most 2 arguments, got " + \
9245 repr(1 + len(args)))
9251 select_args = self._select_args
9252 if select_args is None:
9253 select_args = [self._registered.keys(), [], []]
9255 if timeout is not None:
9256 select_args = select_args[:]
9257 # Translate poll() timeout args to select() timeout args:
9259 # | units | value(s) for indefinite block
9260 # ---------|--------------|------------------------------
9261 # poll | milliseconds | omitted, negative, or None
9262 # ---------|--------------|------------------------------
9263 # select | seconds | omitted
9264 # ---------|--------------|------------------------------
9266 if timeout is not None and timeout < 0:
9268 if timeout is not None:
9269 select_args.append(timeout / 1000)
9271 select_events = select.select(*select_args)
9273 for fd in select_events[0]:
9274 poll_events.append((fd, PollConstants.POLLIN))
9277 class SequentialTaskQueue(SlotObject):
9279 __slots__ = ("max_jobs", "running_tasks") + \
9280 ("_dirty", "_scheduling", "_task_queue")
9282 def __init__(self, **kwargs):
9283 SlotObject.__init__(self, **kwargs)
9284 self._task_queue = deque()
9285 self.running_tasks = set()
9286 if self.max_jobs is None:
9290 def add(self, task):
9291 self._task_queue.append(task)
9294 def addFront(self, task):
9295 self._task_queue.appendleft(task)
9306 if self._scheduling:
9307 # Ignore any recursive schedule() calls triggered via
9308 # self._task_exit().
9311 self._scheduling = True
9313 task_queue = self._task_queue
9314 running_tasks = self.running_tasks
9315 max_jobs = self.max_jobs
9316 state_changed = False
9318 while task_queue and \
9319 (max_jobs is True or len(running_tasks) < max_jobs):
9320 task = task_queue.popleft()
9321 cancelled = getattr(task, "cancelled", None)
9323 running_tasks.add(task)
9324 task.addExitListener(self._task_exit)
9326 state_changed = True
9329 self._scheduling = False
9331 return state_changed
9333 def _task_exit(self, task):
9335 Since we can always rely on exit listeners being called, the set of
9336 running tasks is always pruned automatically and there is never any need
9337 to actively prune it.
9339 self.running_tasks.remove(task)
9340 if self._task_queue:
9344 self._task_queue.clear()
9345 running_tasks = self.running_tasks
9346 while running_tasks:
9347 task = running_tasks.pop()
9348 task.removeExitListener(self._task_exit)
9352 def __nonzero__(self):
9353 return bool(self._task_queue or self.running_tasks)
9356 return len(self._task_queue) + len(self.running_tasks)
9358 _can_poll_device = None
9360 def can_poll_device():
9362 Test if it's possible to use poll() on a device such as a pty. This
9363 is known to fail on Darwin.
9365 @returns: True if poll() on a device succeeds, False otherwise.
9368 global _can_poll_device
9369 if _can_poll_device is not None:
9370 return _can_poll_device
9372 if not hasattr(select, "poll"):
9373 _can_poll_device = False
9374 return _can_poll_device
9377 dev_null = open('/dev/null', 'rb')
9379 _can_poll_device = False
9380 return _can_poll_device
9383 p.register(dev_null.fileno(), PollConstants.POLLIN)
9385 invalid_request = False
9386 for f, event in p.poll():
9387 if event & PollConstants.POLLNVAL:
9388 invalid_request = True
9392 _can_poll_device = not invalid_request
9393 return _can_poll_device
9395 def create_poll_instance():
9397 Create an instance of select.poll, or an instance of
9398 PollSelectAdapter there is no poll() implementation or
9399 it is broken somehow.
9401 if can_poll_device():
9402 return select.poll()
9403 return PollSelectAdapter()
9405 getloadavg = getattr(os, "getloadavg", None)
9406 if getloadavg is None:
9409 Uses /proc/loadavg to emulate os.getloadavg().
9410 Raises OSError if the load average was unobtainable.
9413 loadavg_str = open('/proc/loadavg').readline()
9415 # getloadavg() is only supposed to raise OSError, so convert
9416 raise OSError('unknown')
9417 loadavg_split = loadavg_str.split()
9418 if len(loadavg_split) < 3:
9419 raise OSError('unknown')
9423 loadavg_floats.append(float(loadavg_split[i]))
9425 raise OSError('unknown')
9426 return tuple(loadavg_floats)
9428 class PollScheduler(object):
9430 class _sched_iface_class(SlotObject):
9431 __slots__ = ("register", "schedule", "unregister")
9435 self._max_load = None
9437 self._poll_event_queue = []
9438 self._poll_event_handlers = {}
9439 self._poll_event_handler_ids = {}
9440 # Increment id for each new handler.
9441 self._event_handler_id = 0
9442 self._poll_obj = create_poll_instance()
9443 self._scheduling = False
9445 def _schedule(self):
9447 Calls _schedule_tasks() and automatically returns early from
9448 any recursive calls to this method that the _schedule_tasks()
9449 call might trigger. This makes _schedule() safe to call from
9450 inside exit listeners.
9452 if self._scheduling:
9454 self._scheduling = True
9456 return self._schedule_tasks()
9458 self._scheduling = False
9460 def _running_job_count(self):
9463 def _can_add_job(self):
9464 max_jobs = self._max_jobs
9465 max_load = self._max_load
9467 if self._max_jobs is not True and \
9468 self._running_job_count() >= self._max_jobs:
9471 if max_load is not None and \
9472 (max_jobs is True or max_jobs > 1) and \
9473 self._running_job_count() >= 1:
9475 avg1, avg5, avg15 = getloadavg()
9479 if avg1 >= max_load:
9484 def _poll(self, timeout=None):
9486 All poll() calls pass through here. The poll events
9487 are added directly to self._poll_event_queue.
9488 In order to avoid endless blocking, this raises
9489 StopIteration if timeout is None and there are
9490 no file descriptors to poll.
9492 if not self._poll_event_handlers:
9494 if timeout is None and \
9495 not self._poll_event_handlers:
9496 raise StopIteration(
9497 "timeout is None and there are no poll() event handlers")
9499 # The following error is known to occur with Linux kernel versions
9502 # select.error: (4, 'Interrupted system call')
9504 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9505 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9506 # without any events.
9509 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9511 except select.error, e:
9512 writemsg_level("\n!!! select error: %s\n" % (e,),
9513 level=logging.ERROR, noiselevel=-1)
9515 if timeout is not None:
9518 def _next_poll_event(self, timeout=None):
9520 Since the _schedule_wait() loop is called by event
9521 handlers from _poll_loop(), maintain a central event
9522 queue for both of them to share events from a single
9523 poll() call. In order to avoid endless blocking, this
9524 raises StopIteration if timeout is None and there are
9525 no file descriptors to poll.
9527 if not self._poll_event_queue:
9529 return self._poll_event_queue.pop()
9531 def _poll_loop(self):
9533 event_handlers = self._poll_event_handlers
9534 event_handled = False
9537 while event_handlers:
9538 f, event = self._next_poll_event()
9539 handler, reg_id = event_handlers[f]
9541 event_handled = True
9542 except StopIteration:
9543 event_handled = True
9545 if not event_handled:
9546 raise AssertionError("tight loop")
9548 def _schedule_yield(self):
9550 Schedule for a short period of time chosen by the scheduler based
9551 on internal state. Synchronous tasks should call this periodically
9552 in order to allow the scheduler to service pending poll events. The
9553 scheduler will call poll() exactly once, without blocking, and any
9554 resulting poll events will be serviced.
9556 event_handlers = self._poll_event_handlers
9559 if not event_handlers:
9560 return bool(events_handled)
9562 if not self._poll_event_queue:
9566 while event_handlers and self._poll_event_queue:
9567 f, event = self._next_poll_event()
9568 handler, reg_id = event_handlers[f]
9571 except StopIteration:
9574 return bool(events_handled)
9576 def _register(self, f, eventmask, handler):
9579 @return: A unique registration id, for use in schedule() or
9582 if f in self._poll_event_handlers:
9583 raise AssertionError("fd %d is already registered" % f)
9584 self._event_handler_id += 1
9585 reg_id = self._event_handler_id
9586 self._poll_event_handler_ids[reg_id] = f
9587 self._poll_event_handlers[f] = (handler, reg_id)
9588 self._poll_obj.register(f, eventmask)
9591 def _unregister(self, reg_id):
9592 f = self._poll_event_handler_ids[reg_id]
9593 self._poll_obj.unregister(f)
9594 del self._poll_event_handlers[f]
9595 del self._poll_event_handler_ids[reg_id]
9597 def _schedule_wait(self, wait_ids):
9599 Schedule until wait_id is not longer registered
9602 @param wait_id: a task id to wait for
9604 event_handlers = self._poll_event_handlers
9605 handler_ids = self._poll_event_handler_ids
9606 event_handled = False
9608 if isinstance(wait_ids, int):
9609 wait_ids = frozenset([wait_ids])
9612 while wait_ids.intersection(handler_ids):
9613 f, event = self._next_poll_event()
9614 handler, reg_id = event_handlers[f]
9616 event_handled = True
9617 except StopIteration:
9618 event_handled = True
9620 return event_handled
9622 class QueueScheduler(PollScheduler):
9625 Add instances of SequentialTaskQueue and then call run(). The
9626 run() method returns when no tasks remain.
9629 def __init__(self, max_jobs=None, max_load=None):
9630 PollScheduler.__init__(self)
9632 if max_jobs is None:
9635 self._max_jobs = max_jobs
9636 self._max_load = max_load
9637 self.sched_iface = self._sched_iface_class(
9638 register=self._register,
9639 schedule=self._schedule_wait,
9640 unregister=self._unregister)
9643 self._schedule_listeners = []
9646 self._queues.append(q)
9648 def remove(self, q):
9649 self._queues.remove(q)
9653 while self._schedule():
9656 while self._running_job_count():
9659 def _schedule_tasks(self):
9662 @returns: True if there may be remaining tasks to schedule,
9665 while self._can_add_job():
9666 n = self._max_jobs - self._running_job_count()
9670 if not self._start_next_job(n):
9673 for q in self._queues:
9678 def _running_job_count(self):
9680 for q in self._queues:
9681 job_count += len(q.running_tasks)
9682 self._jobs = job_count
9685 def _start_next_job(self, n=1):
9687 for q in self._queues:
9688 initial_job_count = len(q.running_tasks)
9690 final_job_count = len(q.running_tasks)
9691 if final_job_count > initial_job_count:
9692 started_count += (final_job_count - initial_job_count)
9693 if started_count >= n:
9695 return started_count
9697 class TaskScheduler(object):
9700 A simple way to handle scheduling of AsynchrousTask instances. Simply
9701 add tasks and call run(). The run() method returns when no tasks remain.
9704 def __init__(self, max_jobs=None, max_load=None):
9705 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9706 self._scheduler = QueueScheduler(
9707 max_jobs=max_jobs, max_load=max_load)
9708 self.sched_iface = self._scheduler.sched_iface
9709 self.run = self._scheduler.run
9710 self._scheduler.add(self._queue)
9712 def add(self, task):
9713 self._queue.add(task)
9715 class JobStatusDisplay(object):
9717 _bound_properties = ("curval", "failed", "running")
9718 _jobs_column_width = 48
9720 # Don't update the display unless at least this much
9721 # time has passed, in units of seconds.
9722 _min_display_latency = 2
9724 _default_term_codes = {
9730 _termcap_name_map = {
9731 'carriage_return' : 'cr',
9736 def __init__(self, out=sys.stdout, quiet=False):
9737 object.__setattr__(self, "out", out)
9738 object.__setattr__(self, "quiet", quiet)
9739 object.__setattr__(self, "maxval", 0)
9740 object.__setattr__(self, "merges", 0)
9741 object.__setattr__(self, "_changed", False)
9742 object.__setattr__(self, "_displayed", False)
9743 object.__setattr__(self, "_last_display_time", 0)
9744 object.__setattr__(self, "width", 80)
9747 isatty = hasattr(out, "isatty") and out.isatty()
9748 object.__setattr__(self, "_isatty", isatty)
9749 if not isatty or not self._init_term():
9751 for k, capname in self._termcap_name_map.iteritems():
9752 term_codes[k] = self._default_term_codes[capname]
9753 object.__setattr__(self, "_term_codes", term_codes)
9754 encoding = sys.getdefaultencoding()
9755 for k, v in self._term_codes.items():
9756 if not isinstance(v, basestring):
9757 self._term_codes[k] = v.decode(encoding, 'replace')
9759 def _init_term(self):
9761 Initialize term control codes.
9763 @returns: True if term codes were successfully initialized,
9767 term_type = os.environ.get("TERM", "vt100")
9773 curses.setupterm(term_type, self.out.fileno())
9774 tigetstr = curses.tigetstr
9775 except curses.error:
9780 if tigetstr is None:
9784 for k, capname in self._termcap_name_map.iteritems():
9785 code = tigetstr(capname)
9787 code = self._default_term_codes[capname]
9788 term_codes[k] = code
9789 object.__setattr__(self, "_term_codes", term_codes)
9792 def _format_msg(self, msg):
9793 return ">>> %s" % msg
9797 self._term_codes['carriage_return'] + \
9798 self._term_codes['clr_eol'])
9800 self._displayed = False
9802 def _display(self, line):
9803 self.out.write(line)
9805 self._displayed = True
9807 def _update(self, msg):
9810 if not self._isatty:
9811 out.write(self._format_msg(msg) + self._term_codes['newline'])
9813 self._displayed = True
9819 self._display(self._format_msg(msg))
9821 def displayMessage(self, msg):
9823 was_displayed = self._displayed
9825 if self._isatty and self._displayed:
9828 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9830 self._displayed = False
9833 self._changed = True
9839 for name in self._bound_properties:
9840 object.__setattr__(self, name, 0)
9843 self.out.write(self._term_codes['newline'])
9845 self._displayed = False
9847 def __setattr__(self, name, value):
9848 old_value = getattr(self, name)
9849 if value == old_value:
9851 object.__setattr__(self, name, value)
9852 if name in self._bound_properties:
9853 self._property_change(name, old_value, value)
9855 def _property_change(self, name, old_value, new_value):
9856 self._changed = True
9859 def _load_avg_str(self):
9874 return ", ".join(("%%.%df" % digits ) % x for x in avg)
9878 Display status on stdout, but only if something has
9879 changed since the last call.
9885 current_time = time.time()
9886 time_delta = current_time - self._last_display_time
9887 if self._displayed and \
9889 if not self._isatty:
9891 if time_delta < self._min_display_latency:
9894 self._last_display_time = current_time
9895 self._changed = False
9896 self._display_status()
9898 def _display_status(self):
9899 # Don't use len(self._completed_tasks) here since that also
9900 # can include uninstall tasks.
9901 curval_str = str(self.curval)
9902 maxval_str = str(self.maxval)
9903 running_str = str(self.running)
9904 failed_str = str(self.failed)
9905 load_avg_str = self._load_avg_str()
9907 color_output = StringIO()
9908 plain_output = StringIO()
9909 style_file = portage.output.ConsoleStyleFile(color_output)
9910 style_file.write_listener = plain_output
9911 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9912 style_writer.style_listener = style_file.new_styles
9913 f = formatter.AbstractFormatter(style_writer)
9915 number_style = "INFORM"
9916 f.add_literal_data("Jobs: ")
9917 f.push_style(number_style)
9918 f.add_literal_data(curval_str)
9920 f.add_literal_data(" of ")
9921 f.push_style(number_style)
9922 f.add_literal_data(maxval_str)
9924 f.add_literal_data(" complete")
9927 f.add_literal_data(", ")
9928 f.push_style(number_style)
9929 f.add_literal_data(running_str)
9931 f.add_literal_data(" running")
9934 f.add_literal_data(", ")
9935 f.push_style(number_style)
9936 f.add_literal_data(failed_str)
9938 f.add_literal_data(" failed")
9940 padding = self._jobs_column_width - len(plain_output.getvalue())
9942 f.add_literal_data(padding * " ")
9944 f.add_literal_data("Load avg: ")
9945 f.add_literal_data(load_avg_str)
9947 # Truncate to fit width, to avoid making the terminal scroll if the
9948 # line overflows (happens when the load average is large).
9949 plain_output = plain_output.getvalue()
9950 if self._isatty and len(plain_output) > self.width:
9951 # Use plain_output here since it's easier to truncate
9952 # properly than the color output which contains console
9954 self._update(plain_output[:self.width])
9956 self._update(color_output.getvalue())
9958 xtermTitle(" ".join(plain_output.split()))
9960 class Scheduler(PollScheduler):
9962 _opts_ignore_blockers = \
9963 frozenset(["--buildpkgonly",
9964 "--fetchonly", "--fetch-all-uri",
9965 "--nodeps", "--pretend"])
9967 _opts_no_background = \
9968 frozenset(["--pretend",
9969 "--fetchonly", "--fetch-all-uri"])
9971 _opts_no_restart = frozenset(["--buildpkgonly",
9972 "--fetchonly", "--fetch-all-uri", "--pretend"])
9974 _bad_resume_opts = set(["--ask", "--changelog",
9975 "--resume", "--skipfirst"])
9977 _fetch_log = "/var/log/emerge-fetch.log"
9979 class _iface_class(SlotObject):
9980 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
9981 "dblinkElog", "fetch", "register", "schedule",
9982 "scheduleSetup", "scheduleUnpack", "scheduleYield",
9985 class _fetch_iface_class(SlotObject):
9986 __slots__ = ("log_file", "schedule")
9988 _task_queues_class = slot_dict_class(
9989 ("merge", "jobs", "fetch", "unpack"), prefix="")
9991 class _build_opts_class(SlotObject):
9992 __slots__ = ("buildpkg", "buildpkgonly",
9993 "fetch_all_uri", "fetchonly", "pretend")
9995 class _binpkg_opts_class(SlotObject):
9996 __slots__ = ("fetchonly", "getbinpkg", "pretend")
9998 class _pkg_count_class(SlotObject):
9999 __slots__ = ("curval", "maxval")
10001 class _emerge_log_class(SlotObject):
10002 __slots__ = ("xterm_titles",)
10004 def log(self, *pargs, **kwargs):
10005 if not self.xterm_titles:
10006 # Avoid interference with the scheduler's status display.
10007 kwargs.pop("short_msg", None)
10008 emergelog(self.xterm_titles, *pargs, **kwargs)
10010 class _failed_pkg(SlotObject):
10011 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
10013 class _ConfigPool(object):
10014 """Interface for a task to temporarily allocate a config
10015 instance from a pool. This allows a task to be constructed
10016 long before the config instance actually becomes needed, like
10017 when prefetchers are constructed for the whole merge list."""
10018 __slots__ = ("_root", "_allocate", "_deallocate")
10019 def __init__(self, root, allocate, deallocate):
10021 self._allocate = allocate
10022 self._deallocate = deallocate
10023 def allocate(self):
10024 return self._allocate(self._root)
10025 def deallocate(self, settings):
10026 self._deallocate(settings)
10028 class _unknown_internal_error(portage.exception.PortageException):
10030 Used internally to terminate scheduling. The specific reason for
10031 the failure should have been dumped to stderr.
10033 def __init__(self, value=""):
10034 portage.exception.PortageException.__init__(self, value)
10036 def __init__(self, settings, trees, mtimedb, myopts,
10037 spinner, mergelist, favorites, digraph):
10038 PollScheduler.__init__(self)
10039 self.settings = settings
10040 self.target_root = settings["ROOT"]
10042 self.myopts = myopts
10043 self._spinner = spinner
10044 self._mtimedb = mtimedb
10045 self._mergelist = mergelist
10046 self._favorites = favorites
10047 self._args_set = InternalPackageSet(favorites)
10048 self._build_opts = self._build_opts_class()
10049 for k in self._build_opts.__slots__:
10050 setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10051 self._binpkg_opts = self._binpkg_opts_class()
10052 for k in self._binpkg_opts.__slots__:
10053 setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10056 self._logger = self._emerge_log_class()
10057 self._task_queues = self._task_queues_class()
10058 for k in self._task_queues.allowed_keys:
10059 setattr(self._task_queues, k,
10060 SequentialTaskQueue())
10062 # Holds merges that will wait to be executed when no builds are
10063 # executing. This is useful for system packages since dependencies
10064 # on system packages are frequently unspecified.
10065 self._merge_wait_queue = []
10066 # Holds merges that have been transfered from the merge_wait_queue to
10067 # the actual merge queue. They are removed from this list upon
10068 # completion. Other packages can start building only when this list is
10070 self._merge_wait_scheduled = []
10072 # Holds system packages and their deep runtime dependencies. Before
10073 # being merged, these packages go to merge_wait_queue, to be merged
10074 # when no other packages are building.
10075 self._deep_system_deps = set()
10077 # Holds packages to merge which will satisfy currently unsatisfied
10078 # deep runtime dependencies of system packages. If this is not empty
10079 # then no parallel builds will be spawned until it is empty. This
10080 # minimizes the possibility that a build will fail due to the system
10081 # being in a fragile state. For example, see bug #259954.
10082 self._unsatisfied_system_deps = set()
10084 self._status_display = JobStatusDisplay()
10085 self._max_load = myopts.get("--load-average")
10086 max_jobs = myopts.get("--jobs")
10087 if max_jobs is None:
10089 self._set_max_jobs(max_jobs)
10091 # The root where the currently running
10092 # portage instance is installed.
10093 self._running_root = trees["/"]["root_config"]
10095 if settings.get("PORTAGE_DEBUG", "") == "1":
10097 self.pkgsettings = {}
10098 self._config_pool = {}
10099 self._blocker_db = {}
10101 self._config_pool[root] = []
10102 self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10104 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10105 schedule=self._schedule_fetch)
10106 self._sched_iface = self._iface_class(
10107 dblinkEbuildPhase=self._dblink_ebuild_phase,
10108 dblinkDisplayMerge=self._dblink_display_merge,
10109 dblinkElog=self._dblink_elog,
10110 fetch=fetch_iface, register=self._register,
10111 schedule=self._schedule_wait,
10112 scheduleSetup=self._schedule_setup,
10113 scheduleUnpack=self._schedule_unpack,
10114 scheduleYield=self._schedule_yield,
10115 unregister=self._unregister)
10117 self._prefetchers = weakref.WeakValueDictionary()
10118 self._pkg_queue = []
10119 self._completed_tasks = set()
10121 self._failed_pkgs = []
10122 self._failed_pkgs_all = []
10123 self._failed_pkgs_die_msgs = []
10124 self._post_mod_echo_msgs = []
10125 self._parallel_fetch = False
10126 merge_count = len([x for x in mergelist \
10127 if isinstance(x, Package) and x.operation == "merge"])
10128 self._pkg_count = self._pkg_count_class(
10129 curval=0, maxval=merge_count)
10130 self._status_display.maxval = self._pkg_count.maxval
10132 # The load average takes some time to respond when new
10133 # jobs are added, so we need to limit the rate of adding
10135 self._job_delay_max = 10
10136 self._job_delay_factor = 1.0
10137 self._job_delay_exp = 1.5
10138 self._previous_job_start_time = None
10140 self._set_digraph(digraph)
10142 # This is used to memoize the _choose_pkg() result when
10143 # no packages can be chosen until one of the existing
10145 self._choose_pkg_return_early = False
10147 features = self.settings.features
10148 if "parallel-fetch" in features and \
10149 not ("--pretend" in self.myopts or \
10150 "--fetch-all-uri" in self.myopts or \
10151 "--fetchonly" in self.myopts):
10152 if "distlocks" not in features:
10153 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10154 portage.writemsg(red("!!!")+" parallel-fetching " + \
10155 "requires the distlocks feature enabled"+"\n",
10157 portage.writemsg(red("!!!")+" you have it disabled, " + \
10158 "thus parallel-fetching is being disabled"+"\n",
10160 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10161 elif len(mergelist) > 1:
10162 self._parallel_fetch = True
10164 if self._parallel_fetch:
10165 # clear out existing fetch log if it exists
10167 open(self._fetch_log, 'w')
10168 except EnvironmentError:
10171 self._running_portage = None
10172 portage_match = self._running_root.trees["vartree"].dbapi.match(
10173 portage.const.PORTAGE_PACKAGE_ATOM)
10175 cpv = portage_match.pop()
10176 self._running_portage = self._pkg(cpv, "installed",
10177 self._running_root, installed=True)
10179 def _poll(self, timeout=None):
10181 PollScheduler._poll(self, timeout=timeout)
10183 def _set_max_jobs(self, max_jobs):
10184 self._max_jobs = max_jobs
10185 self._task_queues.jobs.max_jobs = max_jobs
10187 def _background_mode(self):
10189 Check if background mode is enabled and adjust states as necessary.
10192 @returns: True if background mode is enabled, False otherwise.
10194 background = (self._max_jobs is True or \
10195 self._max_jobs > 1 or "--quiet" in self.myopts) and \
10196 not bool(self._opts_no_background.intersection(self.myopts))
10199 interactive_tasks = self._get_interactive_tasks()
10200 if interactive_tasks:
10202 writemsg_level(">>> Sending package output to stdio due " + \
10203 "to interactive package(s):\n",
10204 level=logging.INFO, noiselevel=-1)
10206 for pkg in interactive_tasks:
10207 pkg_str = " " + colorize("INFORM", str(pkg.cpv))
10208 if pkg.root != "/":
10209 pkg_str += " for " + pkg.root
10210 msg.append(pkg_str)
10212 writemsg_level("".join("%s\n" % (l,) for l in msg),
10213 level=logging.INFO, noiselevel=-1)
10214 if self._max_jobs is True or self._max_jobs > 1:
10215 self._set_max_jobs(1)
10216 writemsg_level(">>> Setting --jobs=1 due " + \
10217 "to the above interactive package(s)\n",
10218 level=logging.INFO, noiselevel=-1)
10220 self._status_display.quiet = \
10221 not background or \
10222 ("--quiet" in self.myopts and \
10223 "--verbose" not in self.myopts)
10225 self._logger.xterm_titles = \
10226 "notitles" not in self.settings.features and \
10227 self._status_display.quiet
10231 def _get_interactive_tasks(self):
10232 from portage import flatten
10233 from portage.dep import use_reduce, paren_reduce
10234 interactive_tasks = []
10235 for task in self._mergelist:
10236 if not (isinstance(task, Package) and \
10237 task.operation == "merge"):
10240 properties = flatten(use_reduce(paren_reduce(
10241 task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10242 except portage.exception.InvalidDependString, e:
10243 show_invalid_depstring_notice(task,
10244 task.metadata["PROPERTIES"], str(e))
10245 raise self._unknown_internal_error()
10246 if "interactive" in properties:
10247 interactive_tasks.append(task)
10248 return interactive_tasks
10250 def _set_digraph(self, digraph):
10251 if "--nodeps" in self.myopts or \
10252 (self._max_jobs is not True and self._max_jobs < 2):
10254 self._digraph = None
10257 self._digraph = digraph
10258 self._find_system_deps()
10259 self._prune_digraph()
10260 self._prevent_builddir_collisions()
10262 def _find_system_deps(self):
10264 Find system packages and their deep runtime dependencies. Before being
10265 merged, these packages go to merge_wait_queue, to be merged when no
10266 other packages are building.
10268 deep_system_deps = self._deep_system_deps
10269 deep_system_deps.clear()
10270 deep_system_deps.update(
10271 _find_deep_system_runtime_deps(self._digraph))
10272 deep_system_deps.difference_update([pkg for pkg in \
10273 deep_system_deps if pkg.operation != "merge"])
10275 def _prune_digraph(self):
10277 Prune any root nodes that are irrelevant.
10280 graph = self._digraph
10281 completed_tasks = self._completed_tasks
10282 removed_nodes = set()
10284 for node in graph.root_nodes():
10285 if not isinstance(node, Package) or \
10286 (node.installed and node.operation == "nomerge") or \
10288 node in completed_tasks:
10289 removed_nodes.add(node)
10291 graph.difference_update(removed_nodes)
10292 if not removed_nodes:
10294 removed_nodes.clear()
10296 def _prevent_builddir_collisions(self):
10298 When building stages, sometimes the same exact cpv needs to be merged
10299 to both $ROOTs. Add edges to the digraph in order to avoid collisions
10300 in the builddir. Currently, normal file locks would be inappropriate
10301 for this purpose since emerge holds all of it's build dir locks from
10305 for pkg in self._mergelist:
10306 if not isinstance(pkg, Package):
10307 # a satisfied blocker
10311 if pkg.cpv not in cpv_map:
10312 cpv_map[pkg.cpv] = [pkg]
10314 for earlier_pkg in cpv_map[pkg.cpv]:
10315 self._digraph.add(earlier_pkg, pkg,
10316 priority=DepPriority(buildtime=True))
10317 cpv_map[pkg.cpv].append(pkg)
10319 class _pkg_failure(portage.exception.PortageException):
10321 An instance of this class is raised by unmerge() when
10322 an uninstallation fails.
10325 def __init__(self, *pargs):
10326 portage.exception.PortageException.__init__(self, pargs)
10328 self.status = pargs[0]
10330 def _schedule_fetch(self, fetcher):
10332 Schedule a fetcher on the fetch queue, in order to
10333 serialize access to the fetch log.
10335 self._task_queues.fetch.addFront(fetcher)
10337 def _schedule_setup(self, setup_phase):
10339 Schedule a setup phase on the merge queue, in order to
10340 serialize unsandboxed access to the live filesystem.
10342 self._task_queues.merge.addFront(setup_phase)
10345 def _schedule_unpack(self, unpack_phase):
10347 Schedule an unpack phase on the unpack queue, in order
10348 to serialize $DISTDIR access for live ebuilds.
10350 self._task_queues.unpack.add(unpack_phase)
10352 def _find_blockers(self, new_pkg):
10354 Returns a callable which should be called only when
10355 the vdb lock has been acquired.
10357 def get_blockers():
10358 return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10359 return get_blockers
10361 def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
10362 if self._opts_ignore_blockers.intersection(self.myopts):
10365 # Call gc.collect() here to avoid heap overflow that
10366 # triggers 'Cannot allocate memory' errors (reported
10367 # with python-2.5).
10371 blocker_db = self._blocker_db[new_pkg.root]
10373 blocker_dblinks = []
10374 for blocking_pkg in blocker_db.findInstalledBlockers(
10375 new_pkg, acquire_lock=acquire_lock):
10376 if new_pkg.slot_atom == blocking_pkg.slot_atom:
10378 if new_pkg.cpv == blocking_pkg.cpv:
10380 blocker_dblinks.append(portage.dblink(
10381 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
10382 self.pkgsettings[blocking_pkg.root], treetype="vartree",
10383 vartree=self.trees[blocking_pkg.root]["vartree"]))
10387 return blocker_dblinks
10389 def _dblink_pkg(self, pkg_dblink):
10390 cpv = pkg_dblink.mycpv
10391 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
10392 root_config = self.trees[pkg_dblink.myroot]["root_config"]
10393 installed = type_name == "installed"
10394 return self._pkg(cpv, type_name, root_config, installed=installed)
10396 def _append_to_log_path(self, log_path, msg):
10397 f = open(log_path, 'a')
10403 def _dblink_elog(self, pkg_dblink, phase, func, msgs):
10405 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10408 background = self._background
10410 if background and log_path is not None:
10411 log_file = open(log_path, 'a')
10416 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
10418 if log_file is not None:
10421 def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
10422 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10423 background = self._background
10425 if log_path is None:
10426 if not (background and level < logging.WARN):
10427 portage.util.writemsg_level(msg,
10428 level=level, noiselevel=noiselevel)
10431 portage.util.writemsg_level(msg,
10432 level=level, noiselevel=noiselevel)
10433 self._append_to_log_path(log_path, msg)
10435 def _dblink_ebuild_phase(self,
10436 pkg_dblink, pkg_dbapi, ebuild_path, phase):
10438 Using this callback for merge phases allows the scheduler
10439 to run while these phases execute asynchronously, and allows
10440 the scheduler control output handling.
10443 scheduler = self._sched_iface
10444 settings = pkg_dblink.settings
10445 pkg = self._dblink_pkg(pkg_dblink)
10446 background = self._background
10447 log_path = settings.get("PORTAGE_LOG_FILE")
10449 ebuild_phase = EbuildPhase(background=background,
10450 pkg=pkg, phase=phase, scheduler=scheduler,
10451 settings=settings, tree=pkg_dblink.treetype)
10452 ebuild_phase.start()
10453 ebuild_phase.wait()
10455 return ebuild_phase.returncode
10457 def _generate_digests(self):
10459 Generate digests if necessary for --digests or FEATURES=digest.
10460 In order to avoid interference, this must done before parallel
10464 if '--fetchonly' in self.myopts:
10467 digest = '--digest' in self.myopts
10469 for pkgsettings in self.pkgsettings.itervalues():
10470 if 'digest' in pkgsettings.features:
10477 for x in self._mergelist:
10478 if not isinstance(x, Package) or \
10479 x.type_name != 'ebuild' or \
10480 x.operation != 'merge':
10482 pkgsettings = self.pkgsettings[x.root]
10483 if '--digest' not in self.myopts and \
10484 'digest' not in pkgsettings.features:
10486 portdb = x.root_config.trees['porttree'].dbapi
10487 ebuild_path = portdb.findname(x.cpv)
10488 if not ebuild_path:
10490 "!!! Could not locate ebuild for '%s'.\n" \
10491 % x.cpv, level=logging.ERROR, noiselevel=-1)
10493 pkgsettings['O'] = os.path.dirname(ebuild_path)
10494 if not portage.digestgen([], pkgsettings, myportdb=portdb):
10496 "!!! Unable to generate manifest for '%s'.\n" \
10497 % x.cpv, level=logging.ERROR, noiselevel=-1)
10502 def _check_manifests(self):
10503 # Verify all the manifests now so that the user is notified of failure
10504 # as soon as possible.
10505 if "strict" not in self.settings.features or \
10506 "--fetchonly" in self.myopts or \
10507 "--fetch-all-uri" in self.myopts:
10510 shown_verifying_msg = False
10511 quiet_settings = {}
10512 for myroot, pkgsettings in self.pkgsettings.iteritems():
10513 quiet_config = portage.config(clone=pkgsettings)
10514 quiet_config["PORTAGE_QUIET"] = "1"
10515 quiet_config.backup_changes("PORTAGE_QUIET")
10516 quiet_settings[myroot] = quiet_config
10519 for x in self._mergelist:
10520 if not isinstance(x, Package) or \
10521 x.type_name != "ebuild":
10524 if not shown_verifying_msg:
10525 shown_verifying_msg = True
10526 self._status_msg("Verifying ebuild manifests")
10528 root_config = x.root_config
10529 portdb = root_config.trees["porttree"].dbapi
10530 quiet_config = quiet_settings[root_config.root]
10531 quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
10532 if not portage.digestcheck([], quiet_config, strict=True):
10537 def _add_prefetchers(self):
10539 if not self._parallel_fetch:
10542 if self._parallel_fetch:
10543 self._status_msg("Starting parallel fetch")
10545 prefetchers = self._prefetchers
10546 getbinpkg = "--getbinpkg" in self.myopts
10548 # In order to avoid "waiting for lock" messages
10549 # at the beginning, which annoy users, never
10550 # spawn a prefetcher for the first package.
10551 for pkg in self._mergelist[1:]:
10552 prefetcher = self._create_prefetcher(pkg)
10553 if prefetcher is not None:
10554 self._task_queues.fetch.add(prefetcher)
10555 prefetchers[pkg] = prefetcher
10557 def _create_prefetcher(self, pkg):
10559 @return: a prefetcher, or None if not applicable
10563 if not isinstance(pkg, Package):
10566 elif pkg.type_name == "ebuild":
10568 prefetcher = EbuildFetcher(background=True,
10569 config_pool=self._ConfigPool(pkg.root,
10570 self._allocate_config, self._deallocate_config),
10571 fetchonly=1, logfile=self._fetch_log,
10572 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
10574 elif pkg.type_name == "binary" and \
10575 "--getbinpkg" in self.myopts and \
10576 pkg.root_config.trees["bintree"].isremote(pkg.cpv):
10578 prefetcher = BinpkgPrefetcher(background=True,
10579 pkg=pkg, scheduler=self._sched_iface)
10583 def _is_restart_scheduled(self):
10585 Check if the merge list contains a replacement
10586 for the current running instance, that will result
10587 in restart after merge.
10589 @returns: True if a restart is scheduled, False otherwise.
10591 if self._opts_no_restart.intersection(self.myopts):
10594 mergelist = self._mergelist
10596 for i, pkg in enumerate(mergelist):
10597 if self._is_restart_necessary(pkg) and \
10598 i != len(mergelist) - 1:
10603 def _is_restart_necessary(self, pkg):
10605 @return: True if merging the given package
10606 requires restart, False otherwise.
10609 # Figure out if we need a restart.
10610 if pkg.root == self._running_root.root and \
10611 portage.match_from_list(
10612 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10613 if self._running_portage:
10614 return pkg.cpv != self._running_portage.cpv
10618 def _restart_if_necessary(self, pkg):
10620 Use execv() to restart emerge. This happens
10621 if portage upgrades itself and there are
10622 remaining packages in the list.
10625 if self._opts_no_restart.intersection(self.myopts):
10628 if not self._is_restart_necessary(pkg):
10631 if pkg == self._mergelist[-1]:
10634 self._main_loop_cleanup()
10636 logger = self._logger
10637 pkg_count = self._pkg_count
10638 mtimedb = self._mtimedb
10639 bad_resume_opts = self._bad_resume_opts
10641 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
10642 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
10644 logger.log(" *** RESTARTING " + \
10645 "emerge via exec() after change of " + \
10646 "portage version.")
10648 mtimedb["resume"]["mergelist"].remove(list(pkg))
10650 portage.run_exitfuncs()
10651 mynewargv = [sys.argv[0], "--resume"]
10652 resume_opts = self.myopts.copy()
10653 # For automatic resume, we need to prevent
10654 # any of bad_resume_opts from leaking in
10655 # via EMERGE_DEFAULT_OPTS.
10656 resume_opts["--ignore-default-opts"] = True
10657 for myopt, myarg in resume_opts.iteritems():
10658 if myopt not in bad_resume_opts:
10660 mynewargv.append(myopt)
10662 mynewargv.append(myopt +"="+ str(myarg))
10663 # priority only needs to be adjusted on the first run
10664 os.environ["PORTAGE_NICENESS"] = "0"
10665 os.execv(mynewargv[0], mynewargv)
10669 if "--resume" in self.myopts:
10671 portage.writemsg_stdout(
10672 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10673 self._logger.log(" *** Resuming merge...")
10675 self._save_resume_list()
10678 self._background = self._background_mode()
10679 except self._unknown_internal_error:
10682 for root in self.trees:
10683 root_config = self.trees[root]["root_config"]
10685 # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10686 # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10687 # for ensuring sane $PWD (bug #239560) and storing elog messages.
10688 tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10689 if not tmpdir or not os.path.isdir(tmpdir):
10690 msg = "The directory specified in your " + \
10691 "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10692 "does not exist. Please create this " + \
10693 "directory or correct your PORTAGE_TMPDIR setting."
10694 msg = textwrap.wrap(msg, 70)
10695 out = portage.output.EOutput()
10700 if self._background:
10701 root_config.settings.unlock()
10702 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10703 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10704 root_config.settings.lock()
10706 self.pkgsettings[root] = portage.config(
10707 clone=root_config.settings)
10709 rval = self._generate_digests()
10710 if rval != os.EX_OK:
10713 rval = self._check_manifests()
10714 if rval != os.EX_OK:
10717 keep_going = "--keep-going" in self.myopts
10718 fetchonly = self._build_opts.fetchonly
10719 mtimedb = self._mtimedb
10720 failed_pkgs = self._failed_pkgs
10723 rval = self._merge()
10724 if rval == os.EX_OK or fetchonly or not keep_going:
10726 if "resume" not in mtimedb:
10728 mergelist = self._mtimedb["resume"].get("mergelist")
10732 if not failed_pkgs:
10735 for failed_pkg in failed_pkgs:
10736 mergelist.remove(list(failed_pkg.pkg))
10738 self._failed_pkgs_all.extend(failed_pkgs)
10744 if not self._calc_resume_list():
10747 clear_caches(self.trees)
10748 if not self._mergelist:
10751 self._save_resume_list()
10752 self._pkg_count.curval = 0
10753 self._pkg_count.maxval = len([x for x in self._mergelist \
10754 if isinstance(x, Package) and x.operation == "merge"])
10755 self._status_display.maxval = self._pkg_count.maxval
10757 self._logger.log(" *** Finished. Cleaning up...")
10760 self._failed_pkgs_all.extend(failed_pkgs)
10763 background = self._background
10764 failure_log_shown = False
10765 if background and len(self._failed_pkgs_all) == 1:
10766 # If only one package failed then just show it's
10767 # whole log for easy viewing.
10768 failed_pkg = self._failed_pkgs_all[-1]
10769 build_dir = failed_pkg.build_dir
10772 log_paths = [failed_pkg.build_log]
10774 log_path = self._locate_failure_log(failed_pkg)
10775 if log_path is not None:
10777 log_file = open(log_path)
10781 if log_file is not None:
10783 for line in log_file:
10784 writemsg_level(line, noiselevel=-1)
10787 failure_log_shown = True
10789 # Dump mod_echo output now since it tends to flood the terminal.
10790 # This allows us to avoid having more important output, generated
10791 # later, from being swept away by the mod_echo output.
10792 mod_echo_output = _flush_elog_mod_echo()
10794 if background and not failure_log_shown and \
10795 self._failed_pkgs_all and \
10796 self._failed_pkgs_die_msgs and \
10797 not mod_echo_output:
10799 printer = portage.output.EOutput()
10800 for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10802 if mysettings["ROOT"] != "/":
10803 root_msg = " merged to %s" % mysettings["ROOT"]
10805 printer.einfo("Error messages for package %s%s:" % \
10806 (colorize("INFORM", key), root_msg))
10808 for phase in portage.const.EBUILD_PHASES:
10809 if phase not in logentries:
10811 for msgtype, msgcontent in logentries[phase]:
10812 if isinstance(msgcontent, basestring):
10813 msgcontent = [msgcontent]
10814 for line in msgcontent:
10815 printer.eerror(line.strip("\n"))
10817 if self._post_mod_echo_msgs:
10818 for msg in self._post_mod_echo_msgs:
10821 if len(self._failed_pkgs_all) > 1 or \
10822 (self._failed_pkgs_all and "--keep-going" in self.myopts):
10823 if len(self._failed_pkgs_all) > 1:
10824 msg = "The following %d packages have " % \
10825 len(self._failed_pkgs_all) + \
10826 "failed to build or install:"
10828 msg = "The following package has " + \
10829 "failed to build or install:"
10830 prefix = bad(" * ")
10831 writemsg(prefix + "\n", noiselevel=-1)
10832 from textwrap import wrap
10833 for line in wrap(msg, 72):
10834 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10835 writemsg(prefix + "\n", noiselevel=-1)
10836 for failed_pkg in self._failed_pkgs_all:
10837 writemsg("%s\t%s\n" % (prefix,
10838 colorize("INFORM", str(failed_pkg.pkg))),
10840 writemsg(prefix + "\n", noiselevel=-1)
10844 def _elog_listener(self, mysettings, key, logentries, fulltext):
10845 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
10847 self._failed_pkgs_die_msgs.append(
10848 (mysettings, key, errors))
10850 def _locate_failure_log(self, failed_pkg):
10852 build_dir = failed_pkg.build_dir
10855 log_paths = [failed_pkg.build_log]
10857 for log_path in log_paths:
10862 log_size = os.stat(log_path).st_size
10873 def _add_packages(self):
10874 pkg_queue = self._pkg_queue
10875 for pkg in self._mergelist:
10876 if isinstance(pkg, Package):
10877 pkg_queue.append(pkg)
10878 elif isinstance(pkg, Blocker):
10881 def _system_merge_started(self, merge):
10883 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
10885 graph = self._digraph
10888 pkg = merge.merge.pkg
10890 # Skip this if $ROOT != / since it shouldn't matter if there
10891 # are unsatisfied system runtime deps in this case.
10892 if pkg.root != '/':
10895 completed_tasks = self._completed_tasks
10896 unsatisfied = self._unsatisfied_system_deps
10898 def ignore_non_runtime_or_satisfied(priority):
10900 Ignore non-runtime and satisfied runtime priorities.
10902 if isinstance(priority, DepPriority) and \
10903 not priority.satisfied and \
10904 (priority.runtime or priority.runtime_post):
10908 # When checking for unsatisfied runtime deps, only check
10909 # direct deps since indirect deps are checked when the
10910 # corresponding parent is merged.
10911 for child in graph.child_nodes(pkg,
10912 ignore_priority=ignore_non_runtime_or_satisfied):
10913 if not isinstance(child, Package) or \
10914 child.operation == 'uninstall':
10918 if child.operation == 'merge' and \
10919 child not in completed_tasks:
10920 unsatisfied.add(child)
10922 def _merge_wait_exit_handler(self, task):
10923 self._merge_wait_scheduled.remove(task)
10924 self._merge_exit(task)
10926 def _merge_exit(self, merge):
10927 self._do_merge_exit(merge)
10928 self._deallocate_config(merge.merge.settings)
10929 if merge.returncode == os.EX_OK and \
10930 not merge.merge.pkg.installed:
10931 self._status_display.curval += 1
10932 self._status_display.merges = len(self._task_queues.merge)
10935 def _do_merge_exit(self, merge):
10936 pkg = merge.merge.pkg
10937 if merge.returncode != os.EX_OK:
10938 settings = merge.merge.settings
10939 build_dir = settings.get("PORTAGE_BUILDDIR")
10940 build_log = settings.get("PORTAGE_LOG_FILE")
10942 self._failed_pkgs.append(self._failed_pkg(
10943 build_dir=build_dir, build_log=build_log,
10945 returncode=merge.returncode))
10946 self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
10948 self._status_display.failed = len(self._failed_pkgs)
10951 self._task_complete(pkg)
10952 pkg_to_replace = merge.merge.pkg_to_replace
10953 if pkg_to_replace is not None:
10954 # When a package is replaced, mark it's uninstall
10955 # task complete (if any).
10956 uninst_hash_key = \
10957 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
10958 self._task_complete(uninst_hash_key)
10963 self._restart_if_necessary(pkg)
10965 # Call mtimedb.commit() after each merge so that
10966 # --resume still works after being interrupted
10967 # by reboot, sigkill or similar.
10968 mtimedb = self._mtimedb
10969 mtimedb["resume"]["mergelist"].remove(list(pkg))
10970 if not mtimedb["resume"]["mergelist"]:
10971 del mtimedb["resume"]
10974 def _build_exit(self, build):
10975 if build.returncode == os.EX_OK:
10977 merge = PackageMerge(merge=build)
10978 if not build.build_opts.buildpkgonly and \
10979 build.pkg in self._deep_system_deps:
10980 # Since dependencies on system packages are frequently
10981 # unspecified, merge them only when no builds are executing.
10982 self._merge_wait_queue.append(merge)
10983 merge.addStartListener(self._system_merge_started)
10985 merge.addExitListener(self._merge_exit)
10986 self._task_queues.merge.add(merge)
10987 self._status_display.merges = len(self._task_queues.merge)
10989 settings = build.settings
10990 build_dir = settings.get("PORTAGE_BUILDDIR")
10991 build_log = settings.get("PORTAGE_LOG_FILE")
10993 self._failed_pkgs.append(self._failed_pkg(
10994 build_dir=build_dir, build_log=build_log,
10996 returncode=build.returncode))
10997 self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
10999 self._status_display.failed = len(self._failed_pkgs)
11000 self._deallocate_config(build.settings)
11002 self._status_display.running = self._jobs
11005 def _extract_exit(self, build):
11006 self._build_exit(build)
11008 def _task_complete(self, pkg):
11009 self._completed_tasks.add(pkg)
11010 self._unsatisfied_system_deps.discard(pkg)
11011 self._choose_pkg_return_early = False
11015 self._add_prefetchers()
11016 self._add_packages()
11017 pkg_queue = self._pkg_queue
11018 failed_pkgs = self._failed_pkgs
11019 portage.locks._quiet = self._background
11020 portage.elog._emerge_elog_listener = self._elog_listener
11026 self._main_loop_cleanup()
11027 portage.locks._quiet = False
11028 portage.elog._emerge_elog_listener = None
11030 rval = failed_pkgs[-1].returncode
11034 def _main_loop_cleanup(self):
11035 del self._pkg_queue[:]
11036 self._completed_tasks.clear()
11037 self._deep_system_deps.clear()
11038 self._unsatisfied_system_deps.clear()
11039 self._choose_pkg_return_early = False
11040 self._status_display.reset()
11041 self._digraph = None
11042 self._task_queues.fetch.clear()
11044 def _choose_pkg(self):
11046 Choose a task that has all it's dependencies satisfied.
11049 if self._choose_pkg_return_early:
11052 if self._digraph is None:
11053 if (self._jobs or self._task_queues.merge) and \
11054 not ("--nodeps" in self.myopts and \
11055 (self._max_jobs is True or self._max_jobs > 1)):
11056 self._choose_pkg_return_early = True
11058 return self._pkg_queue.pop(0)
11060 if not (self._jobs or self._task_queues.merge):
11061 return self._pkg_queue.pop(0)
11063 self._prune_digraph()
11066 later = set(self._pkg_queue)
11067 for pkg in self._pkg_queue:
11069 if not self._dependent_on_scheduled_merges(pkg, later):
11073 if chosen_pkg is not None:
11074 self._pkg_queue.remove(chosen_pkg)
11076 if chosen_pkg is None:
11077 # There's no point in searching for a package to
11078 # choose until at least one of the existing jobs
11080 self._choose_pkg_return_early = True
11084 def _dependent_on_scheduled_merges(self, pkg, later):
11086 Traverse the subgraph of the given packages deep dependencies
11087 to see if it contains any scheduled merges.
11088 @param pkg: a package to check dependencies for
11090 @param later: packages for which dependence should be ignored
11091 since they will be merged later than pkg anyway and therefore
11092 delaying the merge of pkg will not result in a more optimal
11096 @returns: True if the package is dependent, False otherwise.
11099 graph = self._digraph
11100 completed_tasks = self._completed_tasks
11103 traversed_nodes = set([pkg])
11104 direct_deps = graph.child_nodes(pkg)
11105 node_stack = direct_deps
11106 direct_deps = frozenset(direct_deps)
11108 node = node_stack.pop()
11109 if node in traversed_nodes:
11111 traversed_nodes.add(node)
11112 if not ((node.installed and node.operation == "nomerge") or \
11113 (node.operation == "uninstall" and \
11114 node not in direct_deps) or \
11115 node in completed_tasks or \
11119 node_stack.extend(graph.child_nodes(node))
11123 def _allocate_config(self, root):
11125 Allocate a unique config instance for a task in order
11126 to prevent interference between parallel tasks.
11128 if self._config_pool[root]:
11129 temp_settings = self._config_pool[root].pop()
11131 temp_settings = portage.config(clone=self.pkgsettings[root])
11132 # Since config.setcpv() isn't guaranteed to call config.reset() due to
11133 # performance reasons, call it here to make sure all settings from the
11134 # previous package get flushed out (such as PORTAGE_LOG_FILE).
11135 temp_settings.reload()
11136 temp_settings.reset()
11137 return temp_settings
11139 def _deallocate_config(self, settings):
11140 self._config_pool[settings["ROOT"]].append(settings)
11142 def _main_loop(self):
11144 # Only allow 1 job max if a restart is scheduled
11145 # due to portage update.
11146 if self._is_restart_scheduled() or \
11147 self._opts_no_background.intersection(self.myopts):
11148 self._set_max_jobs(1)
11150 merge_queue = self._task_queues.merge
11152 while self._schedule():
11153 if self._poll_event_handlers:
11158 if not (self._jobs or merge_queue):
11160 if self._poll_event_handlers:
11163 def _keep_scheduling(self):
11164 return bool(self._pkg_queue and \
11165 not (self._failed_pkgs and not self._build_opts.fetchonly))
11167 def _schedule_tasks(self):
11169 # When the number of jobs drops to zero, process all waiting merges.
11170 if not self._jobs and self._merge_wait_queue:
11171 for task in self._merge_wait_queue:
11172 task.addExitListener(self._merge_wait_exit_handler)
11173 self._task_queues.merge.add(task)
11174 self._status_display.merges = len(self._task_queues.merge)
11175 self._merge_wait_scheduled.extend(self._merge_wait_queue)
11176 del self._merge_wait_queue[:]
11178 self._schedule_tasks_imp()
11179 self._status_display.display()
11182 for q in self._task_queues.values():
11186 # Cancel prefetchers if they're the only reason
11187 # the main poll loop is still running.
11188 if self._failed_pkgs and not self._build_opts.fetchonly and \
11189 not (self._jobs or self._task_queues.merge) and \
11190 self._task_queues.fetch:
11191 self._task_queues.fetch.clear()
11195 self._schedule_tasks_imp()
11196 self._status_display.display()
11198 return self._keep_scheduling()
11200 def _job_delay(self):
11203 @returns: True if job scheduling should be delayed, False otherwise.
11206 if self._jobs and self._max_load is not None:
11208 current_time = time.time()
11210 delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
11211 if delay > self._job_delay_max:
11212 delay = self._job_delay_max
11213 if (current_time - self._previous_job_start_time) < delay:
11218 def _schedule_tasks_imp(self):
11221 @returns: True if state changed, False otherwise.
11228 if not self._keep_scheduling():
11229 return bool(state_change)
11231 if self._choose_pkg_return_early or \
11232 self._merge_wait_scheduled or \
11233 (self._jobs and self._unsatisfied_system_deps) or \
11234 not self._can_add_job() or \
11236 return bool(state_change)
11238 pkg = self._choose_pkg()
11240 return bool(state_change)
11244 if not pkg.installed:
11245 self._pkg_count.curval += 1
11247 task = self._task(pkg)
11250 merge = PackageMerge(merge=task)
11251 merge.addExitListener(self._merge_exit)
11252 self._task_queues.merge.add(merge)
11256 self._previous_job_start_time = time.time()
11257 self._status_display.running = self._jobs
11258 task.addExitListener(self._extract_exit)
11259 self._task_queues.jobs.add(task)
11263 self._previous_job_start_time = time.time()
11264 self._status_display.running = self._jobs
11265 task.addExitListener(self._build_exit)
11266 self._task_queues.jobs.add(task)
11268 return bool(state_change)
11270 def _task(self, pkg):
11272 pkg_to_replace = None
11273 if pkg.operation != "uninstall":
11274 vardb = pkg.root_config.trees["vartree"].dbapi
11275 previous_cpv = vardb.match(pkg.slot_atom)
11277 previous_cpv = previous_cpv.pop()
11278 pkg_to_replace = self._pkg(previous_cpv,
11279 "installed", pkg.root_config, installed=True)
11281 task = MergeListItem(args_set=self._args_set,
11282 background=self._background, binpkg_opts=self._binpkg_opts,
11283 build_opts=self._build_opts,
11284 config_pool=self._ConfigPool(pkg.root,
11285 self._allocate_config, self._deallocate_config),
11286 emerge_opts=self.myopts,
11287 find_blockers=self._find_blockers(pkg), logger=self._logger,
11288 mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
11289 pkg_to_replace=pkg_to_replace,
11290 prefetcher=self._prefetchers.get(pkg),
11291 scheduler=self._sched_iface,
11292 settings=self._allocate_config(pkg.root),
11293 statusMessage=self._status_msg,
11294 world_atom=self._world_atom)
11298 def _failed_pkg_msg(self, failed_pkg, action, preposition):
11299 pkg = failed_pkg.pkg
11300 msg = "%s to %s %s" % \
11301 (bad("Failed"), action, colorize("INFORM", pkg.cpv))
11302 if pkg.root != "/":
11303 msg += " %s %s" % (preposition, pkg.root)
11305 log_path = self._locate_failure_log(failed_pkg)
11306 if log_path is not None:
11307 msg += ", Log file:"
11308 self._status_msg(msg)
11310 if log_path is not None:
11311 self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
11313 def _status_msg(self, msg):
11315 Display a brief status message (no newlines) in the status display.
11316 This is called by tasks to provide feedback to the user. This
11317 delegates the resposibility of generating \r and \n control characters,
11318 to guarantee that lines are created or erased when necessary and
11322 @param msg: a brief status message (no newlines allowed)
11324 if not self._background:
11325 writemsg_level("\n")
11326 self._status_display.displayMessage(msg)
11328 def _save_resume_list(self):
11330 Do this before verifying the ebuild Manifests since it might
11331 be possible for the user to use --resume --skipfirst get past
11332 a non-essential package with a broken digest.
11334 mtimedb = self._mtimedb
11335 mtimedb["resume"]["mergelist"] = [list(x) \
11336 for x in self._mergelist \
11337 if isinstance(x, Package) and x.operation == "merge"]
11341 def _calc_resume_list(self):
11343 Use the current resume list to calculate a new one,
11344 dropping any packages with unsatisfied deps.
11346 @returns: True if successful, False otherwise.
11348 print colorize("GOOD", "*** Resuming merge...")
11350 if self._show_list():
11351 if "--tree" in self.myopts:
11352 portage.writemsg_stdout("\n" + \
11353 darkgreen("These are the packages that " + \
11354 "would be merged, in reverse order:\n\n"))
11357 portage.writemsg_stdout("\n" + \
11358 darkgreen("These are the packages that " + \
11359 "would be merged, in order:\n\n"))
11361 show_spinner = "--quiet" not in self.myopts and \
11362 "--nodeps" not in self.myopts
11365 print "Calculating dependencies ",
11367 myparams = create_depgraph_params(self.myopts, None)
11371 success, mydepgraph, dropped_tasks = resume_depgraph(
11372 self.settings, self.trees, self._mtimedb, self.myopts,
11373 myparams, self._spinner)
11374 except depgraph.UnsatisfiedResumeDep, exc:
11375 # rename variable to avoid python-3.0 error:
11376 # SyntaxError: can not delete variable 'e' referenced in nested
11379 mydepgraph = e.depgraph
11380 dropped_tasks = set()
11383 print "\b\b... done!"
11386 def unsatisfied_resume_dep_msg():
11387 mydepgraph.display_problems()
11388 out = portage.output.EOutput()
11389 out.eerror("One or more packages are either masked or " + \
11390 "have missing dependencies:")
11393 show_parents = set()
11394 for dep in e.value:
11395 if dep.parent in show_parents:
11397 show_parents.add(dep.parent)
11398 if dep.atom is None:
11399 out.eerror(indent + "Masked package:")
11400 out.eerror(2 * indent + str(dep.parent))
11403 out.eerror(indent + str(dep.atom) + " pulled in by:")
11404 out.eerror(2 * indent + str(dep.parent))
11406 msg = "The resume list contains packages " + \
11407 "that are either masked or have " + \
11408 "unsatisfied dependencies. " + \
11409 "Please restart/continue " + \
11410 "the operation manually, or use --skipfirst " + \
11411 "to skip the first package in the list and " + \
11412 "any other packages that may be " + \
11413 "masked or have missing dependencies."
11414 for line in textwrap.wrap(msg, 72):
11416 self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
11419 if success and self._show_list():
11420 mylist = mydepgraph.altlist()
11422 if "--tree" in self.myopts:
11424 mydepgraph.display(mylist, favorites=self._favorites)
11427 self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11429 mydepgraph.display_problems()
11431 mylist = mydepgraph.altlist()
11432 mydepgraph.break_refs(mylist)
11433 mydepgraph.break_refs(dropped_tasks)
11434 self._mergelist = mylist
11435 self._set_digraph(mydepgraph.schedulerGraph())
11438 for task in dropped_tasks:
11439 if not (isinstance(task, Package) and task.operation == "merge"):
11442 msg = "emerge --keep-going:" + \
11444 if pkg.root != "/":
11445 msg += " for %s" % (pkg.root,)
11446 msg += " dropped due to unsatisfied dependency."
11447 for line in textwrap.wrap(msg, msg_width):
11448 eerror(line, phase="other", key=pkg.cpv)
11449 settings = self.pkgsettings[pkg.root]
11450 # Ensure that log collection from $T is disabled inside
11451 # elog_process(), since any logs that might exist are
11453 settings.pop("T", None)
11454 portage.elog.elog_process(pkg.cpv, settings)
11455 self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
11459 def _show_list(self):
11460 myopts = self.myopts
11461 if "--quiet" not in myopts and \
11462 ("--ask" in myopts or "--tree" in myopts or \
11463 "--verbose" in myopts):
11467 def _world_atom(self, pkg):
11469 Add the package to the world file, but only if
11470 it's supposed to be added. Otherwise, do nothing.
11473 if set(("--buildpkgonly", "--fetchonly",
11475 "--oneshot", "--onlydeps",
11476 "--pretend")).intersection(self.myopts):
11479 if pkg.root != self.target_root:
11482 args_set = self._args_set
11483 if not args_set.findAtomForPackage(pkg):
11486 logger = self._logger
11487 pkg_count = self._pkg_count
11488 root_config = pkg.root_config
11489 world_set = root_config.sets["world"]
11490 world_locked = False
11491 if hasattr(world_set, "lock"):
11493 world_locked = True
11496 if hasattr(world_set, "load"):
11497 world_set.load() # maybe it's changed on disk
11499 atom = create_world_atom(pkg, args_set, root_config)
11501 if hasattr(world_set, "add"):
11502 self._status_msg(('Recording %s in "world" ' + \
11503 'favorites file...') % atom)
11504 logger.log(" === (%s of %s) Updating world file (%s)" % \
11505 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11506 world_set.add(atom)
11508 writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11509 (atom,), level=logging.WARN, noiselevel=-1)
11514 def _pkg(self, cpv, type_name, root_config, installed=False):
11516 Get a package instance from the cache, or create a new
11517 one if necessary. Raises KeyError from aux_get if it
11518 failures for some reason (package does not exist or is
11521 operation = "merge"
11523 operation = "nomerge"
11525 if self._digraph is not None:
11526 # Reuse existing instance when available.
11527 pkg = self._digraph.get(
11528 (type_name, root_config.root, cpv, operation))
11529 if pkg is not None:
11532 tree_type = depgraph.pkg_tree_map[type_name]
11533 db = root_config.trees[tree_type].dbapi
11534 db_keys = list(self.trees[root_config.root][
11535 tree_type].dbapi._aux_cache_keys)
11536 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11537 pkg = Package(cpv=cpv, metadata=metadata,
11538 root_config=root_config, installed=installed)
11539 if type_name == "ebuild":
11540 settings = self.pkgsettings[root_config.root]
11541 settings.setcpv(pkg)
11542 pkg.metadata["USE"] = settings["PORTAGE_USE"]
11543 pkg.metadata['CHOST'] = settings.get('CHOST', '')
11547 class MetadataRegen(PollScheduler):
11549 def __init__(self, portdb, max_jobs=None, max_load=None):
11550 PollScheduler.__init__(self)
11551 self._portdb = portdb
11553 if max_jobs is None:
11556 self._max_jobs = max_jobs
11557 self._max_load = max_load
11558 self._sched_iface = self._sched_iface_class(
11559 register=self._register,
11560 schedule=self._schedule_wait,
11561 unregister=self._unregister)
11563 self._valid_pkgs = set()
11564 self._process_iter = self._iter_metadata_processes()
11565 self.returncode = os.EX_OK
11566 self._error_count = 0
11568 def _iter_metadata_processes(self):
11569 portdb = self._portdb
11570 valid_pkgs = self._valid_pkgs
11571 every_cp = portdb.cp_all()
11572 every_cp.sort(reverse=True)
11575 cp = every_cp.pop()
11576 portage.writemsg_stdout("Processing %s\n" % cp)
11577 cpv_list = portdb.cp_list(cp)
11578 for cpv in cpv_list:
11579 valid_pkgs.add(cpv)
11580 ebuild_path, repo_path = portdb.findname2(cpv)
11581 metadata_process = portdb._metadata_process(
11582 cpv, ebuild_path, repo_path)
11583 if metadata_process is None:
11585 yield metadata_process
11589 portdb = self._portdb
11590 from portage.cache.cache_errors import CacheError
11593 for mytree in portdb.porttrees:
11595 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11596 except CacheError, e:
11597 portage.writemsg("Error listing cache entries for " + \
11598 "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
11603 while self._schedule():
11610 for y in self._valid_pkgs:
11611 for mytree in portdb.porttrees:
11612 if portdb.findname2(y, mytree=mytree)[0]:
11613 dead_nodes[mytree].discard(y)
11615 for mytree, nodes in dead_nodes.iteritems():
11616 auxdb = portdb.auxdb[mytree]
11620 except (KeyError, CacheError):
11623 def _schedule_tasks(self):
11626 @returns: True if there may be remaining tasks to schedule,
11629 while self._can_add_job():
11631 metadata_process = self._process_iter.next()
11632 except StopIteration:
11636 metadata_process.scheduler = self._sched_iface
11637 metadata_process.addExitListener(self._metadata_exit)
11638 metadata_process.start()
11641 def _metadata_exit(self, metadata_process):
11643 if metadata_process.returncode != os.EX_OK:
11644 self.returncode = 1
11645 self._error_count += 1
11646 self._valid_pkgs.discard(metadata_process.cpv)
11647 portage.writemsg("Error processing %s, continuing...\n" % \
11648 (metadata_process.cpv,))
11651 class UninstallFailure(portage.exception.PortageException):
11653 An instance of this class is raised by unmerge() when
11654 an uninstallation fails.
11657 def __init__(self, *pargs):
11658 portage.exception.PortageException.__init__(self, pargs)
11660 self.status = pargs[0]
11662 def unmerge(root_config, myopts, unmerge_action,
11663 unmerge_files, ldpath_mtimes, autoclean=0,
11664 clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11665 scheduler=None, writemsg_level=portage.util.writemsg_level):
11667 quiet = "--quiet" in myopts
11668 settings = root_config.settings
11669 sets = root_config.sets
11670 vartree = root_config.trees["vartree"]
11671 candidate_catpkgs=[]
11673 xterm_titles = "notitles" not in settings.features
11674 out = portage.output.EOutput()
11676 db_keys = list(vartree.dbapi._aux_cache_keys)
11679 pkg = pkg_cache.get(cpv)
11681 pkg = Package(cpv=cpv, installed=True,
11682 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11683 root_config=root_config,
11684 type_name="installed")
11685 pkg_cache[cpv] = pkg
11688 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11690 # At least the parent needs to exist for the lock file.
11691 portage.util.ensure_dirs(vdb_path)
11692 except portage.exception.PortageException:
11696 if os.access(vdb_path, os.W_OK):
11697 vdb_lock = portage.locks.lockdir(vdb_path)
11698 realsyslist = sets["system"].getAtoms()
11700 for x in realsyslist:
11701 mycp = portage.dep_getkey(x)
11702 if mycp in settings.getvirtuals():
11704 for provider in settings.getvirtuals()[mycp]:
11705 if vartree.dbapi.match(provider):
11706 providers.append(provider)
11707 if len(providers) == 1:
11708 syslist.extend(providers)
11710 syslist.append(mycp)
11712 mysettings = portage.config(clone=settings)
11714 if not unmerge_files:
11715 if unmerge_action == "unmerge":
11717 print bold("emerge unmerge") + " can only be used with specific package names"
11723 localtree = vartree
11724 # process all arguments and add all
11725 # valid db entries to candidate_catpkgs
11727 if not unmerge_files:
11728 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11730 #we've got command-line arguments
11731 if not unmerge_files:
11732 print "\nNo packages to unmerge have been provided.\n"
11734 for x in unmerge_files:
11735 arg_parts = x.split('/')
11736 if x[0] not in [".","/"] and \
11737 arg_parts[-1][-7:] != ".ebuild":
11738 #possible cat/pkg or dep; treat as such
11739 candidate_catpkgs.append(x)
11740 elif unmerge_action in ["prune","clean"]:
11741 print "\n!!! Prune and clean do not accept individual" + \
11742 " ebuilds as arguments;\n skipping.\n"
11745 # it appears that the user is specifying an installed
11746 # ebuild and we're in "unmerge" mode, so it's ok.
11747 if not os.path.exists(x):
11748 print "\n!!! The path '"+x+"' doesn't exist.\n"
11751 absx = os.path.abspath(x)
11752 sp_absx = absx.split("/")
11753 if sp_absx[-1][-7:] == ".ebuild":
11755 absx = "/".join(sp_absx)
11757 sp_absx_len = len(sp_absx)
11759 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11760 vdb_len = len(vdb_path)
11762 sp_vdb = vdb_path.split("/")
11763 sp_vdb_len = len(sp_vdb)
11765 if not os.path.exists(absx+"/CONTENTS"):
11766 print "!!! Not a valid db dir: "+str(absx)
11769 if sp_absx_len <= sp_vdb_len:
11770 # The Path is shorter... so it can't be inside the vdb.
11773 print "\n!!!",x,"cannot be inside "+ \
11774 vdb_path+"; aborting.\n"
11777 for idx in range(0,sp_vdb_len):
11778 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11781 print "\n!!!", x, "is not inside "+\
11782 vdb_path+"; aborting.\n"
11785 print "="+"/".join(sp_absx[sp_vdb_len:])
11786 candidate_catpkgs.append(
11787 "="+"/".join(sp_absx[sp_vdb_len:]))
11790 if (not "--quiet" in myopts):
11792 if settings["ROOT"] != "/":
11793 writemsg_level(darkgreen(newline+ \
11794 ">>> Using system located in ROOT tree %s\n" % \
11797 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11798 not ("--quiet" in myopts):
11799 writemsg_level(darkgreen(newline+\
11800 ">>> These are the packages that would be unmerged:\n"))
11802 # Preservation of order is required for --depclean and --prune so
11803 # that dependencies are respected. Use all_selected to eliminate
11804 # duplicate packages since the same package may be selected by
11807 all_selected = set()
11808 for x in candidate_catpkgs:
11809 # cycle through all our candidate deps and determine
11810 # what will and will not get unmerged
11812 mymatch = vartree.dbapi.match(x)
11813 except portage.exception.AmbiguousPackageName, errpkgs:
11814 print "\n\n!!! The short ebuild name \"" + \
11815 x + "\" is ambiguous. Please specify"
11816 print "!!! one of the following fully-qualified " + \
11817 "ebuild names instead:\n"
11818 for i in errpkgs[0]:
11819 print " " + green(i)
11823 if not mymatch and x[0] not in "<>=~":
11824 mymatch = localtree.dep_match(x)
11826 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11827 (x, unmerge_action), noiselevel=-1)
11831 {"protected": set(), "selected": set(), "omitted": set()})
11832 mykey = len(pkgmap) - 1
11833 if unmerge_action=="unmerge":
11835 if y not in all_selected:
11836 pkgmap[mykey]["selected"].add(y)
11837 all_selected.add(y)
11838 elif unmerge_action == "prune":
11839 if len(mymatch) == 1:
11841 best_version = mymatch[0]
11842 best_slot = vartree.getslot(best_version)
11843 best_counter = vartree.dbapi.cpv_counter(best_version)
11844 for mypkg in mymatch[1:]:
11845 myslot = vartree.getslot(mypkg)
11846 mycounter = vartree.dbapi.cpv_counter(mypkg)
11847 if (myslot == best_slot and mycounter > best_counter) or \
11848 mypkg == portage.best([mypkg, best_version]):
11849 if myslot == best_slot:
11850 if mycounter < best_counter:
11851 # On slot collision, keep the one with the
11852 # highest counter since it is the most
11853 # recently installed.
11855 best_version = mypkg
11857 best_counter = mycounter
11858 pkgmap[mykey]["protected"].add(best_version)
11859 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11860 if mypkg != best_version and mypkg not in all_selected)
11861 all_selected.update(pkgmap[mykey]["selected"])
11863 # unmerge_action == "clean"
11865 for mypkg in mymatch:
11866 if unmerge_action == "clean":
11867 myslot = localtree.getslot(mypkg)
11869 # since we're pruning, we don't care about slots
11870 # and put all the pkgs in together
11872 if myslot not in slotmap:
11873 slotmap[myslot] = {}
11874 slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11876 for mypkg in vartree.dbapi.cp_list(
11877 portage.dep_getkey(mymatch[0])):
11878 myslot = vartree.getslot(mypkg)
11879 if myslot not in slotmap:
11880 slotmap[myslot] = {}
11881 slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
11883 for myslot in slotmap:
11884 counterkeys = slotmap[myslot].keys()
11885 if not counterkeys:
11888 pkgmap[mykey]["protected"].add(
11889 slotmap[myslot][counterkeys[-1]])
11890 del counterkeys[-1]
11892 for counter in counterkeys[:]:
11893 mypkg = slotmap[myslot][counter]
11894 if mypkg not in mymatch:
11895 counterkeys.remove(counter)
11896 pkgmap[mykey]["protected"].add(
11897 slotmap[myslot][counter])
11899 #be pretty and get them in order of merge:
11900 for ckey in counterkeys:
11901 mypkg = slotmap[myslot][ckey]
11902 if mypkg not in all_selected:
11903 pkgmap[mykey]["selected"].add(mypkg)
11904 all_selected.add(mypkg)
11905 # ok, now the last-merged package
11906 # is protected, and the rest are selected
11907 numselected = len(all_selected)
11908 if global_unmerge and not numselected:
11909 portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11912 if not numselected:
11913 portage.writemsg_stdout(
11914 "\n>>> No packages selected for removal by " + \
11915 unmerge_action + "\n")
11919 vartree.dbapi.flush_cache()
11920 portage.locks.unlockdir(vdb_lock)
11922 from portage.sets.base import EditablePackageSet
11924 # generate a list of package sets that are directly or indirectly listed in "world",
11925 # as there is no persistent list of "installed" sets
11926 installed_sets = ["world"]
11931 pos = len(installed_sets)
11932 for s in installed_sets[pos - 1:]:
11935 candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
11938 installed_sets += candidates
11939 installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
11942 # we don't want to unmerge packages that are still listed in user-editable package sets
11943 # listed in "world" as they would be remerged on the next update of "world" or the
11944 # relevant package sets.
11945 unknown_sets = set()
11946 for cp in xrange(len(pkgmap)):
11947 for cpv in pkgmap[cp]["selected"].copy():
11951 # It could have been uninstalled
11952 # by a concurrent process.
11955 if unmerge_action != "clean" and \
11956 root_config.root == "/" and \
11957 portage.match_from_list(
11958 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
11959 msg = ("Not unmerging package %s since there is no valid " + \
11960 "reason for portage to unmerge itself.") % (pkg.cpv,)
11961 for line in textwrap.wrap(msg, 75):
11963 # adjust pkgmap so the display output is correct
11964 pkgmap[cp]["selected"].remove(cpv)
11965 all_selected.remove(cpv)
11966 pkgmap[cp]["protected"].add(cpv)
11970 for s in installed_sets:
11971 # skip sets that the user requested to unmerge, and skip world
11972 # unless we're unmerging a package set (as the package would be
11973 # removed from "world" later on)
11974 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
11978 if s in unknown_sets:
11980 unknown_sets.add(s)
11981 out = portage.output.EOutput()
11982 out.eerror(("Unknown set '@%s' in " + \
11983 "%svar/lib/portage/world_sets") % \
11984 (s, root_config.root))
11987 # only check instances of EditablePackageSet as other classes are generally used for
11988 # special purposes and can be ignored here (and are usually generated dynamically, so the
11989 # user can't do much about them anyway)
11990 if isinstance(sets[s], EditablePackageSet):
11992 # This is derived from a snippet of code in the
11993 # depgraph._iter_atoms_for_pkg() method.
11994 for atom in sets[s].iterAtomsForPackage(pkg):
11995 inst_matches = vartree.dbapi.match(atom)
11996 inst_matches.reverse() # descending order
11998 for inst_cpv in inst_matches:
12000 inst_pkg = _pkg(inst_cpv)
12002 # It could have been uninstalled
12003 # by a concurrent process.
12006 if inst_pkg.cp != atom.cp:
12008 if pkg >= inst_pkg:
12009 # This is descending order, and we're not
12010 # interested in any versions <= pkg given.
12012 if pkg.slot_atom != inst_pkg.slot_atom:
12013 higher_slot = inst_pkg
12015 if higher_slot is None:
12019 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
12020 #print colorize("WARN", "but still listed in the following package sets:")
12021 #print " %s\n" % ", ".join(parents)
12022 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
12023 print colorize("WARN", "still referenced by the following package sets:")
12024 print " %s\n" % ", ".join(parents)
12025 # adjust pkgmap so the display output is correct
12026 pkgmap[cp]["selected"].remove(cpv)
12027 all_selected.remove(cpv)
12028 pkgmap[cp]["protected"].add(cpv)
12032 numselected = len(all_selected)
12033 if not numselected:
12035 "\n>>> No packages selected for removal by " + \
12036 unmerge_action + "\n")
12039 # Unmerge order only matters in some cases
12043 selected = d["selected"]
12046 cp = portage.cpv_getkey(iter(selected).next())
12047 cp_dict = unordered.get(cp)
12048 if cp_dict is None:
12050 unordered[cp] = cp_dict
12053 for k, v in d.iteritems():
12054 cp_dict[k].update(v)
12055 pkgmap = [unordered[cp] for cp in sorted(unordered)]
12057 for x in xrange(len(pkgmap)):
12058 selected = pkgmap[x]["selected"]
12061 for mytype, mylist in pkgmap[x].iteritems():
12062 if mytype == "selected":
12064 mylist.difference_update(all_selected)
12065 cp = portage.cpv_getkey(iter(selected).next())
12066 for y in localtree.dep_match(cp):
12067 if y not in pkgmap[x]["omitted"] and \
12068 y not in pkgmap[x]["selected"] and \
12069 y not in pkgmap[x]["protected"] and \
12070 y not in all_selected:
12071 pkgmap[x]["omitted"].add(y)
12072 if global_unmerge and not pkgmap[x]["selected"]:
12073 #avoid cluttering the preview printout with stuff that isn't getting unmerged
12075 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
12076 writemsg_level(colorize("BAD","\a\n\n!!! " + \
12077 "'%s' is part of your system profile.\n" % cp),
12078 level=logging.WARNING, noiselevel=-1)
12079 writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
12080 "be damaging to your system.\n\n"),
12081 level=logging.WARNING, noiselevel=-1)
12082 if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
12083 countdown(int(settings["EMERGE_WARNING_DELAY"]),
12084 colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
12086 writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
12088 writemsg_level(bold(cp) + ": ", noiselevel=-1)
12089 for mytype in ["selected","protected","omitted"]:
12091 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
12092 if pkgmap[x][mytype]:
12093 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
12094 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12095 for pn, ver, rev in sorted_pkgs:
12099 myversion = ver + "-" + rev
12100 if mytype == "selected":
12102 colorize("UNMERGE_WARN", myversion + " "),
12106 colorize("GOOD", myversion + " "), noiselevel=-1)
12108 writemsg_level("none ", noiselevel=-1)
12110 writemsg_level("\n", noiselevel=-1)
12112 writemsg_level("\n", noiselevel=-1)
12114 writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12115 " packages are slated for removal.\n")
12116 writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12117 " and " + colorize("GOOD", "'omitted'") + \
12118 " packages will not be removed.\n\n")
12120 if "--pretend" in myopts:
12121 #we're done... return
12123 if "--ask" in myopts:
12124 if userquery("Would you like to unmerge these packages?")=="No":
12125 # enter pretend mode for correct formatting of results
12126 myopts["--pretend"] = True
12131 #the real unmerging begins, after a short delay....
12132 if clean_delay and not autoclean:
12133 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12135 for x in xrange(len(pkgmap)):
12136 for y in pkgmap[x]["selected"]:
12137 writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12138 emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12139 mysplit = y.split("/")
12141 retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12142 mysettings, unmerge_action not in ["clean","prune"],
12143 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12144 scheduler=scheduler)
12146 if retval != os.EX_OK:
12147 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12149 raise UninstallFailure(retval)
12152 if clean_world and hasattr(sets["world"], "cleanPackage"):
12153 sets["world"].cleanPackage(vartree.dbapi, y)
12154 emergelog(xterm_titles, " >>> unmerge success: "+y)
12155 if clean_world and hasattr(sets["world"], "remove"):
12156 for s in root_config.setconfig.active:
12157 sets["world"].remove(SETPREFIX+s)
12160 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12162 if os.path.exists("/usr/bin/install-info"):
12163 out = portage.output.EOutput()
12168 inforoot=normpath(root+z)
12169 if os.path.isdir(inforoot):
12170 infomtime = long(os.stat(inforoot).st_mtime)
12171 if inforoot not in prev_mtimes or \
12172 prev_mtimes[inforoot] != infomtime:
12173 regen_infodirs.append(inforoot)
12175 if not regen_infodirs:
12176 portage.writemsg_stdout("\n")
12177 out.einfo("GNU info directory index is up-to-date.")
12179 portage.writemsg_stdout("\n")
12180 out.einfo("Regenerating GNU info directory index...")
12182 dir_extensions = ("", ".gz", ".bz2")
12186 for inforoot in regen_infodirs:
12190 if not os.path.isdir(inforoot) or \
12191 not os.access(inforoot, os.W_OK):
12194 file_list = os.listdir(inforoot)
12196 dir_file = os.path.join(inforoot, "dir")
12197 moved_old_dir = False
12198 processed_count = 0
12199 for x in file_list:
12200 if x.startswith(".") or \
12201 os.path.isdir(os.path.join(inforoot, x)):
12203 if x.startswith("dir"):
12205 for ext in dir_extensions:
12206 if x == "dir" + ext or \
12207 x == "dir" + ext + ".old":
12212 if processed_count == 0:
12213 for ext in dir_extensions:
12215 os.rename(dir_file + ext, dir_file + ext + ".old")
12216 moved_old_dir = True
12217 except EnvironmentError, e:
12218 if e.errno != errno.ENOENT:
12221 processed_count += 1
12222 myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12223 existsstr="already exists, for file `"
12225 if re.search(existsstr,myso):
12226 # Already exists... Don't increment the count for this.
12228 elif myso[:44]=="install-info: warning: no info dir entry in ":
12229 # This info file doesn't contain a DIR-header: install-info produces this
12230 # (harmless) warning (the --quiet switch doesn't seem to work).
12231 # Don't increment the count for this.
12234 badcount=badcount+1
12235 errmsg += myso + "\n"
12238 if moved_old_dir and not os.path.exists(dir_file):
12239 # We didn't generate a new dir file, so put the old file
12240 # back where it was originally found.
12241 for ext in dir_extensions:
12243 os.rename(dir_file + ext + ".old", dir_file + ext)
12244 except EnvironmentError, e:
12245 if e.errno != errno.ENOENT:
12249 # Clean dir.old cruft so that they don't prevent
12250 # unmerge of otherwise empty directories.
12251 for ext in dir_extensions:
12253 os.unlink(dir_file + ext + ".old")
12254 except EnvironmentError, e:
12255 if e.errno != errno.ENOENT:
12259 #update mtime so we can potentially avoid regenerating.
12260 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12263 out.eerror("Processed %d info files; %d errors." % \
12264 (icount, badcount))
12265 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12268 out.einfo("Processed %d info files." % (icount,))
12271 def display_news_notification(root_config, myopts):
12272 target_root = root_config.root
12273 trees = root_config.trees
12274 settings = trees["vartree"].settings
12275 portdb = trees["porttree"].dbapi
12276 vardb = trees["vartree"].dbapi
12277 NEWS_PATH = os.path.join("metadata", "news")
12278 UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12279 newsReaderDisplay = False
12280 update = "--pretend" not in myopts
12282 for repo in portdb.getRepositories():
12283 unreadItems = checkUpdatedNewsItems(
12284 portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12286 if not newsReaderDisplay:
12287 newsReaderDisplay = True
12289 print colorize("WARN", " * IMPORTANT:"),
12290 print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12293 if newsReaderDisplay:
12294 print colorize("WARN", " *"),
12295 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
12298 def display_preserved_libs(vardbapi):
12301 # Ensure the registry is consistent with existing files.
12302 vardbapi.plib_registry.pruneNonExisting()
12304 if vardbapi.plib_registry.hasEntries():
12306 print colorize("WARN", "!!!") + " existing preserved libs:"
12307 plibdata = vardbapi.plib_registry.getPreservedLibs()
12308 linkmap = vardbapi.linkmap
12311 linkmap_broken = False
12315 except portage.exception.CommandNotFound, e:
12316 writemsg_level("!!! Command Not Found: %s\n" % (e,),
12317 level=logging.ERROR, noiselevel=-1)
12319 linkmap_broken = True
12321 search_for_owners = set()
12322 for cpv in plibdata:
12323 internal_plib_keys = set(linkmap._obj_key(f) \
12324 for f in plibdata[cpv])
12325 for f in plibdata[cpv]:
12326 if f in consumer_map:
12329 for c in linkmap.findConsumers(f):
12330 # Filter out any consumers that are also preserved libs
12331 # belonging to the same package as the provider.
12332 if linkmap._obj_key(c) not in internal_plib_keys:
12333 consumers.append(c)
12335 consumer_map[f] = consumers
12336 search_for_owners.update(consumers[:MAX_DISPLAY+1])
12338 owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12340 for cpv in plibdata:
12341 print colorize("WARN", ">>>") + " package: %s" % cpv
12343 for f in plibdata[cpv]:
12344 obj_key = linkmap._obj_key(f)
12345 alt_paths = samefile_map.get(obj_key)
12346 if alt_paths is None:
12348 samefile_map[obj_key] = alt_paths
12351 for alt_paths in samefile_map.itervalues():
12352 alt_paths = sorted(alt_paths)
12353 for p in alt_paths:
12354 print colorize("WARN", " * ") + " - %s" % (p,)
12356 consumers = consumer_map.get(f, [])
12357 for c in consumers[:MAX_DISPLAY]:
12358 print colorize("WARN", " * ") + " used by %s (%s)" % \
12359 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12360 if len(consumers) == MAX_DISPLAY + 1:
12361 print colorize("WARN", " * ") + " used by %s (%s)" % \
12362 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12363 for x in owners.get(consumers[MAX_DISPLAY], [])))
12364 elif len(consumers) > MAX_DISPLAY:
12365 print colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY)
12366 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12369 def _flush_elog_mod_echo():
12371 Dump the mod_echo output now so that our other
12372 notifications are shown last.
12374 @returns: True if messages were shown, False otherwise.
12376 messages_shown = False
12378 from portage.elog import mod_echo
12379 except ImportError:
12380 pass # happens during downgrade to a version without the module
12382 messages_shown = bool(mod_echo._items)
12383 mod_echo.finalize()
12384 return messages_shown
12386 def post_emerge(root_config, myopts, mtimedb, retval):
12388 Misc. things to run at the end of a merge session.
12391 Update Config Files
12394 Display preserved libs warnings
12397 @param trees: A dictionary mapping each ROOT to it's package databases
12399 @param mtimedb: The mtimeDB to store data needed across merge invocations
12400 @type mtimedb: MtimeDB class instance
12401 @param retval: Emerge's return value
12405 1. Calls sys.exit(retval)
12408 target_root = root_config.root
12409 trees = { target_root : root_config.trees }
12410 vardbapi = trees[target_root]["vartree"].dbapi
12411 settings = vardbapi.settings
12412 info_mtimes = mtimedb["info"]
12414 # Load the most current variables from ${ROOT}/etc/profile.env
12417 settings.regenerate()
12420 config_protect = settings.get("CONFIG_PROTECT","").split()
12421 infodirs = settings.get("INFOPATH","").split(":") + \
12422 settings.get("INFODIR","").split(":")
12426 if retval == os.EX_OK:
12427 exit_msg = " *** exiting successfully."
12429 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12430 emergelog("notitles" not in settings.features, exit_msg)
12432 _flush_elog_mod_echo()
12434 counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12435 if "--pretend" in myopts or (counter_hash is not None and \
12436 counter_hash == vardbapi._counter_hash()):
12437 display_news_notification(root_config, myopts)
12438 # If vdb state has not changed then there's nothing else to do.
12441 vdb_path = os.path.join(target_root, portage.VDB_PATH)
12442 portage.util.ensure_dirs(vdb_path)
12444 if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
12445 vdb_lock = portage.locks.lockdir(vdb_path)
12449 if "noinfo" not in settings.features:
12450 chk_updated_info_files(target_root,
12451 infodirs, info_mtimes, retval)
12455 portage.locks.unlockdir(vdb_lock)
12457 chk_updated_cfg_files(target_root, config_protect)
12459 display_news_notification(root_config, myopts)
12460 if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
12461 display_preserved_libs(vardbapi)
12466 def chk_updated_cfg_files(target_root, config_protect):
12468 #number of directories with some protect files in them
12470 for x in config_protect:
12471 x = os.path.join(target_root, x.lstrip(os.path.sep))
12472 if not os.access(x, os.W_OK):
12473 # Avoid Permission denied errors generated
12477 mymode = os.lstat(x).st_mode
12480 if stat.S_ISLNK(mymode):
12481 # We want to treat it like a directory if it
12482 # is a symlink to an existing directory.
12484 real_mode = os.stat(x).st_mode
12485 if stat.S_ISDIR(real_mode):
12489 if stat.S_ISDIR(mymode):
12490 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12492 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12493 os.path.split(x.rstrip(os.path.sep))
12494 mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12495 a = commands.getstatusoutput(mycommand)
12497 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12499 # Show the error message alone, sending stdout to /dev/null.
12500 os.system(mycommand + " 1>/dev/null")
12502 files = a[1].split('\0')
12503 # split always produces an empty string as the last element
12504 if files and not files[-1]:
12508 print "\n"+colorize("WARN", " * IMPORTANT:"),
12509 if stat.S_ISDIR(mymode):
12510 print "%d config files in '%s' need updating." % \
12513 print "config file '%s' needs updating." % x
12516 print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12517 " section of the " + bold("emerge")
12518 print " "+yellow("*")+" man page to learn how to update config files."
12520 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12523 Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
12524 Returns the number of unread (yet relevent) items.
12526 @param portdb: a portage tree database
12527 @type portdb: pordbapi
12528 @param vardb: an installed package database
12529 @type vardb: vardbapi
12532 @param UNREAD_PATH:
12538 1. The number of unread but relevant news items.
12541 from portage.news import NewsManager
12542 manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12543 return manager.getUnreadItems( repo_id, update=update )
12545 def insert_category_into_atom(atom, category):
12546 alphanum = re.search(r'\w', atom)
12548 ret = atom[:alphanum.start()] + "%s/" % category + \
12549 atom[alphanum.start():]
12554 def is_valid_package_atom(x):
12556 alphanum = re.search(r'\w', x)
12558 x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12559 return portage.isvalidatom(x)
12561 def show_blocker_docs_link():
12563 print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12564 print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12566 print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12569 def show_mask_docs():
12570 print "For more information, see the MASKED PACKAGES section in the emerge"
12571 print "man page or refer to the Gentoo Handbook."
12573 def action_sync(settings, trees, mtimedb, myopts, myaction):
12574 xterm_titles = "notitles" not in settings.features
12575 emergelog(xterm_titles, " === sync")
12576 myportdir = settings.get("PORTDIR", None)
12577 out = portage.output.EOutput()
12579 sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
12581 if myportdir[-1]=="/":
12582 myportdir=myportdir[:-1]
12584 st = os.stat(myportdir)
12588 print ">>>",myportdir,"not found, creating it."
12589 os.makedirs(myportdir,0755)
12590 st = os.stat(myportdir)
12593 spawn_kwargs["env"] = settings.environ()
12594 if 'usersync' in settings.features and \
12595 portage.data.secpass >= 2 and \
12596 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12597 st.st_gid != os.getgid() and st.st_mode & 0070):
12599 homedir = pwd.getpwuid(st.st_uid).pw_dir
12603 # Drop privileges when syncing, in order to match
12604 # existing uid/gid settings.
12605 spawn_kwargs["uid"] = st.st_uid
12606 spawn_kwargs["gid"] = st.st_gid
12607 spawn_kwargs["groups"] = [st.st_gid]
12608 spawn_kwargs["env"]["HOME"] = homedir
12610 if not st.st_mode & 0020:
12611 umask = umask | 0020
12612 spawn_kwargs["umask"] = umask
12614 syncuri = settings.get("SYNC", "").strip()
12616 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12617 noiselevel=-1, level=logging.ERROR)
12620 vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12621 vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12624 dosyncuri = syncuri
12625 updatecache_flg = False
12626 if myaction == "metadata":
12627 print "skipping sync"
12628 updatecache_flg = True
12629 elif ".git" in vcs_dirs:
12630 # Update existing git repository, and ignore the syncuri. We are
12631 # going to trust the user and assume that the user is in the branch
12632 # that he/she wants updated. We'll let the user manage branches with
12634 if portage.process.find_binary("git") is None:
12635 msg = ["Command not found: git",
12636 "Type \"emerge dev-util/git\" to enable git support."]
12638 writemsg_level("!!! %s\n" % l,
12639 level=logging.ERROR, noiselevel=-1)
12641 msg = ">>> Starting git pull in %s..." % myportdir
12642 emergelog(xterm_titles, msg )
12643 writemsg_level(msg + "\n")
12644 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12645 (portage._shell_quote(myportdir),), **spawn_kwargs)
12646 if exitcode != os.EX_OK:
12647 msg = "!!! git pull error in %s." % myportdir
12648 emergelog(xterm_titles, msg)
12649 writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12651 msg = ">>> Git pull in %s successful" % myportdir
12652 emergelog(xterm_titles, msg)
12653 writemsg_level(msg + "\n")
12654 exitcode = git_sync_timestamps(settings, myportdir)
12655 if exitcode == os.EX_OK:
12656 updatecache_flg = True
12657 elif syncuri[:8]=="rsync://":
12658 for vcs_dir in vcs_dirs:
12659 writemsg_level(("!!! %s appears to be under revision " + \
12660 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12661 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12663 if not os.path.exists("/usr/bin/rsync"):
12664 print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12665 print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
12670 if settings["PORTAGE_RSYNC_OPTS"] == "":
12671 portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12672 rsync_opts.extend([
12673 "--recursive", # Recurse directories
12674 "--links", # Consider symlinks
12675 "--safe-links", # Ignore links outside of tree
12676 "--perms", # Preserve permissions
12677 "--times", # Preserive mod times
12678 "--compress", # Compress the data transmitted
12679 "--force", # Force deletion on non-empty dirs
12680 "--whole-file", # Don't do block transfers, only entire files
12681 "--delete", # Delete files that aren't in the master tree
12682 "--stats", # Show final statistics about what was transfered
12683 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12684 "--exclude=/distfiles", # Exclude distfiles from consideration
12685 "--exclude=/local", # Exclude local from consideration
12686 "--exclude=/packages", # Exclude packages from consideration
12690 # The below validation is not needed when using the above hardcoded
12693 portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12695 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12696 for opt in ("--recursive", "--times"):
12697 if opt not in rsync_opts:
12698 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12699 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12700 rsync_opts.append(opt)
12702 for exclude in ("distfiles", "local", "packages"):
12703 opt = "--exclude=/%s" % exclude
12704 if opt not in rsync_opts:
12705 portage.writemsg(yellow("WARNING:") + \
12706 " adding required option %s not included in " % opt + \
12707 "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12708 rsync_opts.append(opt)
12710 if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12711 def rsync_opt_startswith(opt_prefix):
12712 for x in rsync_opts:
12713 if x.startswith(opt_prefix):
12717 if not rsync_opt_startswith("--timeout="):
12718 rsync_opts.append("--timeout=%d" % mytimeout)
12720 for opt in ("--compress", "--whole-file"):
12721 if opt not in rsync_opts:
12722 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12723 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12724 rsync_opts.append(opt)
12726 if "--quiet" in myopts:
12727 rsync_opts.append("--quiet") # Shut up a lot
12729 rsync_opts.append("--verbose") # Print filelist
12731 if "--verbose" in myopts:
12732 rsync_opts.append("--progress") # Progress meter for each file
12734 if "--debug" in myopts:
12735 rsync_opts.append("--checksum") # Force checksum on all files
12737 # Real local timestamp file.
12738 servertimestampfile = os.path.join(
12739 myportdir, "metadata", "timestamp.chk")
12741 content = portage.util.grabfile(servertimestampfile)
12745 mytimestamp = time.mktime(time.strptime(content[0],
12746 "%a, %d %b %Y %H:%M:%S +0000"))
12747 except (OverflowError, ValueError):
12752 rsync_initial_timeout = \
12753 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12755 rsync_initial_timeout = 15
12758 maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12759 except SystemExit, e:
12760 raise # Needed else can't exit
12762 maxretries=3 #default number of retries
12765 user_name, hostname, port = re.split(
12766 "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12769 if user_name is None:
12771 updatecache_flg=True
12772 all_rsync_opts = set(rsync_opts)
12773 extra_rsync_opts = shlex.split(
12774 settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12775 all_rsync_opts.update(extra_rsync_opts)
12776 family = socket.AF_INET
12777 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12778 family = socket.AF_INET
12779 elif socket.has_ipv6 and \
12780 ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12781 family = socket.AF_INET6
12783 SERVER_OUT_OF_DATE = -1
12784 EXCEEDED_MAX_RETRIES = -2
12790 for addrinfo in socket.getaddrinfo(
12791 hostname, None, family, socket.SOCK_STREAM):
12792 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12793 # IPv6 addresses need to be enclosed in square brackets
12794 ips.append("[%s]" % addrinfo[4][0])
12796 ips.append(addrinfo[4][0])
12797 from random import shuffle
12799 except SystemExit, e:
12800 raise # Needed else can't exit
12801 except Exception, e:
12802 print "Notice:",str(e)
12807 dosyncuri = syncuri.replace(
12808 "//" + user_name + hostname + port + "/",
12809 "//" + user_name + ips[0] + port + "/", 1)
12810 except SystemExit, e:
12811 raise # Needed else can't exit
12812 except Exception, e:
12813 print "Notice:",str(e)
12817 if "--ask" in myopts:
12818 if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12823 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12824 if "--quiet" not in myopts:
12825 print ">>> Starting rsync with "+dosyncuri+"..."
12827 emergelog(xterm_titles,
12828 ">>> Starting retry %d of %d with %s" % \
12829 (retries,maxretries,dosyncuri))
12830 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12832 if mytimestamp != 0 and "--quiet" not in myopts:
12833 print ">>> Checking server timestamp ..."
12835 rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12837 if "--debug" in myopts:
12840 exitcode = os.EX_OK
12841 servertimestamp = 0
12842 # Even if there's no timestamp available locally, fetch the
12843 # timestamp anyway as an initial probe to verify that the server is
12844 # responsive. This protects us from hanging indefinitely on a
12845 # connection attempt to an unresponsive server which rsync's
12846 # --timeout option does not prevent.
12848 # Temporary file for remote server timestamp comparison.
12849 from tempfile import mkstemp
12850 fd, tmpservertimestampfile = mkstemp()
12852 mycommand = rsynccommand[:]
12853 mycommand.append(dosyncuri.rstrip("/") + \
12854 "/metadata/timestamp.chk")
12855 mycommand.append(tmpservertimestampfile)
12859 def timeout_handler(signum, frame):
12860 raise portage.exception.PortageException("timed out")
12861 signal.signal(signal.SIGALRM, timeout_handler)
12862 # Timeout here in case the server is unresponsive. The
12863 # --timeout rsync option doesn't apply to the initial
12864 # connection attempt.
12865 if rsync_initial_timeout:
12866 signal.alarm(rsync_initial_timeout)
12868 mypids.extend(portage.process.spawn(
12869 mycommand, env=settings.environ(), returnpid=True))
12870 exitcode = os.waitpid(mypids[0], 0)[1]
12871 content = portage.grabfile(tmpservertimestampfile)
12873 if rsync_initial_timeout:
12876 os.unlink(tmpservertimestampfile)
12879 except portage.exception.PortageException, e:
12883 if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
12884 os.kill(mypids[0], signal.SIGTERM)
12885 os.waitpid(mypids[0], 0)
12886 # This is the same code rsync uses for timeout.
12889 if exitcode != os.EX_OK:
12890 if exitcode & 0xff:
12891 exitcode = (exitcode & 0xff) << 8
12893 exitcode = exitcode >> 8
12895 portage.process.spawned_pids.remove(mypids[0])
12898 servertimestamp = time.mktime(time.strptime(
12899 content[0], "%a, %d %b %Y %H:%M:%S +0000"))
12900 except (OverflowError, ValueError):
12902 del mycommand, mypids, content
12903 if exitcode == os.EX_OK:
12904 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
12905 emergelog(xterm_titles,
12906 ">>> Cancelling sync -- Already current.")
12909 print ">>> Timestamps on the server and in the local repository are the same."
12910 print ">>> Cancelling all further sync action. You are already up to date."
12912 print ">>> In order to force sync, remove '%s'." % servertimestampfile
12916 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
12917 emergelog(xterm_titles,
12918 ">>> Server out of date: %s" % dosyncuri)
12921 print ">>> SERVER OUT OF DATE: %s" % dosyncuri
12923 print ">>> In order to force sync, remove '%s'." % servertimestampfile
12926 exitcode = SERVER_OUT_OF_DATE
12927 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
12929 mycommand = rsynccommand + [dosyncuri+"/", myportdir]
12930 exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
12931 if exitcode in [0,1,3,4,11,14,20,21]:
12933 elif exitcode in [1,3,4,11,14,20,21]:
12936 # Code 2 indicates protocol incompatibility, which is expected
12937 # for servers with protocol < 29 that don't support
12938 # --prune-empty-directories. Retry for a server that supports
12939 # at least rsync protocol version 29 (>=rsync-2.6.4).
12944 if retries<=maxretries:
12945 print ">>> Retrying..."
12950 updatecache_flg=False
12951 exitcode = EXCEEDED_MAX_RETRIES
12955 emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
12956 elif exitcode == SERVER_OUT_OF_DATE:
12958 elif exitcode == EXCEEDED_MAX_RETRIES:
12960 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
12965 msg.append("Rsync has reported that there is a syntax error. Please ensure")
12966 msg.append("that your SYNC statement is proper.")
12967 msg.append("SYNC=" + settings["SYNC"])
12969 msg.append("Rsync has reported that there is a File IO error. Normally")
12970 msg.append("this means your disk is full, but can be caused by corruption")
12971 msg.append("on the filesystem that contains PORTDIR. Please investigate")
12972 msg.append("and try again after the problem has been fixed.")
12973 msg.append("PORTDIR=" + settings["PORTDIR"])
12975 msg.append("Rsync was killed before it finished.")
12977 msg.append("Rsync has not successfully finished. It is recommended that you keep")
12978 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
12979 msg.append("to use rsync due to firewall or other restrictions. This should be a")
12980 msg.append("temporary problem unless complications exist with your network")
12981 msg.append("(and possibly your system's filesystem) configuration.")
12985 elif syncuri[:6]=="cvs://":
12986 if not os.path.exists("/usr/bin/cvs"):
12987 print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
12988 print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
12990 cvsroot=syncuri[6:]
12991 cvsdir=os.path.dirname(myportdir)
12992 if not os.path.exists(myportdir+"/CVS"):
12994 print ">>> Starting initial cvs checkout with "+syncuri+"..."
12995 if os.path.exists(cvsdir+"/gentoo-x86"):
12996 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
12999 os.rmdir(myportdir)
13001 if e.errno != errno.ENOENT:
13003 "!!! existing '%s' directory; exiting.\n" % myportdir)
13006 if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
13007 print "!!! cvs checkout error; exiting."
13009 os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
13012 print ">>> Starting cvs update with "+syncuri+"..."
13013 retval = portage.process.spawn_bash(
13014 "cd %s; cvs -z0 -q update -dP" % \
13015 (portage._shell_quote(myportdir),), **spawn_kwargs)
13016 if retval != os.EX_OK:
13018 dosyncuri = syncuri
13020 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
13021 noiselevel=-1, level=logging.ERROR)
13024 if updatecache_flg and \
13025 myaction != "metadata" and \
13026 "metadata-transfer" not in settings.features:
13027 updatecache_flg = False
13029 # Reload the whole config from scratch.
13030 settings, trees, mtimedb = load_emerge_config(trees=trees)
13031 root_config = trees[settings["ROOT"]]["root_config"]
13032 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13034 if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
13035 action_metadata(settings, portdb, myopts)
13037 if portage._global_updates(trees, mtimedb["updates"]):
13039 # Reload the whole config from scratch.
13040 settings, trees, mtimedb = load_emerge_config(trees=trees)
13041 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13042 root_config = trees[settings["ROOT"]]["root_config"]
13044 mybestpv = portdb.xmatch("bestmatch-visible",
13045 portage.const.PORTAGE_PACKAGE_ATOM)
13046 mypvs = portage.best(
13047 trees[settings["ROOT"]]["vartree"].dbapi.match(
13048 portage.const.PORTAGE_PACKAGE_ATOM))
13050 chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
13052 if myaction != "metadata":
13053 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
13054 retval = portage.process.spawn(
13055 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
13056 dosyncuri], env=settings.environ())
13057 if retval != os.EX_OK:
13058 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
13060 if(mybestpv != mypvs) and not "--quiet" in myopts:
13062 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
13063 print red(" * ")+"that you update portage now, before any other packages are updated."
13065 print red(" * ")+"To update portage, run 'emerge portage' now."
13068 display_news_notification(root_config, myopts)
13071 def git_sync_timestamps(settings, portdir):
13073 Since git doesn't preserve timestamps, synchronize timestamps between
13074 entries and ebuilds/eclasses. Assume the cache has the correct timestamp
13075 for a given file as long as the file in the working tree is not modified
13076 (relative to HEAD).
13078 cache_dir = os.path.join(portdir, "metadata", "cache")
13079 if not os.path.isdir(cache_dir):
13081 writemsg_level(">>> Synchronizing timestamps...\n")
13083 from portage.cache.cache_errors import CacheError
13085 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
13086 portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13087 except CacheError, e:
13088 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
13089 level=logging.ERROR, noiselevel=-1)
13092 ec_dir = os.path.join(portdir, "eclass")
13094 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13095 if f.endswith(".eclass"))
13097 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13098 level=logging.ERROR, noiselevel=-1)
13101 args = [portage.const.BASH_BINARY, "-c",
13102 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13103 portage._shell_quote(portdir)]
13105 proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13106 modified_files = set(l.rstrip("\n") for l in proc.stdout)
13108 if rval != os.EX_OK:
13111 modified_eclasses = set(ec for ec in ec_names \
13112 if os.path.join("eclass", ec + ".eclass") in modified_files)
13114 updated_ec_mtimes = {}
13116 for cpv in cache_db:
13117 cpv_split = portage.catpkgsplit(cpv)
13118 if cpv_split is None:
13119 writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13120 level=logging.ERROR, noiselevel=-1)
13123 cat, pn, ver, rev = cpv_split
13124 cat, pf = portage.catsplit(cpv)
13125 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
13126 if relative_eb_path in modified_files:
13130 cache_entry = cache_db[cpv]
13131 eb_mtime = cache_entry.get("_mtime_")
13132 ec_mtimes = cache_entry.get("_eclasses_")
13134 writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13135 level=logging.ERROR, noiselevel=-1)
13137 except CacheError, e:
13138 writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13139 (cpv, e), level=logging.ERROR, noiselevel=-1)
13142 if eb_mtime is None:
13143 writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13144 level=logging.ERROR, noiselevel=-1)
13148 eb_mtime = long(eb_mtime)
13150 writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13151 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13154 if ec_mtimes is None:
13155 writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13156 level=logging.ERROR, noiselevel=-1)
13159 if modified_eclasses.intersection(ec_mtimes):
13162 missing_eclasses = set(ec_mtimes).difference(ec_names)
13163 if missing_eclasses:
13164 writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13165 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13169 eb_path = os.path.join(portdir, relative_eb_path)
13171 current_eb_mtime = os.stat(eb_path)
13173 writemsg_level("!!! Missing ebuild: %s\n" % \
13174 (cpv,), level=logging.ERROR, noiselevel=-1)
13177 inconsistent = False
13178 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13179 updated_mtime = updated_ec_mtimes.get(ec)
13180 if updated_mtime is not None and updated_mtime != ec_mtime:
13181 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13182 (cpv, ec), level=logging.ERROR, noiselevel=-1)
13183 inconsistent = True
13189 if current_eb_mtime != eb_mtime:
13190 os.utime(eb_path, (eb_mtime, eb_mtime))
13192 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13193 if ec in updated_ec_mtimes:
13195 ec_path = os.path.join(ec_dir, ec + ".eclass")
13196 current_mtime = long(os.stat(ec_path).st_mtime)
13197 if current_mtime != ec_mtime:
13198 os.utime(ec_path, (ec_mtime, ec_mtime))
13199 updated_ec_mtimes[ec] = ec_mtime
13203 def action_metadata(settings, portdb, myopts):
13204 portage.writemsg_stdout("\n>>> Updating Portage cache: ")
13205 old_umask = os.umask(0002)
13206 cachedir = os.path.normpath(settings.depcachedir)
13207 if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
13208 "/lib", "/opt", "/proc", "/root", "/sbin",
13209 "/sys", "/tmp", "/usr", "/var"]:
13210 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13211 "ROOT DIRECTORY ON YOUR SYSTEM."
13212 print >> sys.stderr, \
13213 "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13215 if not os.path.exists(cachedir):
13218 ec = portage.eclass_cache.cache(portdb.porttree_root)
13219 myportdir = os.path.realpath(settings["PORTDIR"])
13220 cm = settings.load_best_module("portdbapi.metadbmodule")(
13221 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13223 from portage.cache import util
13225 class percentage_noise_maker(util.quiet_mirroring):
13226 def __init__(self, dbapi):
13228 self.cp_all = dbapi.cp_all()
13229 l = len(self.cp_all)
13230 self.call_update_min = 100000000
13231 self.min_cp_all = l/100.0
13235 def __iter__(self):
13236 for x in self.cp_all:
13238 if self.count > self.min_cp_all:
13239 self.call_update_min = 0
13241 for y in self.dbapi.cp_list(x):
13243 self.call_update_mine = 0
13245 def update(self, *arg):
13247 self.pstr = int(self.pstr) + 1
13250 sys.stdout.write("%s%i%%" % \
13251 ("\b" * (len(str(self.pstr))+1), self.pstr))
13253 self.call_update_min = 10000000
13255 def finish(self, *arg):
13256 sys.stdout.write("\b\b\b\b100%\n")
13259 if "--quiet" in myopts:
13260 def quicky_cpv_generator(cp_all_list):
13261 for x in cp_all_list:
13262 for y in portdb.cp_list(x):
13264 source = quicky_cpv_generator(portdb.cp_all())
13265 noise_maker = portage.cache.util.quiet_mirroring()
13267 noise_maker = source = percentage_noise_maker(portdb)
13268 portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
13269 eclass_cache=ec, verbose_instance=noise_maker)
13272 os.umask(old_umask)
13274 def action_regen(settings, portdb, max_jobs, max_load):
13275 xterm_titles = "notitles" not in settings.features
13276 emergelog(xterm_titles, " === regen")
13277 #regenerate cache entries
13278 portage.writemsg_stdout("Regenerating cache entries...\n")
13280 os.close(sys.stdin.fileno())
13281 except SystemExit, e:
13282 raise # Needed else can't exit
13287 regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13290 portage.writemsg_stdout("done!\n")
13291 return regen.returncode
13293 def action_config(settings, trees, myopts, myfiles):
13294 if len(myfiles) != 1:
13295 print red("!!! config can only take a single package atom at this time\n")
13297 if not is_valid_package_atom(myfiles[0]):
13298 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13300 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13301 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
13305 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13306 except portage.exception.AmbiguousPackageName, e:
13307 # Multiple matches thrown from cpv_expand
13310 print "No packages found.\n"
13312 elif len(pkgs) > 1:
13313 if "--ask" in myopts:
13315 print "Please select a package to configure:"
13319 options.append(str(idx))
13320 print options[-1]+") "+pkg
13322 options.append("X")
13323 idx = userquery("Selection?", options)
13326 pkg = pkgs[int(idx)-1]
13328 print "The following packages available:"
13331 print "\nPlease use a specific atom or the --ask option."
13337 if "--ask" in myopts:
13338 if userquery("Ready to configure "+pkg+"?") == "No":
13341 print "Configuring pkg..."
13343 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13344 mysettings = portage.config(clone=settings)
13345 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13346 debug = mysettings.get("PORTAGE_DEBUG") == "1"
13347 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13349 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
13350 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13351 if retval == os.EX_OK:
13352 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13353 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
13356 def action_info(settings, trees, myopts, myfiles):
13357 print getportageversion(settings["PORTDIR"], settings["ROOT"],
13358 settings.profile_path, settings["CHOST"],
13359 trees[settings["ROOT"]]["vartree"].dbapi)
13361 header_title = "System Settings"
13363 print header_width * "="
13364 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13365 print header_width * "="
13366 print "System uname: "+platform.platform(aliased=1)
13368 lastSync = portage.grabfile(os.path.join(
13369 settings["PORTDIR"], "metadata", "timestamp.chk"))
13370 print "Timestamp of tree:",
13376 output=commands.getstatusoutput("distcc --version")
13378 print str(output[1].split("\n",1)[0]),
13379 if "distcc" in settings.features:
13384 output=commands.getstatusoutput("ccache -V")
13386 print str(output[1].split("\n",1)[0]),
13387 if "ccache" in settings.features:
13392 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13393 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
13394 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13395 myvars = portage.util.unique_array(myvars)
13399 if portage.isvalidatom(x):
13400 pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13401 pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13402 pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13404 for pn, ver, rev in pkg_matches:
13406 pkgs.append(ver + "-" + rev)
13410 pkgs = ", ".join(pkgs)
13411 print "%-20s %s" % (x+":", pkgs)
13413 print "%-20s %s" % (x+":", "[NOT VALID]")
13415 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
13417 if "--verbose" in myopts:
13418 myvars=settings.keys()
13420 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13421 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13422 'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13423 'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13425 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13427 myvars = portage.util.unique_array(myvars)
13433 print '%s="%s"' % (x, settings[x])
13435 use = set(settings["USE"].split())
13436 use_expand = settings["USE_EXPAND"].split()
13438 for varname in use_expand:
13439 flag_prefix = varname.lower() + "_"
13440 for f in list(use):
13441 if f.startswith(flag_prefix):
13445 print 'USE="%s"' % " ".join(use),
13446 for varname in use_expand:
13447 myval = settings.get(varname)
13449 print '%s="%s"' % (varname, myval),
13452 unset_vars.append(x)
13454 print "Unset: "+", ".join(unset_vars)
13457 if "--debug" in myopts:
13458 for x in dir(portage):
13459 module = getattr(portage, x)
13460 if "cvs_id_string" in dir(module):
13461 print "%s: %s" % (str(x), str(module.cvs_id_string))
13463 # See if we can find any packages installed matching the strings
13464 # passed on the command line
13466 vardb = trees[settings["ROOT"]]["vartree"].dbapi
13467 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13469 mypkgs.extend(vardb.match(x))
13471 # If some packages were found...
13473 # Get our global settings (we only print stuff if it varies from
13474 # the current config)
13475 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13476 auxkeys = mydesiredvars + [ "USE", "IUSE"]
13478 pkgsettings = portage.config(clone=settings)
13480 for myvar in mydesiredvars:
13481 global_vals[myvar] = set(settings.get(myvar, "").split())
13483 # Loop through each package
13484 # Only print settings if they differ from global settings
13485 header_title = "Package Settings"
13486 print header_width * "="
13487 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13488 print header_width * "="
13489 from portage.output import EOutput
13492 # Get all package specific variables
13493 auxvalues = vardb.aux_get(pkg, auxkeys)
13495 for i in xrange(len(auxkeys)):
13496 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
13498 for myvar in mydesiredvars:
13499 # If the package variable doesn't match the
13500 # current global variable, something has changed
13501 # so set diff_found so we know to print
13502 if valuesmap[myvar] != global_vals[myvar]:
13503 diff_values[myvar] = valuesmap[myvar]
13504 valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
13505 valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
13506 pkgsettings.reset()
13507 # If a matching ebuild is no longer available in the tree, maybe it
13508 # would make sense to compare against the flags for the best
13509 # available version with the same slot?
13511 if portdb.cpv_exists(pkg):
13513 pkgsettings.setcpv(pkg, mydb=mydb)
13514 if valuesmap["IUSE"].intersection(
13515 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
13516 diff_values["USE"] = valuesmap["USE"]
13517 # If a difference was found, print the info for
13520 # Print package info
13521 print "%s was built with the following:" % pkg
13522 for myvar in mydesiredvars + ["USE"]:
13523 if myvar in diff_values:
13524 mylist = list(diff_values[myvar])
13526 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13528 print ">>> Attempting to run pkg_info() for '%s'" % pkg
13529 ebuildpath = vardb.findname(pkg)
13530 if not ebuildpath or not os.path.exists(ebuildpath):
13531 out.ewarn("No ebuild found for '%s'" % pkg)
13533 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13534 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
13535 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
13538 def action_search(root_config, myopts, myfiles, spinner):
13540 print "emerge: no search terms provided."
13542 searchinstance = search(root_config,
13543 spinner, "--searchdesc" in myopts,
13544 "--quiet" not in myopts, "--usepkg" in myopts,
13545 "--usepkgonly" in myopts)
13546 for mysearch in myfiles:
13548 searchinstance.execute(mysearch)
13549 except re.error, comment:
13550 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13552 searchinstance.output()
13554 def action_depclean(settings, trees, ldpath_mtimes,
13555 myopts, action, myfiles, spinner):
13556 # Kill packages that aren't explicitly merged or are required as a
13557 # dependency of another package. World file is explicit.
13559 # Global depclean or prune operations are not very safe when there are
13560 # missing dependencies since it's unknown how badly incomplete
13561 # the dependency graph is, and we might accidentally remove packages
13562 # that should have been pulled into the graph. On the other hand, it's
13563 # relatively safe to ignore missing deps when only asked to remove
13564 # specific packages.
13565 allow_missing_deps = len(myfiles) > 0
13568 msg.append("Always study the list of packages to be cleaned for any obvious\n")
13569 msg.append("mistakes. Packages that are part of the world set will always\n")
13570 msg.append("be kept. They can be manually added to this set with\n")
13571 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
13572 msg.append("package.provided (see portage(5)) will be removed by\n")
13573 msg.append("depclean, even if they are part of the world set.\n")
13575 msg.append("As a safety measure, depclean will not remove any packages\n")
13576 msg.append("unless *all* required dependencies have been resolved. As a\n")
13577 msg.append("consequence, it is often necessary to run %s\n" % \
13578 good("`emerge --update"))
13579 msg.append(good("--newuse --deep @system @world`") + \
13580 " prior to depclean.\n")
13582 if action == "depclean" and "--quiet" not in myopts and not myfiles:
13583 portage.writemsg_stdout("\n")
13585 portage.writemsg_stdout(colorize("WARN", " * ") + x)
13587 xterm_titles = "notitles" not in settings.features
13588 myroot = settings["ROOT"]
13589 root_config = trees[myroot]["root_config"]
13590 getSetAtoms = root_config.setconfig.getSetAtoms
13591 vardb = trees[myroot]["vartree"].dbapi
13593 required_set_names = ("system", "world")
13597 for s in required_set_names:
13598 required_sets[s] = InternalPackageSet(
13599 initial_atoms=getSetAtoms(s))
13602 # When removing packages, use a temporary version of world
13603 # which excludes packages that are intended to be eligible for
13605 world_temp_set = required_sets["world"]
13606 system_set = required_sets["system"]
13608 if not system_set or not world_temp_set:
13611 writemsg_level("!!! You have no system list.\n",
13612 level=logging.ERROR, noiselevel=-1)
13614 if not world_temp_set:
13615 writemsg_level("!!! You have no world file.\n",
13616 level=logging.WARNING, noiselevel=-1)
13618 writemsg_level("!!! Proceeding is likely to " + \
13619 "break your installation.\n",
13620 level=logging.WARNING, noiselevel=-1)
13621 if "--pretend" not in myopts:
13622 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13624 if action == "depclean":
13625 emergelog(xterm_titles, " >>> depclean")
13628 args_set = InternalPackageSet()
13631 if not is_valid_package_atom(x):
13632 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13633 level=logging.ERROR, noiselevel=-1)
13634 writemsg_level("!!! Please check ebuild(5) for full details.\n")
13637 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13638 except portage.exception.AmbiguousPackageName, e:
13639 msg = "The short ebuild name \"" + x + \
13640 "\" is ambiguous. Please specify " + \
13641 "one of the following " + \
13642 "fully-qualified ebuild names instead:"
13643 for line in textwrap.wrap(msg, 70):
13644 writemsg_level("!!! %s\n" % (line,),
13645 level=logging.ERROR, noiselevel=-1)
13647 writemsg_level(" %s\n" % colorize("INFORM", i),
13648 level=logging.ERROR, noiselevel=-1)
13649 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13652 matched_packages = False
13655 matched_packages = True
13657 if not matched_packages:
13658 writemsg_level(">>> No packages selected for removal by %s\n" % \
13662 writemsg_level("\nCalculating dependencies ")
13663 resolver_params = create_depgraph_params(myopts, "remove")
13664 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13665 vardb = resolver.trees[myroot]["vartree"].dbapi
13667 if action == "depclean":
13670 # Pull in everything that's installed but not matched
13671 # by an argument atom since we don't want to clean any
13672 # package if something depends on it.
13674 world_temp_set.clear()
13679 if args_set.findAtomForPackage(pkg) is None:
13680 world_temp_set.add("=" + pkg.cpv)
13682 except portage.exception.InvalidDependString, e:
13683 show_invalid_depstring_notice(pkg,
13684 pkg.metadata["PROVIDE"], str(e))
13686 world_temp_set.add("=" + pkg.cpv)
13689 elif action == "prune":
13691 # Pull in everything that's installed since we don't
13692 # to prune a package if something depends on it.
13693 world_temp_set.clear()
13694 world_temp_set.update(vardb.cp_all())
13698 # Try to prune everything that's slotted.
13699 for cp in vardb.cp_all():
13700 if len(vardb.cp_list(cp)) > 1:
13703 # Remove atoms from world that match installed packages
13704 # that are also matched by argument atoms, but do not remove
13705 # them if they match the highest installed version.
13708 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13709 if not pkgs_for_cp or pkg not in pkgs_for_cp:
13710 raise AssertionError("package expected in matches: " + \
13711 "cp = %s, cpv = %s matches = %s" % \
13712 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13714 highest_version = pkgs_for_cp[-1]
13715 if pkg == highest_version:
13716 # pkg is the highest version
13717 world_temp_set.add("=" + pkg.cpv)
13720 if len(pkgs_for_cp) <= 1:
13721 raise AssertionError("more packages expected: " + \
13722 "cp = %s, cpv = %s matches = %s" % \
13723 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13726 if args_set.findAtomForPackage(pkg) is None:
13727 world_temp_set.add("=" + pkg.cpv)
13729 except portage.exception.InvalidDependString, e:
13730 show_invalid_depstring_notice(pkg,
13731 pkg.metadata["PROVIDE"], str(e))
13733 world_temp_set.add("=" + pkg.cpv)
13737 for s, package_set in required_sets.iteritems():
13738 set_atom = SETPREFIX + s
13739 set_arg = SetArg(arg=set_atom, set=package_set,
13740 root_config=resolver.roots[myroot])
13741 set_args[s] = set_arg
13742 for atom in set_arg.set:
13743 resolver._dep_stack.append(
13744 Dependency(atom=atom, root=myroot, parent=set_arg))
13745 resolver.digraph.add(set_arg, None)
13747 success = resolver._complete_graph()
13748 writemsg_level("\b\b... done!\n")
13750 resolver.display_problems()
13755 def unresolved_deps():
13757 unresolvable = set()
13758 for dep in resolver._initially_unsatisfied_deps:
13759 if isinstance(dep.parent, Package) and \
13760 (dep.priority > UnmergeDepPriority.SOFT):
13761 unresolvable.add((dep.atom, dep.parent.cpv))
13763 if not unresolvable:
13766 if unresolvable and not allow_missing_deps:
13767 prefix = bad(" * ")
13769 msg.append("Dependencies could not be completely resolved due to")
13770 msg.append("the following required packages not being installed:")
13772 for atom, parent in unresolvable:
13773 msg.append(" %s pulled in by:" % (atom,))
13774 msg.append(" %s" % (parent,))
13776 msg.append("Have you forgotten to run " + \
13777 good("`emerge --update --newuse --deep @system @world`") + " prior")
13778 msg.append(("to %s? It may be necessary to manually " + \
13779 "uninstall packages that no longer") % action)
13780 msg.append("exist in the portage tree since " + \
13781 "it may not be possible to satisfy their")
13782 msg.append("dependencies. Also, be aware of " + \
13783 "the --with-bdeps option that is documented")
13784 msg.append("in " + good("`man emerge`") + ".")
13785 if action == "prune":
13787 msg.append("If you would like to ignore " + \
13788 "dependencies then use %s." % good("--nodeps"))
13789 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13790 level=logging.ERROR, noiselevel=-1)
13794 if unresolved_deps():
13797 graph = resolver.digraph.copy()
13798 required_pkgs_total = 0
13800 if isinstance(node, Package):
13801 required_pkgs_total += 1
13803 def show_parents(child_node):
13804 parent_nodes = graph.parent_nodes(child_node)
13805 if not parent_nodes:
13806 # With --prune, the highest version can be pulled in without any
13807 # real parent since all installed packages are pulled in. In that
13808 # case there's nothing to show here.
13811 for node in parent_nodes:
13812 parent_strs.append(str(getattr(node, "cpv", node)))
13815 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
13816 for parent_str in parent_strs:
13817 msg.append(" %s\n" % (parent_str,))
13819 portage.writemsg_stdout("".join(msg), noiselevel=-1)
13821 def cmp_pkg_cpv(pkg1, pkg2):
13822 """Sort Package instances by cpv."""
13823 if pkg1.cpv > pkg2.cpv:
13825 elif pkg1.cpv == pkg2.cpv:
13830 def create_cleanlist():
13831 pkgs_to_remove = []
13833 if action == "depclean":
13836 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13839 arg_atom = args_set.findAtomForPackage(pkg)
13840 except portage.exception.InvalidDependString:
13841 # this error has already been displayed by now
13845 if pkg not in graph:
13846 pkgs_to_remove.append(pkg)
13847 elif "--verbose" in myopts:
13851 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13852 if pkg not in graph:
13853 pkgs_to_remove.append(pkg)
13854 elif "--verbose" in myopts:
13857 elif action == "prune":
13858 # Prune really uses all installed instead of world. It's not
13859 # a real reverse dependency so don't display it as such.
13860 graph.remove(set_args["world"])
13862 for atom in args_set:
13863 for pkg in vardb.match_pkgs(atom):
13864 if pkg not in graph:
13865 pkgs_to_remove.append(pkg)
13866 elif "--verbose" in myopts:
13869 if not pkgs_to_remove:
13871 ">>> No packages selected for removal by %s\n" % action)
13872 if "--verbose" not in myopts:
13874 ">>> To see reverse dependencies, use %s\n" % \
13876 if action == "prune":
13878 ">>> To ignore dependencies, use %s\n" % \
13881 return pkgs_to_remove
13883 cleanlist = create_cleanlist()
13886 clean_set = set(cleanlist)
13888 # Check if any of these package are the sole providers of libraries
13889 # with consumers that have not been selected for removal. If so, these
13890 # packages and any dependencies need to be added to the graph.
13891 real_vardb = trees[myroot]["vartree"].dbapi
13892 linkmap = real_vardb.linkmap
13893 liblist = linkmap.listLibraryObjects()
13894 consumer_cache = {}
13895 provider_cache = {}
13899 writemsg_level(">>> Checking for lib consumers...\n")
13901 for pkg in cleanlist:
13902 pkg_dblink = real_vardb._dblink(pkg.cpv)
13903 provided_libs = set()
13905 for lib in liblist:
13906 if pkg_dblink.isowner(lib, myroot):
13907 provided_libs.add(lib)
13909 if not provided_libs:
13913 for lib in provided_libs:
13914 lib_consumers = consumer_cache.get(lib)
13915 if lib_consumers is None:
13916 lib_consumers = linkmap.findConsumers(lib)
13917 consumer_cache[lib] = lib_consumers
13919 consumers[lib] = lib_consumers
13924 for lib, lib_consumers in consumers.items():
13925 for consumer_file in list(lib_consumers):
13926 if pkg_dblink.isowner(consumer_file, myroot):
13927 lib_consumers.remove(consumer_file)
13928 if not lib_consumers:
13934 for lib, lib_consumers in consumers.iteritems():
13936 soname = soname_cache.get(lib)
13938 soname = linkmap.getSoname(lib)
13939 soname_cache[lib] = soname
13941 consumer_providers = []
13942 for lib_consumer in lib_consumers:
13943 providers = provider_cache.get(lib)
13944 if providers is None:
13945 providers = linkmap.findProviders(lib_consumer)
13946 provider_cache[lib_consumer] = providers
13947 if soname not in providers:
13948 # Why does this happen?
13950 consumer_providers.append(
13951 (lib_consumer, providers[soname]))
13953 consumers[lib] = consumer_providers
13955 consumer_map[pkg] = consumers
13959 search_files = set()
13960 for consumers in consumer_map.itervalues():
13961 for lib, consumer_providers in consumers.iteritems():
13962 for lib_consumer, providers in consumer_providers:
13963 search_files.add(lib_consumer)
13964 search_files.update(providers)
13966 writemsg_level(">>> Assigning files to packages...\n")
13967 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
13969 for pkg, consumers in consumer_map.items():
13970 for lib, consumer_providers in consumers.items():
13971 lib_consumers = set()
13973 for lib_consumer, providers in consumer_providers:
13974 owner_set = file_owners.get(lib_consumer)
13975 provider_dblinks = set()
13976 provider_pkgs = set()
13978 if len(providers) > 1:
13979 for provider in providers:
13980 provider_set = file_owners.get(provider)
13981 if provider_set is not None:
13982 provider_dblinks.update(provider_set)
13984 if len(provider_dblinks) > 1:
13985 for provider_dblink in provider_dblinks:
13986 pkg_key = ("installed", myroot,
13987 provider_dblink.mycpv, "nomerge")
13988 if pkg_key not in clean_set:
13989 provider_pkgs.add(vardb.get(pkg_key))
13994 if owner_set is not None:
13995 lib_consumers.update(owner_set)
13997 for consumer_dblink in list(lib_consumers):
13998 if ("installed", myroot, consumer_dblink.mycpv,
13999 "nomerge") in clean_set:
14000 lib_consumers.remove(consumer_dblink)
14004 consumers[lib] = lib_consumers
14008 del consumer_map[pkg]
14011 # TODO: Implement a package set for rebuilding consumer packages.
14013 msg = "In order to avoid breakage of link level " + \
14014 "dependencies, one or more packages will not be removed. " + \
14015 "This can be solved by rebuilding " + \
14016 "the packages that pulled them in."
14018 prefix = bad(" * ")
14019 from textwrap import wrap
14020 writemsg_level("".join(prefix + "%s\n" % line for \
14021 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
14024 for pkg, consumers in consumer_map.iteritems():
14025 unique_consumers = set(chain(*consumers.values()))
14026 unique_consumers = sorted(consumer.mycpv \
14027 for consumer in unique_consumers)
14029 msg.append(" %s pulled in by:" % (pkg.cpv,))
14030 for consumer in unique_consumers:
14031 msg.append(" %s" % (consumer,))
14033 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
14034 level=logging.WARNING, noiselevel=-1)
14036 # Add lib providers to the graph as children of lib consumers,
14037 # and also add any dependencies pulled in by the provider.
14038 writemsg_level(">>> Adding lib providers to graph...\n")
14040 for pkg, consumers in consumer_map.iteritems():
14041 for consumer_dblink in set(chain(*consumers.values())):
14042 consumer_pkg = vardb.get(("installed", myroot,
14043 consumer_dblink.mycpv, "nomerge"))
14044 if not resolver._add_pkg(pkg,
14045 Dependency(parent=consumer_pkg,
14046 priority=UnmergeDepPriority(runtime=True),
14048 resolver.display_problems()
14051 writemsg_level("\nCalculating dependencies ")
14052 success = resolver._complete_graph()
14053 writemsg_level("\b\b... done!\n")
14054 resolver.display_problems()
14057 if unresolved_deps():
14060 graph = resolver.digraph.copy()
14061 required_pkgs_total = 0
14063 if isinstance(node, Package):
14064 required_pkgs_total += 1
14065 cleanlist = create_cleanlist()
14068 clean_set = set(cleanlist)
14070 # Use a topological sort to create an unmerge order such that
14071 # each package is unmerged before it's dependencies. This is
14072 # necessary to avoid breaking things that may need to run
14073 # during pkg_prerm or pkg_postrm phases.
14075 # Create a new graph to account for dependencies between the
14076 # packages being unmerged.
14080 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
14081 runtime = UnmergeDepPriority(runtime=True)
14082 runtime_post = UnmergeDepPriority(runtime_post=True)
14083 buildtime = UnmergeDepPriority(buildtime=True)
14085 "RDEPEND": runtime,
14086 "PDEPEND": runtime_post,
14087 "DEPEND": buildtime,
14090 for node in clean_set:
14091 graph.add(node, None)
14093 node_use = node.metadata["USE"].split()
14094 for dep_type in dep_keys:
14095 depstr = node.metadata[dep_type]
14099 portage.dep._dep_check_strict = False
14100 success, atoms = portage.dep_check(depstr, None, settings,
14101 myuse=node_use, trees=resolver._graph_trees,
14104 portage.dep._dep_check_strict = True
14106 # Ignore invalid deps of packages that will
14107 # be uninstalled anyway.
14110 priority = priority_map[dep_type]
14112 if not isinstance(atom, portage.dep.Atom):
14113 # Ignore invalid atoms returned from dep_check().
14117 matches = vardb.match_pkgs(atom)
14120 for child_node in matches:
14121 if child_node in clean_set:
14122 graph.add(child_node, node, priority=priority)
14125 if len(graph.order) == len(graph.root_nodes()):
14126 # If there are no dependencies between packages
14127 # let unmerge() group them by cat/pn.
14129 cleanlist = [pkg.cpv for pkg in graph.order]
14131 # Order nodes from lowest to highest overall reference count for
14132 # optimal root node selection.
14133 node_refcounts = {}
14134 for node in graph.order:
14135 node_refcounts[node] = len(graph.parent_nodes(node))
14136 def cmp_reference_count(node1, node2):
14137 return node_refcounts[node1] - node_refcounts[node2]
14138 graph.order.sort(key=cmp_sort_key(cmp_reference_count))
14140 ignore_priority_range = [None]
14141 ignore_priority_range.extend(
14142 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14143 while not graph.empty():
14144 for ignore_priority in ignore_priority_range:
14145 nodes = graph.root_nodes(ignore_priority=ignore_priority)
14149 raise AssertionError("no root nodes")
14150 if ignore_priority is not None:
14151 # Some deps have been dropped due to circular dependencies,
14152 # so only pop one node in order do minimize the number that
14157 cleanlist.append(node.cpv)
14159 unmerge(root_config, myopts, "unmerge", cleanlist,
14160 ldpath_mtimes, ordered=ordered)
14162 if action == "prune":
14165 if not cleanlist and "--quiet" in myopts:
14168 print "Packages installed: "+str(len(vardb.cpv_all()))
14169 print "Packages in world: " + \
14170 str(len(root_config.sets["world"].getAtoms()))
14171 print "Packages in system: " + \
14172 str(len(root_config.sets["system"].getAtoms()))
14173 print "Required packages: "+str(required_pkgs_total)
14174 if "--pretend" in myopts:
14175 print "Number to remove: "+str(len(cleanlist))
14177 print "Number removed: "+str(len(cleanlist))
14179 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14181 Construct a depgraph for the given resume list. This will raise
14182 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14184 @returns: (success, depgraph, dropped_tasks)
14187 skip_unsatisfied = True
14188 mergelist = mtimedb["resume"]["mergelist"]
14189 dropped_tasks = set()
14191 mydepgraph = depgraph(settings, trees,
14192 myopts, myparams, spinner)
14194 success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14195 skip_masked=skip_masked)
14196 except depgraph.UnsatisfiedResumeDep, e:
14197 if not skip_unsatisfied:
14200 graph = mydepgraph.digraph
14201 unsatisfied_parents = dict((dep.parent, dep.parent) \
14202 for dep in e.value)
14203 traversed_nodes = set()
14204 unsatisfied_stack = list(unsatisfied_parents)
14205 while unsatisfied_stack:
14206 pkg = unsatisfied_stack.pop()
14207 if pkg in traversed_nodes:
14209 traversed_nodes.add(pkg)
14211 # If this package was pulled in by a parent
14212 # package scheduled for merge, removing this
14213 # package may cause the the parent package's
14214 # dependency to become unsatisfied.
14215 for parent_node in graph.parent_nodes(pkg):
14216 if not isinstance(parent_node, Package) \
14217 or parent_node.operation not in ("merge", "nomerge"):
14220 graph.child_nodes(parent_node,
14221 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14222 if pkg in unsatisfied:
14223 unsatisfied_parents[parent_node] = parent_node
14224 unsatisfied_stack.append(parent_node)
14226 pruned_mergelist = []
14227 for x in mergelist:
14228 if isinstance(x, list) and \
14229 tuple(x) not in unsatisfied_parents:
14230 pruned_mergelist.append(x)
14232 # If the mergelist doesn't shrink then this loop is infinite.
14233 if len(pruned_mergelist) == len(mergelist):
14234 # This happens if a package can't be dropped because
14235 # it's already installed, but it has unsatisfied PDEPEND.
14237 mergelist[:] = pruned_mergelist
14239 # Exclude installed packages that have been removed from the graph due
14240 # to failure to build/install runtime dependencies after the dependent
14241 # package has already been installed.
14242 dropped_tasks.update(pkg for pkg in \
14243 unsatisfied_parents if pkg.operation != "nomerge")
14244 mydepgraph.break_refs(unsatisfied_parents)
14246 del e, graph, traversed_nodes, \
14247 unsatisfied_parents, unsatisfied_stack
14251 return (success, mydepgraph, dropped_tasks)
14253 def action_build(settings, trees, mtimedb,
14254 myopts, myaction, myfiles, spinner):
14256 # validate the state of the resume data
14257 # so that we can make assumptions later.
14258 for k in ("resume", "resume_backup"):
14259 if k not in mtimedb:
14261 resume_data = mtimedb[k]
14262 if not isinstance(resume_data, dict):
14265 mergelist = resume_data.get("mergelist")
14266 if not isinstance(mergelist, list):
14269 for x in mergelist:
14270 if not (isinstance(x, list) and len(x) == 4):
14272 pkg_type, pkg_root, pkg_key, pkg_action = x
14273 if pkg_root not in trees:
14274 # Current $ROOT setting differs,
14275 # so the list must be stale.
14281 resume_opts = resume_data.get("myopts")
14282 if not isinstance(resume_opts, (dict, list)):
14285 favorites = resume_data.get("favorites")
14286 if not isinstance(favorites, list):
14291 if "--resume" in myopts and \
14292 ("resume" in mtimedb or
14293 "resume_backup" in mtimedb):
14295 if "resume" not in mtimedb:
14296 mtimedb["resume"] = mtimedb["resume_backup"]
14297 del mtimedb["resume_backup"]
14299 # "myopts" is a list for backward compatibility.
14300 resume_opts = mtimedb["resume"].get("myopts", [])
14301 if isinstance(resume_opts, list):
14302 resume_opts = dict((k,True) for k in resume_opts)
14303 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14304 resume_opts.pop(opt, None)
14305 myopts.update(resume_opts)
14307 if "--debug" in myopts:
14308 writemsg_level("myopts %s\n" % (myopts,))
14310 # Adjust config according to options of the command being resumed.
14311 for myroot in trees:
14312 mysettings = trees[myroot]["vartree"].settings
14313 mysettings.unlock()
14314 adjust_config(myopts, mysettings)
14316 del myroot, mysettings
14318 ldpath_mtimes = mtimedb["ldpath"]
14321 buildpkgonly = "--buildpkgonly" in myopts
14322 pretend = "--pretend" in myopts
14323 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14324 ask = "--ask" in myopts
14325 nodeps = "--nodeps" in myopts
14326 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14327 tree = "--tree" in myopts
14328 if nodeps and tree:
14330 del myopts["--tree"]
14331 portage.writemsg(colorize("WARN", " * ") + \
14332 "--tree is broken with --nodeps. Disabling...\n")
14333 debug = "--debug" in myopts
14334 verbose = "--verbose" in myopts
14335 quiet = "--quiet" in myopts
14336 if pretend or fetchonly:
14337 # make the mtimedb readonly
14338 mtimedb.filename = None
14339 if '--digest' in myopts or 'digest' in settings.features:
14340 if '--digest' in myopts:
14341 msg = "The --digest option"
14343 msg = "The FEATURES=digest setting"
14345 msg += " can prevent corruption from being" + \
14346 " noticed. The `repoman manifest` command is the preferred" + \
14347 " way to generate manifests and it is capable of doing an" + \
14348 " entire repository or category at once."
14349 prefix = bad(" * ")
14350 writemsg(prefix + "\n")
14351 from textwrap import wrap
14352 for line in wrap(msg, 72):
14353 writemsg("%s%s\n" % (prefix, line))
14354 writemsg(prefix + "\n")
14356 if "--quiet" not in myopts and \
14357 ("--pretend" in myopts or "--ask" in myopts or \
14358 "--tree" in myopts or "--verbose" in myopts):
14360 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14362 elif "--buildpkgonly" in myopts:
14366 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14368 print darkgreen("These are the packages that would be %s, in reverse order:") % action
14372 print darkgreen("These are the packages that would be %s, in order:") % action
14375 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14376 if not show_spinner:
14377 spinner.update = spinner.update_quiet
14380 favorites = mtimedb["resume"].get("favorites")
14381 if not isinstance(favorites, list):
14385 print "Calculating dependencies ",
14386 myparams = create_depgraph_params(myopts, myaction)
14388 resume_data = mtimedb["resume"]
14389 mergelist = resume_data["mergelist"]
14390 if mergelist and "--skipfirst" in myopts:
14391 for i, task in enumerate(mergelist):
14392 if isinstance(task, list) and \
14393 task and task[-1] == "merge":
14400 success, mydepgraph, dropped_tasks = resume_depgraph(
14401 settings, trees, mtimedb, myopts, myparams, spinner)
14402 except (portage.exception.PackageNotFound,
14403 depgraph.UnsatisfiedResumeDep), e:
14404 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14405 mydepgraph = e.depgraph
14408 from textwrap import wrap
14409 from portage.output import EOutput
14412 resume_data = mtimedb["resume"]
14413 mergelist = resume_data.get("mergelist")
14414 if not isinstance(mergelist, list):
14416 if mergelist and debug or (verbose and not quiet):
14417 out.eerror("Invalid resume list:")
14420 for task in mergelist:
14421 if isinstance(task, list):
14422 out.eerror(indent + str(tuple(task)))
14425 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14426 out.eerror("One or more packages are either masked or " + \
14427 "have missing dependencies:")
14430 for dep in e.value:
14431 if dep.atom is None:
14432 out.eerror(indent + "Masked package:")
14433 out.eerror(2 * indent + str(dep.parent))
14436 out.eerror(indent + str(dep.atom) + " pulled in by:")
14437 out.eerror(2 * indent + str(dep.parent))
14439 msg = "The resume list contains packages " + \
14440 "that are either masked or have " + \
14441 "unsatisfied dependencies. " + \
14442 "Please restart/continue " + \
14443 "the operation manually, or use --skipfirst " + \
14444 "to skip the first package in the list and " + \
14445 "any other packages that may be " + \
14446 "masked or have missing dependencies."
14447 for line in wrap(msg, 72):
14449 elif isinstance(e, portage.exception.PackageNotFound):
14450 out.eerror("An expected package is " + \
14451 "not available: %s" % str(e))
14453 msg = "The resume list contains one or more " + \
14454 "packages that are no longer " + \
14455 "available. Please restart/continue " + \
14456 "the operation manually."
14457 for line in wrap(msg, 72):
14461 print "\b\b... done!"
14465 portage.writemsg("!!! One or more packages have been " + \
14466 "dropped due to\n" + \
14467 "!!! masking or unsatisfied dependencies:\n\n",
14469 for task in dropped_tasks:
14470 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
14471 portage.writemsg("\n", noiselevel=-1)
14474 if mydepgraph is not None:
14475 mydepgraph.display_problems()
14476 if not (ask or pretend):
14477 # delete the current list and also the backup
14478 # since it's probably stale too.
14479 for k in ("resume", "resume_backup"):
14480 mtimedb.pop(k, None)
14485 if ("--resume" in myopts):
14486 print darkgreen("emerge: It seems we have nothing to resume...")
14489 myparams = create_depgraph_params(myopts, myaction)
14490 if "--quiet" not in myopts and "--nodeps" not in myopts:
14491 print "Calculating dependencies ",
14493 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14495 retval, favorites = mydepgraph.select_files(myfiles)
14496 except portage.exception.PackageNotFound, e:
14497 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14499 except portage.exception.PackageSetNotFound, e:
14500 root_config = trees[settings["ROOT"]]["root_config"]
14501 display_missing_pkg_set(root_config, e.value)
14504 print "\b\b... done!"
14506 mydepgraph.display_problems()
14509 if "--pretend" not in myopts and \
14510 ("--ask" in myopts or "--tree" in myopts or \
14511 "--verbose" in myopts) and \
14512 not ("--quiet" in myopts and "--ask" not in myopts):
14513 if "--resume" in myopts:
14514 mymergelist = mydepgraph.altlist()
14515 if len(mymergelist) == 0:
14516 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14518 favorites = mtimedb["resume"]["favorites"]
14519 retval = mydepgraph.display(
14520 mydepgraph.altlist(reversed=tree),
14521 favorites=favorites)
14522 mydepgraph.display_problems()
14523 if retval != os.EX_OK:
14525 prompt="Would you like to resume merging these packages?"
14527 retval = mydepgraph.display(
14528 mydepgraph.altlist(reversed=("--tree" in myopts)),
14529 favorites=favorites)
14530 mydepgraph.display_problems()
14531 if retval != os.EX_OK:
14534 for x in mydepgraph.altlist():
14535 if isinstance(x, Package) and x.operation == "merge":
14539 sets = trees[settings["ROOT"]]["root_config"].sets
14540 world_candidates = None
14541 if "--noreplace" in myopts and \
14542 not oneshot and favorites:
14543 # Sets that are not world candidates are filtered
14544 # out here since the favorites list needs to be
14545 # complete for depgraph.loadResumeCommand() to
14546 # operate correctly.
14547 world_candidates = [x for x in favorites \
14548 if not (x.startswith(SETPREFIX) and \
14549 not sets[x[1:]].world_candidate)]
14550 if "--noreplace" in myopts and \
14551 not oneshot and world_candidates:
14553 for x in world_candidates:
14554 print " %s %s" % (good("*"), x)
14555 prompt="Would you like to add these packages to your world favorites?"
14556 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14557 prompt="Nothing to merge; would you like to auto-clean packages?"
14560 print "Nothing to merge; quitting."
14563 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14564 prompt="Would you like to fetch the source files for these packages?"
14566 prompt="Would you like to merge these packages?"
14568 if "--ask" in myopts and userquery(prompt) == "No":
14573 # Don't ask again (e.g. when auto-cleaning packages after merge)
14574 myopts.pop("--ask", None)
14576 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14577 if ("--resume" in myopts):
14578 mymergelist = mydepgraph.altlist()
14579 if len(mymergelist) == 0:
14580 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14582 favorites = mtimedb["resume"]["favorites"]
14583 retval = mydepgraph.display(
14584 mydepgraph.altlist(reversed=tree),
14585 favorites=favorites)
14586 mydepgraph.display_problems()
14587 if retval != os.EX_OK:
14590 retval = mydepgraph.display(
14591 mydepgraph.altlist(reversed=("--tree" in myopts)),
14592 favorites=favorites)
14593 mydepgraph.display_problems()
14594 if retval != os.EX_OK:
14596 if "--buildpkgonly" in myopts:
14597 graph_copy = mydepgraph.digraph.clone()
14598 removed_nodes = set()
14599 for node in graph_copy:
14600 if not isinstance(node, Package) or \
14601 node.operation == "nomerge":
14602 removed_nodes.add(node)
14603 graph_copy.difference_update(removed_nodes)
14604 if not graph_copy.hasallzeros(ignore_priority = \
14605 DepPrioritySatisfiedRange.ignore_medium):
14606 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14607 print "!!! You have to merge the dependencies before you can build this package.\n"
14610 if "--buildpkgonly" in myopts:
14611 graph_copy = mydepgraph.digraph.clone()
14612 removed_nodes = set()
14613 for node in graph_copy:
14614 if not isinstance(node, Package) or \
14615 node.operation == "nomerge":
14616 removed_nodes.add(node)
14617 graph_copy.difference_update(removed_nodes)
14618 if not graph_copy.hasallzeros(ignore_priority = \
14619 DepPrioritySatisfiedRange.ignore_medium):
14620 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14621 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
14624 if ("--resume" in myopts):
14625 favorites=mtimedb["resume"]["favorites"]
14626 mymergelist = mydepgraph.altlist()
14627 mydepgraph.break_refs(mymergelist)
14628 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14629 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14630 del mydepgraph, mymergelist
14631 clear_caches(trees)
14633 retval = mergetask.merge()
14634 merge_count = mergetask.curval
14636 if "resume" in mtimedb and \
14637 "mergelist" in mtimedb["resume"] and \
14638 len(mtimedb["resume"]["mergelist"]) > 1:
14639 mtimedb["resume_backup"] = mtimedb["resume"]
14640 del mtimedb["resume"]
14642 mtimedb["resume"]={}
14643 # Stored as a dict starting with portage-2.1.6_rc1, and supported
14644 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14645 # a list type for options.
14646 mtimedb["resume"]["myopts"] = myopts.copy()
14648 # Convert Atom instances to plain str.
14649 mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14651 pkglist = mydepgraph.altlist()
14652 mydepgraph.saveNomergeFavorites()
14653 mydepgraph.break_refs(pkglist)
14654 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14655 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14656 del mydepgraph, pkglist
14657 clear_caches(trees)
14659 retval = mergetask.merge()
14660 merge_count = mergetask.curval
14662 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14663 if "yes" == settings.get("AUTOCLEAN"):
14664 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14665 unmerge(trees[settings["ROOT"]]["root_config"],
14666 myopts, "clean", [],
14667 ldpath_mtimes, autoclean=1)
14669 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14670 + " AUTOCLEAN is disabled. This can cause serious"
14671 + " problems due to overlapping packages.\n")
14672 trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
14676 def multiple_actions(action1, action2):
14677 sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
14678 sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
14681 def insert_optional_args(args):
14683 Parse optional arguments and insert a value if one has
14684 not been provided. This is done before feeding the args
14685 to the optparse parser since that parser does not support
14686 this feature natively.
14690 jobs_opts = ("-j", "--jobs")
14691 arg_stack = args[:]
14692 arg_stack.reverse()
14694 arg = arg_stack.pop()
14696 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
14697 if not (short_job_opt or arg in jobs_opts):
14698 new_args.append(arg)
14701 # Insert an empty placeholder in order to
14702 # satisfy the requirements of optparse.
14704 new_args.append("--jobs")
14707 if short_job_opt and len(arg) > 2:
14708 if arg[:2] == "-j":
14710 job_count = int(arg[2:])
14712 saved_opts = arg[2:]
14715 saved_opts = arg[1:].replace("j", "")
14717 if job_count is None and arg_stack:
14719 job_count = int(arg_stack[-1])
14723 # Discard the job count from the stack
14724 # since we're consuming it here.
14727 if job_count is None:
14728 # unlimited number of jobs
14729 new_args.append("True")
14731 new_args.append(str(job_count))
14733 if saved_opts is not None:
14734 new_args.append("-" + saved_opts)
14738 def parse_opts(tmpcmdline, silent=False):
14743 global actions, options, shortmapping
14745 longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
14746 argument_options = {
14748 "help":"specify the location for portage configuration files",
14752 "help":"enable or disable color output",
14754 "choices":("y", "n")
14759 "help" : "Specifies the number of packages to build " + \
14765 "--load-average": {
14767 "help" :"Specifies that no new builds should be started " + \
14768 "if there are other builds running and the load average " + \
14769 "is at least LOAD (a floating-point number).",
14775 "help":"include unnecessary build time dependencies",
14777 "choices":("y", "n")
14780 "help":"specify conditions to trigger package reinstallation",
14782 "choices":["changed-use"]
14786 from optparse import OptionParser
14787 parser = OptionParser()
14788 if parser.has_option("--help"):
14789 parser.remove_option("--help")
14791 for action_opt in actions:
14792 parser.add_option("--" + action_opt, action="store_true",
14793 dest=action_opt.replace("-", "_"), default=False)
14794 for myopt in options:
14795 parser.add_option(myopt, action="store_true",
14796 dest=myopt.lstrip("--").replace("-", "_"), default=False)
14797 for shortopt, longopt in shortmapping.iteritems():
14798 parser.add_option("-" + shortopt, action="store_true",
14799 dest=longopt.lstrip("--").replace("-", "_"), default=False)
14800 for myalias, myopt in longopt_aliases.iteritems():
14801 parser.add_option(myalias, action="store_true",
14802 dest=myopt.lstrip("--").replace("-", "_"), default=False)
14804 for myopt, kwargs in argument_options.iteritems():
14805 parser.add_option(myopt,
14806 dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
14808 tmpcmdline = insert_optional_args(tmpcmdline)
14810 myoptions, myargs = parser.parse_args(args=tmpcmdline)
14814 if myoptions.jobs == "True":
14818 jobs = int(myoptions.jobs)
14822 if jobs is not True and \
14826 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
14827 (myoptions.jobs,), noiselevel=-1)
14829 myoptions.jobs = jobs
14831 if myoptions.load_average:
14833 load_average = float(myoptions.load_average)
14837 if load_average <= 0.0:
14838 load_average = None
14840 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
14841 (myoptions.load_average,), noiselevel=-1)
14843 myoptions.load_average = load_average
14845 for myopt in options:
14846 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
14848 myopts[myopt] = True
14850 for myopt in argument_options:
14851 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
14855 if myoptions.searchdesc:
14856 myoptions.search = True
14858 for action_opt in actions:
14859 v = getattr(myoptions, action_opt.replace("-", "_"))
14862 multiple_actions(myaction, action_opt)
14864 myaction = action_opt
14868 return myaction, myopts, myfiles
14870 def validate_ebuild_environment(trees):
14871 for myroot in trees:
14872 settings = trees[myroot]["vartree"].settings
14873 settings.validate()
14875 def clear_caches(trees):
14876 for d in trees.itervalues():
14877 d["porttree"].dbapi.melt()
14878 d["porttree"].dbapi._aux_cache.clear()
14879 d["bintree"].dbapi._aux_cache.clear()
14880 d["bintree"].dbapi._clear_cache()
14881 d["vartree"].dbapi.linkmap._clear_cache()
14882 portage.dircache.clear()
14885 def load_emerge_config(trees=None):
14887 for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
14888 v = os.environ.get(envvar, None)
14889 if v and v.strip():
14891 trees = portage.create_trees(trees=trees, **kwargs)
14893 for root, root_trees in trees.iteritems():
14894 settings = root_trees["vartree"].settings
14895 setconfig = load_default_config(settings, root_trees)
14896 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
14898 settings = trees["/"]["vartree"].settings
14900 for myroot in trees:
14902 settings = trees[myroot]["vartree"].settings
14905 mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
14906 mtimedb = portage.MtimeDB(mtimedbfile)
14908 return settings, trees, mtimedb
14910 def adjust_config(myopts, settings):
14911 """Make emerge specific adjustments to the config."""
14913 # To enhance usability, make some vars case insensitive by forcing them to
14915 for myvar in ("AUTOCLEAN", "NOCOLOR"):
14916 if myvar in settings:
14917 settings[myvar] = settings[myvar].lower()
14918 settings.backup_changes(myvar)
14921 # Kill noauto as it will break merges otherwise.
14922 if "noauto" in settings.features:
14923 settings.features.remove('noauto')
14924 settings['FEATURES'] = ' '.join(sorted(settings.features))
14925 settings.backup_changes("FEATURES")
14929 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
14930 except ValueError, e:
14931 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14932 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
14933 settings["CLEAN_DELAY"], noiselevel=-1)
14934 settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
14935 settings.backup_changes("CLEAN_DELAY")
14937 EMERGE_WARNING_DELAY = 10
14939 EMERGE_WARNING_DELAY = int(settings.get(
14940 "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
14941 except ValueError, e:
14942 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14943 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
14944 settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
14945 settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
14946 settings.backup_changes("EMERGE_WARNING_DELAY")
14948 if "--quiet" in myopts:
14949 settings["PORTAGE_QUIET"]="1"
14950 settings.backup_changes("PORTAGE_QUIET")
14952 if "--verbose" in myopts:
14953 settings["PORTAGE_VERBOSE"] = "1"
14954 settings.backup_changes("PORTAGE_VERBOSE")
14956 # Set so that configs will be merged regardless of remembered status
14957 if ("--noconfmem" in myopts):
14958 settings["NOCONFMEM"]="1"
14959 settings.backup_changes("NOCONFMEM")
14961 # Set various debug markers... They should be merged somehow.
14964 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
14965 if PORTAGE_DEBUG not in (0, 1):
14966 portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
14967 PORTAGE_DEBUG, noiselevel=-1)
14968 portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
14971 except ValueError, e:
14972 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14973 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
14974 settings["PORTAGE_DEBUG"], noiselevel=-1)
14976 if "--debug" in myopts:
14978 settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
14979 settings.backup_changes("PORTAGE_DEBUG")
14981 if settings.get("NOCOLOR") not in ("yes","true"):
14982 portage.output.havecolor = 1
14984 """The explicit --color < y | n > option overrides the NOCOLOR environment
14985 variable and stdout auto-detection."""
14986 if "--color" in myopts:
14987 if "y" == myopts["--color"]:
14988 portage.output.havecolor = 1
14989 settings["NOCOLOR"] = "false"
14991 portage.output.havecolor = 0
14992 settings["NOCOLOR"] = "true"
14993 settings.backup_changes("NOCOLOR")
14994 elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
14995 portage.output.havecolor = 0
14996 settings["NOCOLOR"] = "true"
14997 settings.backup_changes("NOCOLOR")
14999 def apply_priorities(settings):
15003 def nice(settings):
15005 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
15006 except (OSError, ValueError), e:
15007 out = portage.output.EOutput()
15008 out.eerror("Failed to change nice value to '%s'" % \
15009 settings["PORTAGE_NICENESS"])
15010 out.eerror("%s\n" % str(e))
15012 def ionice(settings):
15014 ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
15016 ionice_cmd = shlex.split(ionice_cmd)
15020 from portage.util import varexpand
15021 variables = {"PID" : str(os.getpid())}
15022 cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
15025 rval = portage.process.spawn(cmd, env=os.environ)
15026 except portage.exception.CommandNotFound:
15027 # The OS kernel probably doesn't support ionice,
15028 # so return silently.
15031 if rval != os.EX_OK:
15032 out = portage.output.EOutput()
15033 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
15034 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
15036 def display_missing_pkg_set(root_config, set_name):
15039 msg.append(("emerge: There are no sets to satisfy '%s'. " + \
15040 "The following sets exist:") % \
15041 colorize("INFORM", set_name))
15044 for s in sorted(root_config.sets):
15045 msg.append(" %s" % s)
15048 writemsg_level("".join("%s\n" % l for l in msg),
15049 level=logging.ERROR, noiselevel=-1)
15051 def expand_set_arguments(myfiles, myaction, root_config):
15053 setconfig = root_config.setconfig
15055 sets = setconfig.getSets()
15057 # In order to know exactly which atoms/sets should be added to the
15058 # world file, the depgraph performs set expansion later. It will get
15059 # confused about where the atoms came from if it's not allowed to
15060 # expand them itself.
15061 do_not_expand = (None, )
15064 if a in ("system", "world"):
15065 newargs.append(SETPREFIX+a)
15072 # separators for set arguments
15076 # WARNING: all operators must be of equal length
15078 DIFF_OPERATOR = "-@"
15079 UNION_OPERATOR = "+@"
15081 for i in range(0, len(myfiles)):
15082 if myfiles[i].startswith(SETPREFIX):
15085 x = myfiles[i][len(SETPREFIX):]
15088 start = x.find(ARG_START)
15089 end = x.find(ARG_END)
15090 if start > 0 and start < end:
15091 namepart = x[:start]
15092 argpart = x[start+1:end]
15094 # TODO: implement proper quoting
15095 args = argpart.split(",")
15099 k, v = a.split("=", 1)
15102 options[a] = "True"
15103 setconfig.update(namepart, options)
15104 newset += (x[:start-len(namepart)]+namepart)
15105 x = x[end+len(ARG_END):]
15109 myfiles[i] = SETPREFIX+newset
15111 sets = setconfig.getSets()
15113 # display errors that occured while loading the SetConfig instance
15114 for e in setconfig.errors:
15115 print colorize("BAD", "Error during set creation: %s" % e)
15117 # emerge relies on the existance of sets with names "world" and "system"
15118 required_sets = ("world", "system")
15121 for s in required_sets:
15123 missing_sets.append(s)
15125 if len(missing_sets) > 2:
15126 missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
15127 missing_sets_str += ', and "%s"' % missing_sets[-1]
15128 elif len(missing_sets) == 2:
15129 missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
15131 missing_sets_str = '"%s"' % missing_sets[-1]
15132 msg = ["emerge: incomplete set configuration, " + \
15133 "missing set(s): %s" % missing_sets_str]
15135 msg.append(" sets defined: %s" % ", ".join(sets))
15136 msg.append(" This usually means that '%s'" % \
15137 (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
15138 msg.append(" is missing or corrupt.")
15140 writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
15142 unmerge_actions = ("unmerge", "prune", "clean", "depclean")
15145 if a.startswith(SETPREFIX):
15146 # support simple set operations (intersection, difference and union)
15147 # on the commandline. Expressions are evaluated strictly left-to-right
15148 if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
15149 expression = a[len(SETPREFIX):]
15152 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15153 is_pos = expression.rfind(IS_OPERATOR)
15154 diff_pos = expression.rfind(DIFF_OPERATOR)
15155 union_pos = expression.rfind(UNION_OPERATOR)
15156 op_pos = max(is_pos, diff_pos, union_pos)
15157 s1 = expression[:op_pos]
15158 s2 = expression[op_pos+len(IS_OPERATOR):]
15159 op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15161 display_missing_pkg_set(root_config, s2)
15163 expr_sets.insert(0, s2)
15164 expr_ops.insert(0, op)
15166 if not expression in sets:
15167 display_missing_pkg_set(root_config, expression)
15169 expr_sets.insert(0, expression)
15170 result = set(setconfig.getSetAtoms(expression))
15171 for i in range(0, len(expr_ops)):
15172 s2 = setconfig.getSetAtoms(expr_sets[i+1])
15173 if expr_ops[i] == IS_OPERATOR:
15174 result.intersection_update(s2)
15175 elif expr_ops[i] == DIFF_OPERATOR:
15176 result.difference_update(s2)
15177 elif expr_ops[i] == UNION_OPERATOR:
15180 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15181 newargs.extend(result)
15183 s = a[len(SETPREFIX):]
15185 display_missing_pkg_set(root_config, s)
15187 setconfig.active.append(s)
15189 set_atoms = setconfig.getSetAtoms(s)
15190 except portage.exception.PackageSetNotFound, e:
15191 writemsg_level(("emerge: the given set '%s' " + \
15192 "contains a non-existent set named '%s'.\n") % \
15193 (s, e), level=logging.ERROR, noiselevel=-1)
15195 if myaction in unmerge_actions and \
15196 not sets[s].supportsOperation("unmerge"):
15197 sys.stderr.write("emerge: the given set '%s' does " % s + \
15198 "not support unmerge operations\n")
15200 elif not set_atoms:
15201 print "emerge: '%s' is an empty set" % s
15202 elif myaction not in do_not_expand:
15203 newargs.extend(set_atoms)
15205 newargs.append(SETPREFIX+s)
15206 for e in sets[s].errors:
15210 return (newargs, retval)
15212 def repo_name_check(trees):
15213 missing_repo_names = set()
15214 for root, root_trees in trees.iteritems():
15215 if "porttree" in root_trees:
15216 portdb = root_trees["porttree"].dbapi
15217 missing_repo_names.update(portdb.porttrees)
15218 repos = portdb.getRepositories()
15220 missing_repo_names.discard(portdb.getRepositoryPath(r))
15221 if portdb.porttree_root in missing_repo_names and \
15222 not os.path.exists(os.path.join(
15223 portdb.porttree_root, "profiles")):
15224 # This is normal if $PORTDIR happens to be empty,
15225 # so don't warn about it.
15226 missing_repo_names.remove(portdb.porttree_root)
15228 if missing_repo_names:
15230 msg.append("WARNING: One or more repositories " + \
15231 "have missing repo_name entries:")
15233 for p in missing_repo_names:
15234 msg.append("\t%s/profiles/repo_name" % (p,))
15236 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
15237 "should be a plain text file containing a unique " + \
15238 "name for the repository on the first line.", 70))
15239 writemsg_level("".join("%s\n" % l for l in msg),
15240 level=logging.WARNING, noiselevel=-1)
15242 return bool(missing_repo_names)
15244 def config_protect_check(trees):
15245 for root, root_trees in trees.iteritems():
15246 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
15247 msg = "!!! CONFIG_PROTECT is empty"
15249 msg += " for '%s'" % root
15250 writemsg_level(msg, level=logging.WARN, noiselevel=-1)
15252 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
15254 if "--quiet" in myopts:
15255 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15256 print "!!! one of the following fully-qualified ebuild names instead:\n"
15257 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15258 print " " + colorize("INFORM", cp)
15261 s = search(root_config, spinner, "--searchdesc" in myopts,
15262 "--quiet" not in myopts, "--usepkg" in myopts,
15263 "--usepkgonly" in myopts)
15264 null_cp = portage.dep_getkey(insert_category_into_atom(
15266 cat, atom_pn = portage.catsplit(null_cp)
15267 s.searchkey = atom_pn
15268 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15271 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15272 print "!!! one of the above fully-qualified ebuild names instead.\n"
15274 def profile_check(trees, myaction, myopts):
15275 if myaction in ("info", "sync"):
15277 elif "--version" in myopts or "--help" in myopts:
15279 for root, root_trees in trees.iteritems():
15280 if root_trees["root_config"].settings.profiles:
15282 # generate some profile related warning messages
15283 validate_ebuild_environment(trees)
15284 msg = "If you have just changed your profile configuration, you " + \
15285 "should revert back to the previous configuration. Due to " + \
15286 "your current profile being invalid, allowed actions are " + \
15287 "limited to --help, --info, --sync, and --version."
15288 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
15289 level=logging.ERROR, noiselevel=-1)
15294 global portage # NFC why this is necessary now - genone
15295 portage._disable_legacy_globals()
15296 # Disable color until we're sure that it should be enabled (after
15297 # EMERGE_DEFAULT_OPTS has been parsed).
15298 portage.output.havecolor = 0
15299 # This first pass is just for options that need to be known as early as
15300 # possible, such as --config-root. They will be parsed again later,
15301 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15302 # the value of --config-root).
15303 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15304 if "--debug" in myopts:
15305 os.environ["PORTAGE_DEBUG"] = "1"
15306 if "--config-root" in myopts:
15307 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15309 # Portage needs to ensure a sane umask for the files it creates.
15311 settings, trees, mtimedb = load_emerge_config()
15312 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15313 rval = profile_check(trees, myaction, myopts)
15314 if rval != os.EX_OK:
15317 if portage._global_updates(trees, mtimedb["updates"]):
15319 # Reload the whole config from scratch.
15320 settings, trees, mtimedb = load_emerge_config(trees=trees)
15321 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15323 xterm_titles = "notitles" not in settings.features
15326 if "--ignore-default-opts" not in myopts:
15327 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15328 tmpcmdline.extend(sys.argv[1:])
15329 myaction, myopts, myfiles = parse_opts(tmpcmdline)
15331 if "--digest" in myopts:
15332 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15333 # Reload the whole config from scratch so that the portdbapi internal
15334 # config is updated with new FEATURES.
15335 settings, trees, mtimedb = load_emerge_config(trees=trees)
15336 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15338 for myroot in trees:
15339 mysettings = trees[myroot]["vartree"].settings
15340 mysettings.unlock()
15341 adjust_config(myopts, mysettings)
15342 if '--pretend' not in myopts and myaction in \
15343 (None, 'clean', 'depclean', 'prune', 'unmerge'):
15344 mysettings["PORTAGE_COUNTER_HASH"] = \
15345 trees[myroot]["vartree"].dbapi._counter_hash()
15346 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15348 del myroot, mysettings
15350 apply_priorities(settings)
15352 spinner = stdout_spinner()
15353 if "candy" in settings.features:
15354 spinner.update = spinner.update_scroll
15356 if "--quiet" not in myopts:
15357 portage.deprecated_profile_check(settings=settings)
15358 repo_name_check(trees)
15359 config_protect_check(trees)
15361 eclasses_overridden = {}
15362 for mytrees in trees.itervalues():
15363 mydb = mytrees["porttree"].dbapi
15364 # Freeze the portdbapi for performance (memoize all xmatch results).
15366 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
15369 if eclasses_overridden and \
15370 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
15371 prefix = bad(" * ")
15372 if len(eclasses_overridden) == 1:
15373 writemsg(prefix + "Overlay eclass overrides " + \
15374 "eclass from PORTDIR:\n", noiselevel=-1)
15376 writemsg(prefix + "Overlay eclasses override " + \
15377 "eclasses from PORTDIR:\n", noiselevel=-1)
15378 writemsg(prefix + "\n", noiselevel=-1)
15379 for eclass_name in sorted(eclasses_overridden):
15380 writemsg(prefix + " '%s/%s.eclass'\n" % \
15381 (eclasses_overridden[eclass_name], eclass_name),
15383 writemsg(prefix + "\n", noiselevel=-1)
15384 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
15385 "because it will trigger invalidation of cached ebuild metadata " + \
15386 "that is distributed with the portage tree. If you must " + \
15387 "override eclasses from PORTDIR then you are advised to add " + \
15388 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
15389 "`emerge --regen` after each time that you run `emerge --sync`. " + \
15390 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
15391 "you would like to disable this warning."
15392 from textwrap import wrap
15393 for line in wrap(msg, 72):
15394 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
15396 if "moo" in myfiles:
15399 Larry loves Gentoo (""" + platform.system() + """)
15401 _______________________
15402 < Have you mooed today? >
15403 -----------------------
15413 ext = os.path.splitext(x)[1]
15414 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15415 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15418 root_config = trees[settings["ROOT"]]["root_config"]
15419 if myaction == "list-sets":
15420 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15424 # only expand sets for actions taking package arguments
15425 oldargs = myfiles[:]
15426 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15427 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15428 if retval != os.EX_OK:
15431 # Need to handle empty sets specially, otherwise emerge will react
15432 # with the help message for empty argument lists
15433 if oldargs and not myfiles:
15434 print "emerge: no targets left after set expansion"
15437 if ("--tree" in myopts) and ("--columns" in myopts):
15438 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15441 if ("--quiet" in myopts):
15442 spinner.update = spinner.update_quiet
15443 portage.util.noiselimit = -1
15445 # Always create packages if FEATURES=buildpkg
15446 # Imply --buildpkg if --buildpkgonly
15447 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15448 if "--buildpkg" not in myopts:
15449 myopts["--buildpkg"] = True
15451 # Always try and fetch binary packages if FEATURES=getbinpkg
15452 if ("getbinpkg" in settings.features):
15453 myopts["--getbinpkg"] = True
15455 if "--buildpkgonly" in myopts:
15456 # --buildpkgonly will not merge anything, so
15457 # it cancels all binary package options.
15458 for opt in ("--getbinpkg", "--getbinpkgonly",
15459 "--usepkg", "--usepkgonly"):
15460 myopts.pop(opt, None)
15462 if "--fetch-all-uri" in myopts:
15463 myopts["--fetchonly"] = True
15465 if "--skipfirst" in myopts and "--resume" not in myopts:
15466 myopts["--resume"] = True
15468 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15469 myopts["--usepkgonly"] = True
15471 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15472 myopts["--getbinpkg"] = True
15474 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15475 myopts["--usepkg"] = True
15477 # Also allow -K to apply --usepkg/-k
15478 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15479 myopts["--usepkg"] = True
15481 # Allow -p to remove --ask
15482 if ("--pretend" in myopts) and ("--ask" in myopts):
15483 print ">>> --pretend disables --ask... removing --ask from options."
15484 del myopts["--ask"]
15486 # forbid --ask when not in a terminal
15487 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15488 if ("--ask" in myopts) and (not sys.stdin.isatty()):
15489 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15493 if settings.get("PORTAGE_DEBUG", "") == "1":
15494 spinner.update = spinner.update_quiet
15496 if "python-trace" in settings.features:
15497 import portage.debug
15498 portage.debug.set_trace(True)
15500 if not ("--quiet" in myopts):
15501 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15502 spinner.update = spinner.update_basic
15504 if myaction == 'version':
15505 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15506 settings.profile_path, settings["CHOST"],
15507 trees[settings["ROOT"]]["vartree"].dbapi)
15509 elif "--help" in myopts:
15510 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15513 if "--debug" in myopts:
15514 print "myaction", myaction
15515 print "myopts", myopts
15517 if not myaction and not myfiles and "--resume" not in myopts:
15518 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15521 pretend = "--pretend" in myopts
15522 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15523 buildpkgonly = "--buildpkgonly" in myopts
15525 # check if root user is the current user for the actions where emerge needs this
15526 if portage.secpass < 2:
15527 # We've already allowed "--version" and "--help" above.
15528 if "--pretend" not in myopts and myaction not in ("search","info"):
15529 need_superuser = not \
15531 (buildpkgonly and secpass >= 1) or \
15532 myaction in ("metadata", "regen") or \
15533 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15534 if portage.secpass < 1 or \
15537 access_desc = "superuser"
15539 access_desc = "portage group"
15540 # Always show portage_group_warning() when only portage group
15541 # access is required but the user is not in the portage group.
15542 from portage.data import portage_group_warning
15543 if "--ask" in myopts:
15544 myopts["--pretend"] = True
15545 del myopts["--ask"]
15546 print ("%s access is required... " + \
15547 "adding --pretend to options.\n") % access_desc
15548 if portage.secpass < 1 and not need_superuser:
15549 portage_group_warning()
15551 sys.stderr.write(("emerge: %s access is " + \
15552 "required.\n\n") % access_desc)
15553 if portage.secpass < 1 and not need_superuser:
15554 portage_group_warning()
15557 disable_emergelog = False
15558 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15560 disable_emergelog = True
15562 if myaction in ("search", "info"):
15563 disable_emergelog = True
15564 if disable_emergelog:
15565 """ Disable emergelog for everything except build or unmerge
15566 operations. This helps minimize parallel emerge.log entries that can
15567 confuse log parsers. We especially want it disabled during
15568 parallel-fetch, which uses --resume --fetchonly."""
15570 def emergelog(*pargs, **kargs):
15573 if not "--pretend" in myopts:
15574 emergelog(xterm_titles, "Started emerge on: "+\
15575 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15578 myelogstr=" ".join(myopts)
15580 myelogstr+=" "+myaction
15582 myelogstr += " " + " ".join(oldargs)
15583 emergelog(xterm_titles, " *** emerge " + myelogstr)
15586 def emergeexitsig(signum, frame):
15587 signal.signal(signal.SIGINT, signal.SIG_IGN)
15588 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15589 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15590 sys.exit(100+signum)
15591 signal.signal(signal.SIGINT, emergeexitsig)
15592 signal.signal(signal.SIGTERM, emergeexitsig)
15595 """This gets out final log message in before we quit."""
15596 if "--pretend" not in myopts:
15597 emergelog(xterm_titles, " *** terminating.")
15598 if "notitles" not in settings.features:
15600 portage.atexit_register(emergeexit)
15602 if myaction in ("config", "metadata", "regen", "sync"):
15603 if "--pretend" in myopts:
15604 sys.stderr.write(("emerge: The '%s' action does " + \
15605 "not support '--pretend'.\n") % myaction)
15608 if "sync" == myaction:
15609 return action_sync(settings, trees, mtimedb, myopts, myaction)
15610 elif "metadata" == myaction:
15611 action_metadata(settings, portdb, myopts)
15612 elif myaction=="regen":
15613 validate_ebuild_environment(trees)
15614 return action_regen(settings, portdb, myopts.get("--jobs"),
15615 myopts.get("--load-average"))
15617 elif "config"==myaction:
15618 validate_ebuild_environment(trees)
15619 action_config(settings, trees, myopts, myfiles)
15622 elif "search"==myaction:
15623 validate_ebuild_environment(trees)
15624 action_search(trees[settings["ROOT"]]["root_config"],
15625 myopts, myfiles, spinner)
15626 elif myaction in ("clean", "unmerge") or \
15627 (myaction == "prune" and "--nodeps" in myopts):
15628 validate_ebuild_environment(trees)
15630 # Ensure atoms are valid before calling unmerge().
15631 # For backward compat, leading '=' is not required.
15633 if is_valid_package_atom(x) or \
15634 is_valid_package_atom("=" + x):
15637 msg.append("'%s' is not a valid package atom." % (x,))
15638 msg.append("Please check ebuild(5) for full details.")
15639 writemsg_level("".join("!!! %s\n" % line for line in msg),
15640 level=logging.ERROR, noiselevel=-1)
15643 # When given a list of atoms, unmerge
15644 # them in the order given.
15645 ordered = myaction == "unmerge"
15646 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15647 mtimedb["ldpath"], ordered=ordered):
15648 if not (buildpkgonly or fetchonly or pretend):
15649 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15651 elif myaction in ("depclean", "info", "prune"):
15653 # Ensure atoms are valid before calling unmerge().
15654 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15657 if is_valid_package_atom(x):
15659 valid_atoms.append(
15660 portage.dep_expand(x, mydb=vardb, settings=settings))
15661 except portage.exception.AmbiguousPackageName, e:
15662 msg = "The short ebuild name \"" + x + \
15663 "\" is ambiguous. Please specify " + \
15664 "one of the following " + \
15665 "fully-qualified ebuild names instead:"
15666 for line in textwrap.wrap(msg, 70):
15667 writemsg_level("!!! %s\n" % (line,),
15668 level=logging.ERROR, noiselevel=-1)
15670 writemsg_level(" %s\n" % colorize("INFORM", i),
15671 level=logging.ERROR, noiselevel=-1)
15672 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15676 msg.append("'%s' is not a valid package atom." % (x,))
15677 msg.append("Please check ebuild(5) for full details.")
15678 writemsg_level("".join("!!! %s\n" % line for line in msg),
15679 level=logging.ERROR, noiselevel=-1)
15682 if myaction == "info":
15683 return action_info(settings, trees, myopts, valid_atoms)
15685 validate_ebuild_environment(trees)
15686 action_depclean(settings, trees, mtimedb["ldpath"],
15687 myopts, myaction, valid_atoms, spinner)
15688 if not (buildpkgonly or fetchonly or pretend):
15689 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15690 # "update", "system", or just process files:
15692 validate_ebuild_environment(trees)
15695 if x.startswith(SETPREFIX) or \
15696 is_valid_package_atom(x):
15698 if x[:1] == os.sep:
15706 msg.append("'%s' is not a valid package atom." % (x,))
15707 msg.append("Please check ebuild(5) for full details.")
15708 writemsg_level("".join("!!! %s\n" % line for line in msg),
15709 level=logging.ERROR, noiselevel=-1)
15712 if "--pretend" not in myopts:
15713 display_news_notification(root_config, myopts)
15714 retval = action_build(settings, trees, mtimedb,
15715 myopts, myaction, myfiles, spinner)
15716 root_config = trees[settings["ROOT"]]["root_config"]
15717 post_emerge(root_config, myopts, mtimedb, retval)