2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
7 from collections import deque
27 from os import path as osp
28 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
31 from portage import digraph
32 from portage.const import NEWS_LIB_PATH
35 import portage.xpak, commands, errno, re, socket, time
36 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
37 nc_len, red, teal, turquoise, xtermTitle, \
38 xtermTitleReset, yellow
39 from portage.output import create_color_func
40 good = create_color_func("GOOD")
41 bad = create_color_func("BAD")
42 # white looks bad on terminals with white background
43 from portage.output import bold as white
47 portage.dep._dep_check_strict = True
50 import portage.exception
51 from portage.data import secpass
52 from portage.elog.messages import eerror
53 from portage.util import normalize_path as normpath
54 from portage.util import cmp_sort_key, writemsg, writemsg_level
55 from portage.sets import load_default_config, SETPREFIX
56 from portage.sets.base import InternalPackageSet
58 from itertools import chain, izip
61 import cPickle as pickle
66 from cStringIO import StringIO
68 from StringIO import StringIO
# NOTE(review): this file is an elided, line-numbered listing — the leading
# integers are original source line numbers and interior lines are missing.
# Comments below describe only what the visible lines show; confirm details
# against upstream portage before relying on them.
# Terminal progress indicator with three styles: basic dots, a scrolling
# message, and a twirling cursor.
70 class stdout_spinner(object):
# Fortune-cookie messages used by the scroll-style spinner (list header elided).
72 "Gentoo Rocks ("+platform.system()+")",
73 "Thank you for using Gentoo. :)",
74 "Are you actually trying to read this?",
75 "How many times have you stared at this?",
76 "We are generating the cache right now",
77 "You are paying too much attention.",
78 "A theory is better than its explanation.",
79 "Phasers locked on target, Captain.",
80 "Thrashing is just virtual crashing.",
81 "To be is to program.",
82 "Real Users hate Real Programmers.",
83 "When all else fails, read the instructions.",
84 "Functionality breeds Contempt.",
85 "The future lies ahead.",
86 "3.1415926535897932384626433832795028841971694",
87 "Sometimes insanity is the only alternative.",
88 "Inaccuracy saves a world of explanation.",
# Character cycle for the twirl-style spinner.
91 twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
# Constructor fragment: default update style is the twirl; a scroll message
# is chosen pseudo-randomly from the current time.
95 self.update = self.update_twirl
96 self.scroll_sequence = self.scroll_msgs[
97 int(time.time() * 100) % len(self.scroll_msgs)]
# Minimum seconds between terminal writes (update throttle).
99 self.min_display_latency = 0.05
101 def _return_early(self):
103 Flushing output to the tty too frequently wastes cpu time. Therefore,
104 each update* method should return without doing any output when this
# Records the time of an accepted update; the early-return branch for calls
# arriving within min_display_latency is elided from this listing.
107 cur_time = time.time()
108 if cur_time - self.last_update < self.min_display_latency:
110 self.last_update = cur_time
# Dot-style spinner: writes a dot every 100th call; position wraps at 500.
113 def update_basic(self):
114 self.spinpos = (self.spinpos + 1) % 500
115 if self._return_early():
117 if (self.spinpos % 100) == 0:
118 if self.spinpos == 0:
119 sys.stdout.write(". ")
121 sys.stdout.write(".")
# Scroll-style spinner: sweeps the message right-to-left then left-to-right
# (spinpos spans twice the message length).
124 def update_scroll(self):
125 if self._return_early():
127 if(self.spinpos >= len(self.scroll_sequence)):
128 sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
129 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
131 sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
133 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
# Twirl-style spinner: backspaces over the previous character and writes the
# next one from twirl_sequence.
135 def update_twirl(self):
136 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
137 if self._return_early():
139 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
# No-op spinner used when --nospinner/--quiet style output is wanted
# (body elided in this listing).
142 def update_quiet(self):
# Interactive yes/no (or custom-choice) prompt. NOTE(review): several lines
# of the original are elided here (including the prompt print, the try:
# header, and the return/raise statements).
145 def userquery(prompt, responses=None, colours=None):
146 """Displays a prompt and a set of responses, then waits for a response
147 which is checked against the responses and the first to match is
148 returned. An empty response will match the first value in responses. The
149 input buffer is *not* cleared prior to the prompt!
152 responses: a List of Strings.
153 colours: a List of Functions taking and returning a String, used to
154 process the responses for display. Typically these will be functions
155 like red() but could be e.g. lambda x: "DisplayString".
156 If responses is omitted, defaults to ["Yes", "No"], [green, red].
157 If only colours is omitted, defaults to [bold, ...].
159 Returns a member of the List responses. (If called without optional
160 arguments, returns "Yes" or "No".)
161 KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
163 if responses is None:
164 responses = ["Yes", "No"]
# Default colour functions come from the PROMPT_CHOICE_* color classes.
166 create_color_func("PROMPT_CHOICE_DEFAULT"),
167 create_color_func("PROMPT_CHOICE_OTHER")
169 elif colours is None:
# Cycle/pad the colour list so it is exactly as long as responses.
171 colours=(colours*len(responses))[:len(responses)]
# Python 2 raw_input; choices are rendered as "[Yes/No] ".
175 response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
176 for key in responses:
177 # An empty response will match the first value in responses.
178 if response.upper()==key[:len(response)].upper():
180 print "Sorry, response '%s' not understood." % response,
181 except (EOFError, KeyboardInterrupt):
# Module-level data: the set of recognized emerge actions, the long-option
# list, and the short-option-to-long-option map. NOTE(review): the listing
# elides several entries and the headers/closers of the second and third
# literals (the options list and the shortmapping dict); confirm against
# upstream before editing.
185 actions = frozenset([
186 "clean", "config", "depclean",
187 "info", "list-sets", "metadata",
188 "prune", "regen", "search",
# Long command-line options (list header elided from this listing).
192 "--ask", "--alphabetical",
193 "--buildpkg", "--buildpkgonly",
194 "--changelog", "--columns",
199 "--fetchonly", "--fetch-all-uri",
200 "--getbinpkg", "--getbinpkgonly",
201 "--help", "--ignore-default-opts",
204 "--newuse", "--nocolor",
205 "--nodeps", "--noreplace",
206 "--nospinner", "--oneshot",
207 "--onlydeps", "--pretend",
208 "--quiet", "--resume",
209 "--searchdesc", "--selective",
213 "--usepkg", "--usepkgonly",
214 "--verbose", "--version"
# Single-letter option aliases (dict header elided from this listing).
220 "b":"--buildpkg", "B":"--buildpkgonly",
221 "c":"--clean", "C":"--unmerge",
222 "d":"--debug", "D":"--deep",
224 "f":"--fetchonly", "F":"--fetch-all-uri",
225 "g":"--getbinpkg", "G":"--getbinpkgonly",
227 "k":"--usepkg", "K":"--usepkgonly",
229 "n":"--noreplace", "N":"--newuse",
230 "o":"--onlydeps", "O":"--nodeps",
231 "p":"--pretend", "P":"--prune",
233 "s":"--search", "S":"--searchdesc",
236 "v":"--verbose", "V":"--version"
# Appends a timestamped message to /var/log/emerge.log (and optionally sets
# the xterm title). NOTE(review): the try:/finally: and seek lines are elided
# in this listing; all filesystem errors fall through to the except at the
# bottom, which only prints to stderr (best-effort logging).
239 def emergelog(xterm_titles, mystr, short_msg=None):
240 if xterm_titles and short_msg:
241 if "HOSTNAME" in os.environ:
242 short_msg = os.environ["HOSTNAME"]+": "+short_msg
243 xtermTitle(short_msg)
245 file_path = "/var/log/emerge.log"
246 mylogfile = open(file_path, "a")
# Keep the log owned/readable by the portage user and group.
247 portage.util.apply_secpass_permissions(file_path,
248 uid=portage.portage_uid, gid=portage.portage_gid,
252 mylock = portage.locks.lockfile(mylogfile)
253 # seek because we may have gotten held up by the lock.
254 # if so, we may not be positioned at the end of the file.
# Timestamp is seconds since the epoch, truncated to 10 characters.
256 mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
260 portage.locks.unlockfile(mylock)
262 except (IOError,OSError,portage.exception.PortageException), e:
264 print >> sys.stderr, "emergelog():",e
# Prints an abortable countdown before proceeding. NOTE(review): the loop
# header and sleep are elided in this listing; only the messages and the
# per-second write are visible.
266 def countdown(secs=5, doing="Starting"):
268 print ">>> Waiting",secs,"seconds before starting..."
269 print ">>> (Control-C to abort)...\n"+doing+" in: ",
# Writes the remaining seconds in the UNMERGE_WARN color class.
273 sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
278 # formats a size given in bytes nicely
# Returns a human-readable kB string with thousands separators.
# NOTE(review): the basestring branch body, the comma-insertion loop header,
# and the return statement are elided in this listing.
279 def format_size(mysize):
280 if isinstance(mysize, basestring):
281 if 0 != mysize % 1024:
283 # Always round up to the next kB so that it doesn't show 0 kB when
284 # some small file still needs to be fetched.
285 mysize += 1024 - mysize % 1024
# Python 2 integer division: whole kilobytes.
286 mystr=str(mysize/1024)
# Insert a comma as a thousands separator (loop header elided).
290 mystr=mystr[:mycount]+","+mystr[mycount:]
# Detects the active gcc version, preferring gcc-config, then the
# CHOST-prefixed compiler, then plain gcc. NOTE(review): the try: lines
# wrapping each probe are elided in this listing.
294 def getgccversion(chost):
297 return: the current in-use gcc version
300 gcc_ver_command = 'gcc -dumpversion'
301 gcc_ver_prefix = 'gcc-'
303 gcc_not_found_error = red(
304 "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
305 "!!! to update the environment of this terminal and possibly\n" +
306 "!!! other terminals also.\n"
# 1) Preferred: ask gcc-config for the current profile (e.g. "CHOST-4.1.2").
309 mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
310 if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
311 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
# 2) Fall back to the CHOST-prefixed compiler's -dumpversion.
313 mystatus, myoutput = commands.getstatusoutput(
314 chost + "-" + gcc_ver_command)
315 if mystatus == os.EX_OK:
316 return gcc_ver_prefix + myoutput
# 3) Last resort: unprefixed gcc on PATH.
318 mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
319 if mystatus == os.EX_OK:
320 return gcc_ver_prefix + myoutput
322 portage.writemsg(gcc_not_found_error, noiselevel=-1)
323 return "[unavailable]"
# Builds the "Portage X.Y (profile, gcc, libc, kernel arch)" banner string.
# NOTE(review): the try:/except lines and the libc loop header are elided
# in this listing.
325 def getportageversion(portdir, target_root, profile, chost, vardb):
326 profilever = "unavailable"
# Express the profile as a path relative to $PORTDIR/profiles when possible.
328 realpath = os.path.realpath(profile)
329 basepath = os.path.realpath(os.path.join(portdir, "profiles"))
330 if realpath.startswith(basepath):
331 profilever = realpath[1 + len(basepath):]
# Fallback: show the raw symlink target, prefixed with "!".
334 profilever = "!" + os.readlink(profile)
337 del realpath, basepath
# Collect installed libc providers (old-style virtuals, deduplicated).
340 libclist = vardb.match("virtual/libc")
341 libclist += vardb.match("virtual/glibc")
342 libclist = portage.util.unique_array(libclist)
# Loop over matches (header elided): join version parts with "-",
# comma-separating multiple libc entries.
344 xs=portage.catpkgsplit(x)
346 libcver+=","+"-".join(xs[1:])
348 libcver="-".join(xs[1:])
350 libcver="unavailable"
352 gccver = getgccversion(chost)
353 unameout=platform.release()+" "+platform.machine()
355 return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
# Translates emerge options/action into the depgraph parameter set.
# NOTE(review): the "--deep" handling body and the return statement are
# elided in this listing.
357 def create_depgraph_params(myopts, myaction):
358 #configure emerge engine parameters
360 # self: include _this_ package regardless of if it is merged.
361 # selective: exclude the package if it is merged
362 # recurse: go into the dependencies
363 # deep: go into the dependencies of already merged packages
364 # empty: pretend nothing is merged
365 # complete: completely account for all known dependencies
366 # remove: build graph for use in removing packages
367 myparams = set(["recurse"])
# depclean/prune-style removal needs a complete graph.
369 if myaction == "remove":
370 myparams.add("remove")
371 myparams.add("complete")
# Any update-ish option implies selective behavior.
374 if "--update" in myopts or \
375 "--newuse" in myopts or \
376 "--reinstall" in myopts or \
377 "--noreplace" in myopts:
378 myparams.add("selective")
# --emptytree overrides selective: treat everything as unmerged.
379 if "--emptytree" in myopts:
380 myparams.add("empty")
381 myparams.discard("selective")
382 if "--nodeps" in myopts:
383 myparams.discard("recurse")
384 if "--deep" in myopts:
386 if "--complete-graph" in myopts:
387 myparams.add("complete")
390 # search functionality
390 # search functionality
# Package search across the ebuild tree, binary packages, and the installed
# package database, unified behind a fake portdb object. NOTE(review): this
# listing is heavily elided inside every method (missing returns, loop
# headers, try:/except lines); comments are limited to what is visible.
391 class search(object):
402 def __init__(self, root_config, spinner, searchdesc,
403 verbose, usepkg, usepkgonly):
404 """Searches the available and installed packages for the supplied search key.
405 The list of available and installed packages is created at object instantiation.
406 This makes successive searches faster."""
407 self.settings = root_config.settings
408 self.vartree = root_config.trees["vartree"]
409 self.spinner = spinner
410 self.verbose = verbose
411 self.searchdesc = searchdesc
412 self.root_config = root_config
413 self.setconfig = root_config.setconfig
414 self.matches = {"pkg" : []}
# fake_portdb forwards the dbapi-style calls below to this object's
# _-prefixed multiplexing implementations.
419 self.portdb = fake_portdb
420 for attrib in ("aux_get", "cp_all",
421 "xmatch", "findname", "getFetchMap"):
422 setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
426 portdb = root_config.trees["porttree"].dbapi
427 bindb = root_config.trees["bintree"].dbapi
428 vardb = root_config.trees["vartree"].dbapi
# Build the ordered list of databases to consult, honoring
# --usepkg/--usepkgonly.
430 if not usepkgonly and portdb._have_root_eclass_dir:
431 self._dbs.append(portdb)
433 if (usepkg or usepkgonly) and bindb.cp_all():
434 self._dbs.append(bindb)
436 self._dbs.append(vardb)
437 self._portdb = portdb
# _cp_all fragment: union of cp_all() over every database, sorted.
442 cp_all.update(db.cp_all())
443 return list(sorted(cp_all))
# Return metadata from the first database that can supply it.
445 def _aux_get(self, *args, **kwargs):
448 return db.aux_get(*args, **kwargs)
453 def _findname(self, *args, **kwargs):
455 if db is not self._portdb:
456 # We don't want findname to return anything
457 # unless it's an ebuild in a portage tree.
458 # Otherwise, it's already built and we don't
461 func = getattr(db, "findname", None)
463 value = func(*args, **kwargs)
# Delegate getFetchMap to the first database that implements it.
468 def _getFetchMap(self, *args, **kwargs):
470 func = getattr(db, "getFetchMap", None)
472 value = func(*args, **kwargs)
# Visibility check: wraps the cpv in a Package and applies the global
# visible() filter.
477 def _visible(self, db, cpv, metadata):
478 installed = db is self.vartree.dbapi
479 built = installed or db is not self._portdb
482 pkg_type = "installed"
485 return visible(self.settings,
486 Package(type_name=pkg_type, root_config=self.root_config,
487 cpv=cpv, built=built, installed=installed, metadata=metadata))
489 def _xmatch(self, level, atom):
491 This method does not expand old-style virtuals because it
492 is restricted to returning matches for a single ${CATEGORY}/${PN}
493 and old-style virtual matches unreliable for that when querying
494 multiple package databases. If necessary, old-style virtuals
495 can be performed on atoms prior to calling this method.
497 cp = portage.dep_getkey(atom)
# match-all: union of matches over all dbs, filtered to this cp, sorted.
498 if level == "match-all":
501 if hasattr(db, "xmatch"):
502 matches.update(db.xmatch(level, atom))
504 matches.update(db.match(atom))
505 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
506 db._cpv_sort_ascending(result)
# match-visible: like match-all but each plain-dbapi match is screened
# through _visible().
507 elif level == "match-visible":
510 if hasattr(db, "xmatch"):
511 matches.update(db.xmatch(level, atom))
513 db_keys = list(db._aux_cache_keys)
514 for cpv in db.match(atom):
515 metadata = izip(db_keys,
516 db.aux_get(cpv, db_keys))
517 if not self._visible(db, cpv, metadata):
520 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
521 db._cpv_sort_ascending(result)
# bestmatch-visible: keep the single best visible match across all dbs.
522 elif level == "bestmatch-visible":
525 if hasattr(db, "xmatch"):
526 cpv = db.xmatch("bestmatch-visible", atom)
527 if not cpv or portage.cpv_getkey(cpv) != cp:
529 if not result or cpv == portage.best([cpv, result]):
532 db_keys = Package.metadata_keys
533 # break out of this loop with highest visible
534 # match, checked in descending order
535 for cpv in reversed(db.match(atom)):
536 if portage.cpv_getkey(cpv) != cp:
538 metadata = izip(db_keys,
539 db.aux_get(cpv, db_keys))
540 if not self._visible(db, cpv, metadata):
542 if not result or cpv == portage.best([cpv, result]):
546 raise NotImplementedError(level)
549 def execute(self,searchkey):
550 """Performs the search for the supplied search key"""
552 self.searchkey=searchkey
553 self.packagematches = []
# With --searchdesc, description matches get their own bucket.
556 self.matches = {"pkg":[], "desc":[], "set":[]}
559 self.matches = {"pkg":[], "set":[]}
560 print "Searching... ",
# Leading '%' means the key is a regex; leading '@' restricts to sets
# (elided branches presumably set the corresponding flags — confirm).
563 if self.searchkey.startswith('%'):
565 self.searchkey = self.searchkey[1:]
566 if self.searchkey.startswith('@'):
568 self.searchkey = self.searchkey[1:]
570 self.searchre=re.compile(self.searchkey,re.I)
# Non-regex keys are escaped for a literal, case-insensitive match.
572 self.searchre=re.compile(re.escape(self.searchkey), re.I)
573 for package in self.portdb.cp_all():
574 self.spinner.update()
577 match_string = package[:]
579 match_string = package.split("/")[-1]
582 if self.searchre.search(match_string):
# A name match with no visible version is recorded as masked.
583 if not self.portdb.xmatch("match-visible", package):
585 self.matches["pkg"].append([package,masked])
586 elif self.searchdesc: # DESCRIPTION searching
587 full_package = self.portdb.xmatch("bestmatch-visible", package)
589 #no match found; we don't want to query description
590 full_package = portage.best(
591 self.portdb.xmatch("match-all", package))
597 full_desc = self.portdb.aux_get(
598 full_package, ["DESCRIPTION"])[0]
600 print "emerge: search: aux_get() failed, skipping"
602 if self.searchre.search(full_desc):
603 self.matches["desc"].append([full_package,masked])
# Also search package sets by name and, optionally, description.
605 self.sdict = self.setconfig.getSets()
606 for setname in self.sdict:
607 self.spinner.update()
609 match_string = setname
611 match_string = setname.split("/")[-1]
613 if self.searchre.search(match_string):
614 self.matches["set"].append([setname, False])
615 elif self.searchdesc:
616 if self.searchre.search(
617 self.sdict[setname].getMetadata("DESCRIPTION")):
618 self.matches["set"].append([setname, False])
621 for mtype in self.matches:
622 self.matches[mtype].sort()
623 self.mlen += len(self.matches[mtype])
# addCP fragment: record an explicitly-requested cp, flagging it masked
# when no visible version exists.
626 if not self.portdb.xmatch("match-all", cp):
629 if not self.portdb.xmatch("bestmatch-visible", cp):
631 self.matches["pkg"].append([cp, masked])
635 """Outputs the results of the search."""
636 print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
637 print "[ Applications found : "+white(str(self.mlen))+" ]"
639 vardb = self.vartree.dbapi
640 for mtype in self.matches:
641 for match,masked in self.matches[mtype]:
645 full_package = self.portdb.xmatch(
646 "bestmatch-visible", match)
648 #no match found; we don't want to query description
650 full_package = portage.best(
651 self.portdb.xmatch("match-all",match))
652 elif mtype == "desc":
654 match = portage.cpv_getkey(match)
# Set results print only name and description.
656 print green("*")+" "+white(match)
657 print " ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION")
661 desc, homepage, license = self.portdb.aux_get(
662 full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
664 print "emerge: search: aux_get() failed, skipping"
667 print green("*")+" "+white(match)+" "+red("[ Masked ]")
669 print green("*")+" "+white(match)
670 myversion = self.getVersion(full_package, search.VERSION_RELEASE)
674 mycat = match.split("/")[0]
675 mypkg = match.split("/")[1]
676 mycpv = match + "-" + myversion
677 myebuild = self.portdb.findname(mycpv)
# Ebuild case: compute the download size from the Manifest digests.
679 pkgdir = os.path.dirname(myebuild)
680 from portage import manifest
681 mf = manifest.Manifest(
682 pkgdir, self.settings["DISTDIR"])
684 uri_map = self.portdb.getFetchMap(mycpv)
685 except portage.exception.InvalidDependString, e:
686 file_size_str = "Unknown (%s)" % (e,)
690 mysum[0] = mf.getDistfilesSize(uri_map)
692 file_size_str = "Unknown (missing " + \
693 "digest for %s)" % (e,)
# Binary-package case: fall back to the size of the package file itself.
698 if db is not vardb and \
699 db.cpv_exists(mycpv):
701 if not myebuild and hasattr(db, "bintree"):
702 myebuild = db.bintree.getname(mycpv)
704 mysum[0] = os.stat(myebuild).st_size
709 if myebuild and file_size_str is None:
710 mystr = str(mysum[0] / 1024)
714 mystr = mystr[:mycount] + "," + mystr[mycount:]
715 file_size_str = mystr + " kB"
719 print " ", darkgreen("Latest version available:"),myversion
720 print " ", self.getInstallationStatus(mycat+'/'+mypkg)
723 (darkgreen("Size of files:"), file_size_str)
724 print " ", darkgreen("Homepage:")+" ",homepage
725 print " ", darkgreen("Description:")+" ",desc
726 print " ", darkgreen("License:")+" ",license
# Returns a colored "Latest version installed:" line for a package.
731 def getInstallationStatus(self,package):
732 installed_package = self.vartree.dep_bestmatch(package)
734 version = self.getVersion(installed_package,search.VERSION_RELEASE)
736 result = darkgreen("Latest version installed:")+" "+version
738 result = darkgreen("Latest version installed:")+" [ Not Installed ]"
# Extracts version (optionally with a non-r0 revision suffix) from a cpv.
741 def getVersion(self,full_package,detail):
742 if len(full_package) > 1:
743 package_parts = portage.catpkgsplit(full_package)
744 if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
745 result = package_parts[2]+ "-" + package_parts[3]
747 result = package_parts[2]
# Per-ROOT configuration bundle used by depgraph. NOTE(review): the
# pkg_tree_map dict header and the body of the iteritems() loop are elided
# in this listing.
752 class RootConfig(object):
753 """This is used internally by depgraph to track information about a
# Maps package type names to the tree that stores them.
757 "ebuild" : "porttree",
758 "binary" : "bintree",
759 "installed" : "vartree"
# Builds the reverse (tree -> type) mapping from pkg_tree_map.
763 for k, v in pkg_tree_map.iteritems():
766 def __init__(self, settings, trees, setconfig):
768 self.settings = settings
# Implicit IUSE flags, sorted for deterministic ordering.
769 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
770 self.root = self.settings["ROOT"]
771 self.setconfig = setconfig
772 self.sets = self.setconfig.getSets()
773 self.visible_pkgs = PackageVirtualDbapi(self.settings)
# Chooses the atom to record in the world file for a newly merged package,
# preferring a slot atom when the package is slotted and the user's argument
# pinned a specific slot. NOTE(review): several conditionals and the early
# returns are elided in this listing.
775 def create_world_atom(pkg, args_set, root_config):
776 """Create a new atom for the world file if one does not exist. If the
777 argument atom is precise enough to identify a specific slot then a slot
778 atom will be returned. Atoms that are in the system set may also be stored
779 in world since system atoms can only match one slot while world atoms can
780 be greedy with respect to slots. Unslotted system packages will not be
783 arg_atom = args_set.findAtomForPackage(pkg)
786 cp = portage.dep_getkey(arg_atom)
788 sets = root_config.sets
789 portdb = root_config.trees["porttree"].dbapi
790 vardb = root_config.trees["vartree"].dbapi
# A package is "slotted" if more than one SLOT exists, or its single SLOT
# is not "0".
791 available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
792 for cpv in portdb.match(cp))
793 slotted = len(available_slots) > 1 or \
794 (len(available_slots) == 1 and "0" not in available_slots)
796 # check the vdb in case this is multislot
797 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
798 for cpv in vardb.match(cp))
799 slotted = len(available_slots) > 1 or \
800 (len(available_slots) == 1 and "0" not in available_slots)
801 if slotted and arg_atom != cp:
802 # If the user gave a specific atom, store it as a
803 # slot atom in the world file.
804 slot_atom = pkg.slot_atom
806 # For USE=multislot, there are a couple of cases to
809 # 1) SLOT="0", but the real SLOT spontaneously changed to some
810 # unknown value, so just record an unslotted atom.
812 # 2) SLOT comes from an installed package and there is no
813 # matching SLOT in the portage tree.
815 # Make sure that the slot atom is available in either the
816 # portdb or the vardb, since otherwise the user certainly
817 # doesn't want the SLOT atom recorded in the world file
818 # (case 1 above). If it's only available in the vardb,
819 # the user may be trying to prevent a USE=multislot
820 # package from being removed by --depclean (case 2 above).
823 if not portdb.match(slot_atom):
824 # SLOT seems to come from an installed multislot package
826 # If there is no installed package matching the SLOT atom,
827 # it probably changed SLOT spontaneously due to USE=multislot,
828 # so just record an unslotted atom.
829 if vardb.match(slot_atom):
830 # Now verify that the argument is precise
831 # enough to identify a specific slot.
832 matches = mydb.match(arg_atom)
833 matched_slots = set()
835 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
836 if len(matched_slots) == 1:
837 new_world_atom = slot_atom
839 if new_world_atom == sets["world"].findAtomForPackage(pkg):
840 # Both atoms would be identical, so there's nothing to add.
843 # Unlike world atoms, system atoms are not greedy for slots, so they
844 # can't be safely excluded from world if they are slotted.
845 system_atom = sets["system"].findAtomForPackage(pkg)
847 if not portage.dep_getkey(system_atom).startswith("virtual/"):
849 # System virtuals aren't safe to exclude from world since they can
850 # match multiple old-style virtuals but only one of them will be
851 # pulled in by update or depclean.
852 providers = portdb.mysettings.getvirtuals().get(
853 portage.dep_getkey(system_atom))
854 if providers and len(providers) == 1 and providers[0] == cp:
856 return new_world_atom
# Generator fragment: strips the EAPI-1 "+"/"-" default prefixes from IUSE
# flags. NOTE(review): the loop header and yield statements are elided in
# this listing.
858 def filter_iuse_defaults(iuse):
860 if flag.startswith("+") or flag.startswith("-"):
# Base class whose constructor/copy walk the MRO and map keyword arguments
# onto __slots__ attributes. NOTE(review): the MRO-walking loop headers and
# the copy() return are elided in this listing.
865 class SlotObject(object):
866 __slots__ = ("__weakref__",)
868 def __init__(self, **kwargs):
869 classes = [self.__class__]
# Walk base classes so inherited __slots__ are initialized too.
874 classes.extend(c.__bases__)
875 slots = getattr(c, "__slots__", None)
# Missing kwargs default each slot attribute to None.
879 myvalue = kwargs.get(myattr, None)
880 setattr(self, myattr, myvalue)
884 Create a new instance and copy all attributes
885 defined from __slots__ (including those from
888 obj = self.__class__()
890 classes = [self.__class__]
895 classes.extend(c.__bases__)
896 slots = getattr(c, "__slots__", None)
900 setattr(obj, myattr, getattr(self, myattr))
# Dependency-priority base: rich comparisons delegate to the subclass's
# __int__() so priorities compare like integers.
904 class AbstractDepPriority(SlotObject):
905 __slots__ = ("buildtime", "runtime", "runtime_post")
907 def __lt__(self, other):
908 return self.__int__() < other
910 def __le__(self, other):
911 return self.__int__() <= other
913 def __eq__(self, other):
914 return self.__int__() == other
916 def __ne__(self, other):
917 return self.__int__() != other
919 def __gt__(self, other):
920 return self.__int__() > other
922 def __ge__(self, other):
923 return self.__int__() >= other
# copy() fragment (method header elided): shallow copy via copy.copy.
927 return copy.copy(self)
# Standard dependency priority. NOTE(review): __int__ and most of __str__
# are elided in this listing; only the runtime_post branch of the string
# form is visible.
929 class DepPriority(AbstractDepPriority):
931 __slots__ = ("satisfied", "optional", "rebuild")
943 if self.runtime_post:
944 return "runtime_post"
# Priority for blocker dependencies; a shared singleton instance is attached
# to the class (class body elided in this listing).
947 class BlockerDepPriority(DepPriority):
955 BlockerDepPriority.instance = BlockerDepPriority()
# Priority used when ordering unmerges; the visible docstring fragment maps
# property combinations to integer priorities, with everything at or below
# SOFT (-2 per the table) treated as soft. NOTE(review): __int__ and most of
# __str__ are elided in this listing.
957 class UnmergeDepPriority(AbstractDepPriority):
958 __slots__ = ("optional", "satisfied",)
960 Combination of properties Priority Category
965 (none of the above) -2 SOFT
975 if self.runtime_post:
# __str__ fragment: values above SOFT render as hard.
982 myvalue = self.__int__()
983 if myvalue > self.SOFT:
# Maps DepPriority properties onto an index range of ignore-filters; each
# classmethod returns True when the given priority can be ignored at that
# softness level. Non-DepPriority instances are never ignored (the elided
# line after each isinstance check presumably returns False — confirm).
987 class DepPriorityNormalRange(object):
989 DepPriority properties Index Category
993 runtime_post 2 MEDIUM_SOFT
995 (none of the above) 0 NONE
1003 def _ignore_optional(cls, priority):
1004 if priority.__class__ is not DepPriority:
1006 return bool(priority.optional)
1009 def _ignore_runtime_post(cls, priority):
1010 if priority.__class__ is not DepPriority:
1012 return bool(priority.optional or priority.runtime_post)
1015 def _ignore_runtime(cls, priority):
1016 if priority.__class__ is not DepPriority:
1018 return not priority.buildtime
# Aliases exposing the filters by softness category.
1020 ignore_medium = _ignore_runtime
1021 ignore_medium_soft = _ignore_runtime_post
1022 ignore_soft = _ignore_optional
# Filters ordered from most to least tolerant, indexed by the table above.
1024 DepPriorityNormalRange.ignore_priority = (
1026 DepPriorityNormalRange._ignore_optional,
1027 DepPriorityNormalRange._ignore_runtime_post,
1028 DepPriorityNormalRange._ignore_runtime
# Like DepPriorityNormalRange but also distinguishes satisfied (already
# installed) dependencies, which are softer than unsatisfied ones. Each
# classmethod returns True when the priority can be ignored at that level;
# non-DepPriority instances fall through the elided line after each
# isinstance check (presumably returning False — confirm).
1031 class DepPrioritySatisfiedRange(object):
1033 DepPriority Index Category
1035 not satisfied and buildtime HARD
1036 not satisfied and runtime 7 MEDIUM
1037 not satisfied and runtime_post 6 MEDIUM_SOFT
1038 satisfied and buildtime and rebuild 5 SOFT
1039 satisfied and buildtime 4 SOFT
1040 satisfied and runtime 3 SOFT
1041 satisfied and runtime_post 2 SOFT
1043 (none of the above) 0 NONE
1051 def _ignore_optional(cls, priority):
1052 if priority.__class__ is not DepPriority:
1054 return bool(priority.optional)
1057 def _ignore_satisfied_runtime_post(cls, priority):
1058 if priority.__class__ is not DepPriority:
1060 if priority.optional:
1062 if not priority.satisfied:
1064 return bool(priority.runtime_post)
1067 def _ignore_satisfied_runtime(cls, priority):
1068 if priority.__class__ is not DepPriority:
1070 if priority.optional:
1072 if not priority.satisfied:
1074 return not priority.buildtime
1077 def _ignore_satisfied_buildtime(cls, priority):
1078 if priority.__class__ is not DepPriority:
1080 if priority.optional:
1082 if not priority.satisfied:
# Satisfied buildtime deps are ignorable unless flagged for rebuild.
1084 if priority.buildtime:
1085 return not priority.rebuild
1089 def _ignore_satisfied_buildtime_rebuild(cls, priority):
1090 if priority.__class__ is not DepPriority:
1092 if priority.optional:
1094 return bool(priority.satisfied)
1097 def _ignore_runtime_post(cls, priority):
1098 if priority.__class__ is not DepPriority:
1100 return bool(priority.optional or \
1101 priority.satisfied or \
1102 priority.runtime_post)
1105 def _ignore_runtime(cls, priority):
1106 if priority.__class__ is not DepPriority:
1108 return bool(priority.satisfied or \
1109 not priority.buildtime)
# Aliases exposing the filters by softness category.
1111 ignore_medium = _ignore_runtime
1112 ignore_medium_soft = _ignore_runtime_post
1113 ignore_soft = _ignore_satisfied_buildtime_rebuild
# Filters ordered from most to least tolerant, indexed by the table above.
1115 DepPrioritySatisfiedRange.ignore_priority = (
1117 DepPrioritySatisfiedRange._ignore_optional,
1118 DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1119 DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1120 DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1121 DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1122 DepPrioritySatisfiedRange._ignore_runtime_post,
1123 DepPrioritySatisfiedRange._ignore_runtime
# Traverses the dependency graph from system-set packages, following only
# runtime/runtime_post edges, and returns the set of reachable Package
# nodes. NOTE(review): the initial loop header, the ignore_priority return
# statements, and the while-loop header are elided in this listing.
1126 def _find_deep_system_runtime_deps(graph):
1127 deep_system_deps = set()
# Seed the stack with installed system-set packages (uninstalls excluded).
1130 if not isinstance(node, Package) or \
1131 node.operation == 'uninstall':
1133 if node.root_config.sets['system'].findAtomForPackage(node):
1134 node_stack.append(node)
1136 def ignore_priority(priority):
1138 Ignore non-runtime priorities.
1140 if isinstance(priority, DepPriority) and \
1141 (priority.runtime or priority.runtime_post):
# Depth-first traversal; visited nodes are skipped via deep_system_deps.
1146 node = node_stack.pop()
1147 if node in deep_system_deps:
1149 deep_system_deps.add(node)
1150 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1151 if not isinstance(child, Package) or \
1152 child.operation == 'uninstall':
1154 node_stack.append(child)
1156 return deep_system_deps
# In-memory snapshot of the installed-package tree. NOTE(review): this
# listing elides many interior lines (try:/finally:, loop headers,
# continues); comments are limited to what the visible lines show.
1158 class FakeVartree(portage.vartree):
1159 """This implements an in-memory copy of a vartree instance that provides
1160 all the interfaces required for use by the depgraph. The vardb is locked
1161 during the constructor call just long enough to read a copy of the
1162 installed package information. This allows the depgraph to do its
1163 dependency calculations without holding a lock on the vardb. It also
1164 allows things like vardb global updates to be done in memory so that the
1165 user doesn't necessarily need write access to the vardb in cases where
1166 global updates are necessary (updates are performed when necessary if there
1167 is not a matching ebuild in the tree)."""
1168 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1169 self._root_config = root_config
1170 if pkg_cache is None:
1172 real_vartree = root_config.trees["vartree"]
1173 portdb = root_config.trees["porttree"].dbapi
1174 self.root = real_vartree.root
1175 self.settings = real_vartree.settings
# Always include _mtime_ so later sync() validation can compare timestamps.
1176 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1177 if "_mtime_" not in mykeys:
1178 mykeys.append("_mtime_")
1179 self._db_keys = mykeys
1180 self._pkg_cache = pkg_cache
1181 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1182 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1184 # At least the parent needs to exist for the lock file.
1185 portage.util.ensure_dirs(vdb_path)
1186 except portage.exception.PortageException:
# Lock the vdb only while copying; skipped when it isn't writable.
1190 if acquire_lock and os.access(vdb_path, os.W_OK):
1191 vdb_lock = portage.locks.lockdir(vdb_path)
1192 real_dbapi = real_vartree.dbapi
1194 for cpv in real_dbapi.cpv_all():
1195 cache_key = ("installed", self.root, cpv, "nomerge")
1196 pkg = self._pkg_cache.get(cache_key)
# Reuse cached Package metadata when available, else read from the vdb.
1198 metadata = pkg.metadata
1200 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1201 myslot = metadata["SLOT"]
1202 mycp = portage.dep_getkey(cpv)
1203 myslot_atom = "%s:%s" % (mycp, myslot)
1205 mycounter = long(metadata["COUNTER"])
1208 metadata["COUNTER"] = str(mycounter)
# Keep only the highest-COUNTER package per slot atom.
1209 other_counter = slot_counters.get(myslot_atom, None)
1210 if other_counter is not None:
1211 if other_counter > mycounter:
1213 slot_counters[myslot_atom] = mycounter
1215 pkg = Package(built=True, cpv=cpv,
1216 installed=True, metadata=metadata,
1217 root_config=root_config, type_name="installed")
1218 self._pkg_cache[pkg] = pkg
1219 self.dbapi.cpv_inject(pkg)
1220 real_dbapi.flush_cache()
1223 portage.locks.unlockdir(vdb_lock)
1224 # Populate the old-style virtuals using the cached values.
1225 if not self.settings.treeVirtuals:
1226 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1227 portage.getCPFromCPV, self.get_all_provides())
1229 # Initialize variables needed for lazy cache pulls of the live ebuild
1230 # metadata. This ensures that the vardb lock is released ASAP, without
1231 # being delayed in case cache generation is triggered.
1232 self._aux_get = self.dbapi.aux_get
1233 self.dbapi.aux_get = self._aux_get_wrapper
1234 self._match = self.dbapi.match
1235 self.dbapi.match = self._match_wrapper
1236 self._aux_get_history = set()
1237 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1238 self._portdb = portdb
1239 self._global_updates = None
1241 def _match_wrapper(self, cpv, use_cache=1):
1243 Make sure the metadata in Package instances gets updated for any
1244 cpv that is returned from a match() call, since the metadata can
1245 be accessed directly from the Package instance instead of via
1248 matches = self._match(cpv, use_cache=use_cache)
# Already-refreshed cpvs are skipped (loop header elided).
1250 if cpv in self._aux_get_history:
1252 self._aux_get_wrapper(cpv, [])
1255 def _aux_get_wrapper(self, pkg, wants):
1256 if pkg in self._aux_get_history:
1257 return self._aux_get(pkg, wants)
1258 self._aux_get_history.add(pkg)
1260 # Use the live ebuild metadata if possible.
1261 live_metadata = dict(izip(self._portdb_keys,
1262 self._portdb.aux_get(pkg, self._portdb_keys)))
1263 if not portage.eapi_is_supported(live_metadata["EAPI"]):
1265 self.dbapi.aux_update(pkg, live_metadata)
# No live ebuild: fall back to applying cached global updates.
1266 except (KeyError, portage.exception.PortageException):
1267 if self._global_updates is None:
1268 self._global_updates = \
1269 grab_global_updates(self._portdb.porttree_root)
1270 perform_global_updates(
1271 pkg, self.dbapi, self._global_updates)
1272 return self._aux_get(pkg, wants)
1274 def sync(self, acquire_lock=1):
1276 Call this method to synchronize state with the real vardb
1277 after one or more packages may have been installed or
1280 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1282 # At least the parent needs to exist for the lock file.
1283 portage.util.ensure_dirs(vdb_path)
1284 except portage.exception.PortageException:
1288 if acquire_lock and os.access(vdb_path, os.W_OK):
1289 vdb_lock = portage.locks.lockdir(vdb_path)
1293 portage.locks.unlockdir(vdb_lock)
# _sync body (helper header elided): reconcile the in-memory dbapi
# against the real vardb contents.
1297 real_vardb = self._root_config.trees["vartree"].dbapi
1298 current_cpv_set = frozenset(real_vardb.cpv_all())
1299 pkg_vardb = self.dbapi
1300 aux_get_history = self._aux_get_history
1302 # Remove any packages that have been uninstalled.
1303 for pkg in list(pkg_vardb):
1304 if pkg.cpv not in current_cpv_set:
1305 pkg_vardb.cpv_remove(pkg)
1306 aux_get_history.discard(pkg.cpv)
1308 # Validate counters and timestamps.
1311 validation_keys = ["COUNTER", "_mtime_"]
1312 for cpv in current_cpv_set:
1314 pkg_hash_key = ("installed", root, cpv, "nomerge")
1315 pkg = pkg_vardb.get(pkg_hash_key)
1317 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1319 counter = long(counter)
# Stale entry: counter or mtime changed since the snapshot was taken.
1323 if counter != pkg.counter or \
1325 pkg_vardb.cpv_remove(pkg)
1326 aux_get_history.discard(pkg.cpv)
1330 pkg = self._pkg(cpv)
# Keep only the highest-COUNTER package per slot atom.
1332 other_counter = slot_counters.get(pkg.slot_atom)
1333 if other_counter is not None:
1334 if other_counter > pkg.counter:
1337 slot_counters[pkg.slot_atom] = pkg.counter
1338 pkg_vardb.cpv_inject(pkg)
1340 real_vardb.flush_cache()
# Builds an installed Package instance for cpv from the real vardb.
1342 def _pkg(self, cpv):
1343 root_config = self._root_config
1344 real_vardb = root_config.trees["vartree"].dbapi
1345 pkg = Package(cpv=cpv, installed=True,
1346 metadata=izip(self._db_keys,
1347 real_vardb.aux_get(cpv, self._db_keys)),
1348 root_config=root_config,
1349 type_name="installed")
1352 mycounter = long(pkg.metadata["COUNTER"])
1355 pkg.metadata["COUNTER"] = str(mycounter)
1359 def grab_global_updates(portdir):
1360 from portage.update import grab_updates, parse_updates
1361 updpath = os.path.join(portdir, "profiles", "updates")
1363 rawupdates = grab_updates(updpath)
1364 except portage.exception.DirectoryNotFound:
1367 for mykey, mystat, mycontent in rawupdates:
1368 commands, errors = parse_updates(mycontent)
1369 upd_commands.extend(commands)
1372 def perform_global_updates(mycpv, mydb, mycommands):
1373 from portage.update import update_dbentries
1374 aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1375 aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1376 updates = update_dbentries(mycommands, aux_dict)
1378 mydb.aux_update(mycpv, updates)
1380 def visible(pkgsettings, pkg):
1382 Check if a package is visible. This can raise an InvalidDependString
1383 exception if LICENSE is invalid.
1384 TODO: optionally generate a list of masking reasons
1386 @returns: True if the package is visible, False otherwise.
1388 if not pkg.metadata["SLOT"]:
1390 if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1391 if not pkgsettings._accept_chost(pkg):
1393 eapi = pkg.metadata["EAPI"]
1394 if not portage.eapi_is_supported(eapi):
1396 if not pkg.installed:
1397 if portage._eapi_is_deprecated(eapi):
1399 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1401 if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1403 if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1406 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1408 except portage.exception.InvalidDependString:
1412 def get_masking_status(pkg, pkgsettings, root_config):
1414 mreasons = portage.getmaskingstatus(
1415 pkg, settings=pkgsettings,
1416 portdb=root_config.trees["porttree"].dbapi)
1418 if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1419 if not pkgsettings._accept_chost(pkg):
1420 mreasons.append("CHOST: %s" % \
1421 pkg.metadata["CHOST"])
1423 if not pkg.metadata["SLOT"]:
1424 mreasons.append("invalid: SLOT is undefined")
1428 def get_mask_info(root_config, cpv, pkgsettings,
1429 db, pkg_type, built, installed, db_keys):
1432 metadata = dict(izip(db_keys,
1433 db.aux_get(cpv, db_keys)))
1436 if metadata and not built:
1437 pkgsettings.setcpv(cpv, mydb=metadata)
1438 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1439 if metadata is None:
1440 mreasons = ["corruption"]
1442 pkg = Package(type_name=pkg_type, root_config=root_config,
1443 cpv=cpv, built=built, installed=installed, metadata=metadata)
1444 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1445 return metadata, mreasons
1447 def show_masked_packages(masked_packages):
1448 shown_licenses = set()
1449 shown_comments = set()
1450 # Maybe there is both an ebuild and a binary. Only
1451 # show one of them to avoid redundant appearance.
1453 have_eapi_mask = False
1454 for (root_config, pkgsettings, cpv,
1455 metadata, mreasons) in masked_packages:
1456 if cpv in shown_cpvs:
1459 comment, filename = None, None
1460 if "package.mask" in mreasons:
1461 comment, filename = \
1462 portage.getmaskingreason(
1463 cpv, metadata=metadata,
1464 settings=pkgsettings,
1465 portdb=root_config.trees["porttree"].dbapi,
1466 return_location=True)
1467 missing_licenses = []
1469 if not portage.eapi_is_supported(metadata["EAPI"]):
1470 have_eapi_mask = True
1472 missing_licenses = \
1473 pkgsettings._getMissingLicenses(
1475 except portage.exception.InvalidDependString:
1476 # This will have already been reported
1477 # above via mreasons.
1480 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1481 if comment and comment not in shown_comments:
1484 shown_comments.add(comment)
1485 portdb = root_config.trees["porttree"].dbapi
1486 for l in missing_licenses:
1487 l_path = portdb.findLicensePath(l)
1488 if l in shown_licenses:
1490 msg = ("A copy of the '%s' license" + \
1491 " is located at '%s'.") % (l, l_path)
1494 shown_licenses.add(l)
1495 return have_eapi_mask
1497 class Task(SlotObject):
1498 __slots__ = ("_hash_key", "_hash_value")
1500 def _get_hash_key(self):
1501 hash_key = getattr(self, "_hash_key", None)
1502 if hash_key is None:
1503 raise NotImplementedError(self)
1506 def __eq__(self, other):
1507 return self._get_hash_key() == other
1509 def __ne__(self, other):
1510 return self._get_hash_key() != other
1513 hash_value = getattr(self, "_hash_value", None)
1514 if hash_value is None:
1515 self._hash_value = hash(self._get_hash_key())
1516 return self._hash_value
1519 return len(self._get_hash_key())
1521 def __getitem__(self, key):
1522 return self._get_hash_key()[key]
1525 return iter(self._get_hash_key())
1527 def __contains__(self, key):
1528 return key in self._get_hash_key()
1531 return str(self._get_hash_key())
1533 class Blocker(Task):
1535 __hash__ = Task.__hash__
1536 __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
	def __init__(self, **kwargs):
		# Initialize the Task slots, then cache the category/package
		# name (cp) derived from the blocker atom.
		Task.__init__(self, **kwargs)
		self.cp = portage.dep_getkey(self.atom)
1542 def _get_hash_key(self):
1543 hash_key = getattr(self, "_hash_key", None)
1544 if hash_key is None:
1546 ("blocks", self.root, self.atom, self.eapi)
1547 return self._hash_key
1549 class Package(Task):
1551 __hash__ = Task.__hash__
1552 __slots__ = ("built", "cpv", "depth",
1553 "installed", "metadata", "onlydeps", "operation",
1554 "root_config", "type_name",
1555 "category", "counter", "cp", "cpv_split",
1556 "inherited", "iuse", "mtime",
1557 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1560 "CHOST", "COUNTER", "DEPEND", "EAPI",
1561 "INHERITED", "IUSE", "KEYWORDS",
1562 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1563 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1565 def __init__(self, **kwargs):
1566 Task.__init__(self, **kwargs)
1567 self.root = self.root_config.root
1568 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1569 self.cp = portage.cpv_getkey(self.cpv)
1570 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, self.slot))
1571 self.category, self.pf = portage.catsplit(self.cpv)
1572 self.cpv_split = portage.catpkgsplit(self.cpv)
1573 self.pv_split = self.cpv_split[1:]
1577 __slots__ = ("__weakref__", "enabled")
		def __init__(self, use):
			# Enabled USE flags, frozen for cheap membership tests.
			self.enabled = frozenset(use)
1582 class _iuse(object):
1584 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1586 def __init__(self, tokens, iuse_implicit):
1587 self.tokens = tuple(tokens)
1588 self.iuse_implicit = iuse_implicit
1595 enabled.append(x[1:])
1597 disabled.append(x[1:])
1600 self.enabled = frozenset(enabled)
1601 self.disabled = frozenset(disabled)
1602 self.all = frozenset(chain(enabled, disabled, other))
1604 def __getattribute__(self, name):
1607 return object.__getattribute__(self, "regex")
1608 except AttributeError:
1609 all = object.__getattribute__(self, "all")
1610 iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1611 # Escape anything except ".*" which is supposed
1612 # to pass through from _get_implicit_iuse()
1613 regex = (re.escape(x) for x in chain(all, iuse_implicit))
1614 regex = "^(%s)$" % "|".join(regex)
1615 regex = regex.replace("\\.\\*", ".*")
1616 self.regex = re.compile(regex)
1617 return object.__getattribute__(self, name)
1619 def _get_hash_key(self):
1620 hash_key = getattr(self, "_hash_key", None)
1621 if hash_key is None:
1622 if self.operation is None:
1623 self.operation = "merge"
1624 if self.onlydeps or self.installed:
1625 self.operation = "nomerge"
1627 (self.type_name, self.root, self.cpv, self.operation)
1628 return self._hash_key
1630 def __lt__(self, other):
1631 if other.cp != self.cp:
1633 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1637 def __le__(self, other):
1638 if other.cp != self.cp:
1640 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1644 def __gt__(self, other):
1645 if other.cp != self.cp:
1647 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1651 def __ge__(self, other):
1652 if other.cp != self.cp:
1654 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1658 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1659 if not x.startswith("UNUSED_"))
1660 _all_metadata_keys.discard("CDEPEND")
1661 _all_metadata_keys.update(Package.metadata_keys)
1663 from portage.cache.mappings import slot_dict_class
1664 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1666 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1668 Detect metadata updates and synchronize Package attributes.
1671 __slots__ = ("_pkg",)
1672 _wrapped_keys = frozenset(
1673 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1675 def __init__(self, pkg, metadata):
1676 _PackageMetadataWrapperBase.__init__(self)
1678 self.update(metadata)
1680 def __setitem__(self, k, v):
1681 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1682 if k in self._wrapped_keys:
1683 getattr(self, "_set_" + k.lower())(k, v)
1685 def _set_inherited(self, k, v):
1686 if isinstance(v, basestring):
1687 v = frozenset(v.split())
1688 self._pkg.inherited = v
1690 def _set_iuse(self, k, v):
1691 self._pkg.iuse = self._pkg._iuse(
1692 v.split(), self._pkg.root_config.iuse_implicit)
1694 def _set_slot(self, k, v):
1697 def _set_use(self, k, v):
1698 self._pkg.use = self._pkg._use(v.split())
1700 def _set_counter(self, k, v):
1701 if isinstance(v, basestring):
1706 self._pkg.counter = v
1708 def _set__mtime_(self, k, v):
1709 if isinstance(v, basestring):
1716 class EbuildFetchonly(SlotObject):
1718 __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1721 settings = self.settings
1723 portdb = pkg.root_config.trees["porttree"].dbapi
1724 ebuild_path = portdb.findname(pkg.cpv)
1725 settings.setcpv(pkg)
1726 debug = settings.get("PORTAGE_DEBUG") == "1"
1727 use_cache = 1 # always true
1728 portage.doebuild_environment(ebuild_path, "fetch",
1729 settings["ROOT"], settings, debug, use_cache, portdb)
1730 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
1733 rval = self._execute_with_builddir()
1735 rval = portage.doebuild(ebuild_path, "fetch",
1736 settings["ROOT"], settings, debug=debug,
1737 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1738 mydbapi=portdb, tree="porttree")
1740 if rval != os.EX_OK:
1741 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1742 eerror(msg, phase="unpack", key=pkg.cpv)
1746 def _execute_with_builddir(self):
1747 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1748 # ensuring sane $PWD (bug #239560) and storing elog
1749 # messages. Use a private temp directory, in order
1750 # to avoid locking the main one.
1751 settings = self.settings
1752 global_tmpdir = settings["PORTAGE_TMPDIR"]
1753 from tempfile import mkdtemp
1755 private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1757 if e.errno != portage.exception.PermissionDenied.errno:
1759 raise portage.exception.PermissionDenied(global_tmpdir)
1760 settings["PORTAGE_TMPDIR"] = private_tmpdir
1761 settings.backup_changes("PORTAGE_TMPDIR")
1763 retval = self._execute()
1765 settings["PORTAGE_TMPDIR"] = global_tmpdir
1766 settings.backup_changes("PORTAGE_TMPDIR")
1767 shutil.rmtree(private_tmpdir)
1771 settings = self.settings
1773 root_config = pkg.root_config
1774 portdb = root_config.trees["porttree"].dbapi
1775 ebuild_path = portdb.findname(pkg.cpv)
1776 debug = settings.get("PORTAGE_DEBUG") == "1"
1777 portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
1779 retval = portage.doebuild(ebuild_path, "fetch",
1780 self.settings["ROOT"], self.settings, debug=debug,
1781 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1782 mydbapi=portdb, tree="porttree")
1784 if retval != os.EX_OK:
1785 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1786 eerror(msg, phase="unpack", key=pkg.cpv)
1788 portage.elog.elog_process(self.pkg.cpv, self.settings)
1791 class PollConstants(object):
1794 Provides POLL* constants that are equivalent to those from the
1795 select module, for use by PollSelectAdapter.
1798 names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1801 locals()[k] = getattr(select, k, v)
1805 class AsynchronousTask(SlotObject):
1807 Subclasses override _wait() and _poll() so that calls
1808 to public methods can be wrapped for implementing
1809 hooks such as exit listener notification.
	Subclasses should call self.wait() to notify exit listeners after
1812 the task is complete and self.returncode has been set.
1815 __slots__ = ("background", "cancelled", "returncode") + \
1816 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1820 Start an asynchronous task and then return as soon as possible.
1826 raise NotImplementedError(self)
1829 return self.returncode is None
1836 return self.returncode
1839 if self.returncode is None:
1842 return self.returncode
1845 return self.returncode
1848 self.cancelled = True
1851 def addStartListener(self, f):
1853 The function will be called with one argument, a reference to self.
1855 if self._start_listeners is None:
1856 self._start_listeners = []
1857 self._start_listeners.append(f)
1859 def removeStartListener(self, f):
1860 if self._start_listeners is None:
1862 self._start_listeners.remove(f)
1864 def _start_hook(self):
1865 if self._start_listeners is not None:
1866 start_listeners = self._start_listeners
1867 self._start_listeners = None
1869 for f in start_listeners:
1872 def addExitListener(self, f):
1874 The function will be called with one argument, a reference to self.
1876 if self._exit_listeners is None:
1877 self._exit_listeners = []
1878 self._exit_listeners.append(f)
1880 def removeExitListener(self, f):
1881 if self._exit_listeners is None:
1882 if self._exit_listener_stack is not None:
1883 self._exit_listener_stack.remove(f)
1885 self._exit_listeners.remove(f)
1887 def _wait_hook(self):
1889 Call this method after the task completes, just before returning
1890 the returncode from wait() or poll(). This hook is
1891 used to trigger exit listeners when the returncode first
1894 if self.returncode is not None and \
1895 self._exit_listeners is not None:
1897 # This prevents recursion, in case one of the
1898 # exit handlers triggers this method again by
1899 # calling wait(). Use a stack that gives
1900 # removeExitListener() an opportunity to consume
1901 # listeners from the stack, before they can get
1902 # called below. This is necessary because a call
1903 # to one exit listener may result in a call to
1904 # removeExitListener() for another listener on
1905 # the stack. That listener needs to be removed
1906 # from the stack since it would be inconsistent
1907 # to call it after it has been been passed into
1908 # removeExitListener().
1909 self._exit_listener_stack = self._exit_listeners
1910 self._exit_listeners = None
1912 self._exit_listener_stack.reverse()
1913 while self._exit_listener_stack:
1914 self._exit_listener_stack.pop()(self)
1916 class AbstractPollTask(AsynchronousTask):
1918 __slots__ = ("scheduler",) + \
1922 _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1923 _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
	def _unregister(self):
		# Abstract hook: subclasses unregister their poll events from
		# the scheduler and close open files (see PipeReader._unregister).
		raise NotImplementedError(self)
1929 def _unregister_if_appropriate(self, event):
1930 if self._registered:
1931 if event & self._exceptional_events:
1934 elif event & PollConstants.POLLHUP:
1938 class PipeReader(AbstractPollTask):
1941 Reads output from one or more files and saves it in memory,
1942 for retrieval via the getvalue() method. This is driven by
1943 the scheduler's poll() loop, so it runs entirely within the
1947 __slots__ = ("input_files",) + \
1948 ("_read_data", "_reg_ids")
1951 self._reg_ids = set()
1952 self._read_data = []
1953 for k, f in self.input_files.iteritems():
1954 fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1955 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1956 self._reg_ids.add(self.scheduler.register(f.fileno(),
1957 self._registered_events, self._output_handler))
1958 self._registered = True
1961 return self._registered
1964 if self.returncode is None:
1966 self.cancelled = True
1970 if self.returncode is not None:
1971 return self.returncode
1973 if self._registered:
1974 self.scheduler.schedule(self._reg_ids)
1977 self.returncode = os.EX_OK
1978 return self.returncode
1981 """Retrieve the entire contents"""
1982 return "".join(self._read_data)
1985 """Free the memory buffer."""
1986 self._read_data = None
1988 def _output_handler(self, fd, event):
1990 if event & PollConstants.POLLIN:
1992 for f in self.input_files.itervalues():
1993 if fd == f.fileno():
1996 buf = array.array('B')
1998 buf.fromfile(f, self._bufsize)
2003 self._read_data.append(buf.tostring())
2008 self._unregister_if_appropriate(event)
2009 return self._registered
2011 def _unregister(self):
2013 Unregister from the scheduler and close open files.
2016 self._registered = False
2018 if self._reg_ids is not None:
2019 for reg_id in self._reg_ids:
2020 self.scheduler.unregister(reg_id)
2021 self._reg_ids = None
2023 if self.input_files is not None:
2024 for f in self.input_files.itervalues():
2026 self.input_files = None
2028 class CompositeTask(AsynchronousTask):
2030 __slots__ = ("scheduler",) + ("_current_task",)
2033 return self._current_task is not None
2036 self.cancelled = True
2037 if self._current_task is not None:
2038 self._current_task.cancel()
2042 This does a loop calling self._current_task.poll()
2043 repeatedly as long as the value of self._current_task
2044 keeps changing. It calls poll() a maximum of one time
2045 for a given self._current_task instance. This is useful
2046 since calling poll() on a task can trigger advance to
2047 the next task could eventually lead to the returncode
2048 being set in cases when polling only a single task would
2049 not have the same effect.
2054 task = self._current_task
2055 if task is None or task is prev:
2056 # don't poll the same task more than once
2061 return self.returncode
2067 task = self._current_task
2069 # don't wait for the same task more than once
2072 # Before the task.wait() method returned, an exit
2073 # listener should have set self._current_task to either
2074 # a different task or None. Something is wrong.
2075 raise AssertionError("self._current_task has not " + \
2076 "changed since calling wait", self, task)
2080 return self.returncode
2082 def _assert_current(self, task):
2084 Raises an AssertionError if the given task is not the
2085 same one as self._current_task. This can be useful
2088 if task is not self._current_task:
2089 raise AssertionError("Unrecognized task: %s" % (task,))
2091 def _default_exit(self, task):
2093 Calls _assert_current() on the given task and then sets the
2094 composite returncode attribute if task.returncode != os.EX_OK.
2095 If the task failed then self._current_task will be set to None.
2096 Subclasses can use this as a generic task exit callback.
2099 @returns: The task.returncode attribute.
2101 self._assert_current(task)
2102 if task.returncode != os.EX_OK:
2103 self.returncode = task.returncode
2104 self._current_task = None
2105 return task.returncode
2107 def _final_exit(self, task):
2109 Assumes that task is the final task of this composite task.
2110 Calls _default_exit() and sets self.returncode to the task's
2111 returncode and sets self._current_task to None.
2113 self._default_exit(task)
2114 self._current_task = None
2115 self.returncode = task.returncode
2116 return self.returncode
2118 def _default_final_exit(self, task):
2120 This calls _final_exit() and then wait().
2122 Subclasses can use this as a generic final task exit callback.
2125 self._final_exit(task)
2128 def _start_task(self, task, exit_handler):
2130 Register exit handler for the given task, set it
2131 as self._current_task, and call task.start().
2133 Subclasses can use this as a generic way to start
2137 task.addExitListener(exit_handler)
2138 self._current_task = task
2141 class TaskSequence(CompositeTask):
2143 A collection of tasks that executes sequentially. Each task
2144 must have a addExitListener() method that can be used as
2145 a means to trigger movement from one task to the next.
2148 __slots__ = ("_task_queue",)
	def __init__(self, **kwargs):
		AsynchronousTask.__init__(self, **kwargs)
		# Pending tasks run in FIFO order; deque gives O(1) popleft().
		self._task_queue = deque()
	def add(self, task):
		"""Append a task to the end of the sequence."""
		self._task_queue.append(task)
2158 self._start_next_task()
2161 self._task_queue.clear()
2162 CompositeTask.cancel(self)
2164 def _start_next_task(self):
2165 self._start_task(self._task_queue.popleft(),
2166 self._task_exit_handler)
2168 def _task_exit_handler(self, task):
2169 if self._default_exit(task) != os.EX_OK:
2171 elif self._task_queue:
2172 self._start_next_task()
2174 self._final_exit(task)
2177 class SubProcess(AbstractPollTask):
2179 __slots__ = ("pid",) + \
2180 ("_files", "_reg_id")
2182 # A file descriptor is required for the scheduler to monitor changes from
2183 # inside a poll() loop. When logging is not enabled, create a pipe just to
2184 # serve this purpose alone.
2188 if self.returncode is not None:
2189 return self.returncode
2190 if self.pid is None:
2191 return self.returncode
2192 if self._registered:
2193 return self.returncode
2196 retval = os.waitpid(self.pid, os.WNOHANG)
2198 if e.errno != errno.ECHILD:
2201 retval = (self.pid, 1)
2203 if retval == (0, 0):
2205 self._set_returncode(retval)
2206 return self.returncode
2211 os.kill(self.pid, signal.SIGTERM)
2213 if e.errno != errno.ESRCH:
2217 self.cancelled = True
2218 if self.pid is not None:
2220 return self.returncode
2223 return self.pid is not None and \
2224 self.returncode is None
2228 if self.returncode is not None:
2229 return self.returncode
2231 if self._registered:
2232 self.scheduler.schedule(self._reg_id)
2234 if self.returncode is not None:
2235 return self.returncode
2238 wait_retval = os.waitpid(self.pid, 0)
2240 if e.errno != errno.ECHILD:
2243 self._set_returncode((self.pid, 1))
2245 self._set_returncode(wait_retval)
2247 return self.returncode
2249 def _unregister(self):
2251 Unregister from the scheduler and close open files.
2254 self._registered = False
2256 if self._reg_id is not None:
2257 self.scheduler.unregister(self._reg_id)
2260 if self._files is not None:
2261 for f in self._files.itervalues():
2265 def _set_returncode(self, wait_retval):
2267 retval = wait_retval[1]
2269 if retval != os.EX_OK:
2271 retval = (retval & 0xff) << 8
2273 retval = retval >> 8
2275 self.returncode = retval
2277 class SpawnProcess(SubProcess):
2280 Constructor keyword args are passed into portage.process.spawn().
2281 The required "args" keyword argument will be passed as the first
2285 _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2286 "uid", "gid", "groups", "umask", "logfile",
2287 "path_lookup", "pre_exec")
2289 __slots__ = ("args",) + \
2292 _file_names = ("log", "process", "stdout")
2293 _files_dict = slot_dict_class(_file_names, prefix="")
2300 if self.fd_pipes is None:
2302 fd_pipes = self.fd_pipes
2303 fd_pipes.setdefault(0, sys.stdin.fileno())
2304 fd_pipes.setdefault(1, sys.stdout.fileno())
2305 fd_pipes.setdefault(2, sys.stderr.fileno())
2307 # flush any pending output
2308 for fd in fd_pipes.itervalues():
2309 if fd == sys.stdout.fileno():
2311 if fd == sys.stderr.fileno():
2314 logfile = self.logfile
2315 self._files = self._files_dict()
2318 master_fd, slave_fd = self._pipe(fd_pipes)
2319 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2320 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2323 fd_pipes_orig = fd_pipes.copy()
2325 # TODO: Use job control functions like tcsetpgrp() to control
2326 # access to stdin. Until then, use /dev/null so that any
2327 # attempts to read from stdin will immediately return EOF
2328 # instead of blocking indefinitely.
2329 null_input = open('/dev/null', 'rb')
2330 fd_pipes[0] = null_input.fileno()
2332 fd_pipes[0] = fd_pipes_orig[0]
2334 files.process = os.fdopen(master_fd, 'rb')
2335 if logfile is not None:
2337 fd_pipes[1] = slave_fd
2338 fd_pipes[2] = slave_fd
2340 files.log = open(logfile, mode='ab')
2341 portage.util.apply_secpass_permissions(logfile,
2342 uid=portage.portage_uid, gid=portage.portage_gid,
2345 if not self.background:
2346 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2348 output_handler = self._output_handler
2352 # Create a dummy pipe so the scheduler can monitor
2353 # the process from inside a poll() loop.
2354 fd_pipes[self._dummy_pipe_fd] = slave_fd
2356 fd_pipes[1] = slave_fd
2357 fd_pipes[2] = slave_fd
2358 output_handler = self._dummy_handler
2361 for k in self._spawn_kwarg_names:
2362 v = getattr(self, k)
2366 kwargs["fd_pipes"] = fd_pipes
2367 kwargs["returnpid"] = True
2368 kwargs.pop("logfile", None)
2370 self._reg_id = self.scheduler.register(files.process.fileno(),
2371 self._registered_events, output_handler)
2372 self._registered = True
2374 retval = self._spawn(self.args, **kwargs)
2377 if null_input is not None:
2380 if isinstance(retval, int):
2383 self.returncode = retval
2387 self.pid = retval[0]
2388 portage.process.spawned_pids.remove(self.pid)
2390 def _pipe(self, fd_pipes):
2392 @type fd_pipes: dict
2393 @param fd_pipes: pipes from which to copy terminal size if desired.
	def _spawn(self, args, **kwargs):
		# Default backend; subclasses (e.g. MiscFunctionsProcess)
		# override to spawn through a different mechanism.
		return portage.process.spawn(args, **kwargs)
2400 def _output_handler(self, fd, event):
2402 if event & PollConstants.POLLIN:
2405 buf = array.array('B')
2407 buf.fromfile(files.process, self._bufsize)
2412 if not self.background:
2413 buf.tofile(files.stdout)
2414 files.stdout.flush()
2415 buf.tofile(files.log)
2421 self._unregister_if_appropriate(event)
2422 return self._registered
2424 def _dummy_handler(self, fd, event):
2426 This method is mainly interested in detecting EOF, since
2427 the only purpose of the pipe is to allow the scheduler to
2428 monitor the process from inside a poll() loop.
2431 if event & PollConstants.POLLIN:
2433 buf = array.array('B')
2435 buf.fromfile(self._files.process, self._bufsize)
2445 self._unregister_if_appropriate(event)
2446 return self._registered
2448 class MiscFunctionsProcess(SpawnProcess):
2450 Spawns misc-functions.sh with an existing ebuild environment.
2453 __slots__ = ("commands", "phase", "pkg", "settings")
2456 settings = self.settings
2457 settings.pop("EBUILD_PHASE", None)
2458 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2459 misc_sh_binary = os.path.join(portage_bin_path,
2460 os.path.basename(portage.const.MISC_SH_BINARY))
2462 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2463 self.logfile = settings.get("PORTAGE_LOG_FILE")
2465 portage._doebuild_exit_status_unlink(
2466 settings.get("EBUILD_EXIT_STATUS_FILE"))
2468 SpawnProcess._start(self)
2470 def _spawn(self, args, **kwargs):
2471 settings = self.settings
2472 debug = settings.get("PORTAGE_DEBUG") == "1"
2473 return portage.spawn(" ".join(args), settings,
2474 debug=debug, **kwargs)
	def _set_returncode(self, wait_retval):
		# Let SpawnProcess decode the waitpid() status first, then pass
		# the result through portage's per-phase exit-status check/log
		# helper, which may replace self.returncode.
		SpawnProcess._set_returncode(self, wait_retval)
		self.returncode = portage._doebuild_exit_status_check_and_log(
			self.settings, self.phase, self.returncode)
2481 class EbuildFetcher(SpawnProcess):
2483 __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2488 root_config = self.pkg.root_config
2489 portdb = root_config.trees["porttree"].dbapi
2490 ebuild_path = portdb.findname(self.pkg.cpv)
2491 settings = self.config_pool.allocate()
2492 settings.setcpv(self.pkg)
2494 # In prefetch mode, logging goes to emerge-fetch.log and the builddir
2495 # should not be touched since otherwise it could interfere with
2496 # another instance of the same cpv concurrently being built for a
2497 # different $ROOT (currently, builds only cooperate with prefetchers
2498 # that are spawned for the same $ROOT).
2499 if not self.prefetch:
2500 self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2501 self._build_dir.lock()
2502 self._build_dir.clean()
2503 portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2504 if self.logfile is None:
2505 self.logfile = settings.get("PORTAGE_LOG_FILE")
2511 # If any incremental variables have been overridden
2512 # via the environment, those values need to be passed
2513 # along here so that they are correctly considered by
		# the config instance in the subprocess.
2515 fetch_env = os.environ.copy()
2517 nocolor = settings.get("NOCOLOR")
2518 if nocolor is not None:
2519 fetch_env["NOCOLOR"] = nocolor
2521 fetch_env["PORTAGE_NICENESS"] = "0"
2523 fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2525 ebuild_binary = os.path.join(
2526 settings["PORTAGE_BIN_PATH"], "ebuild")
2528 fetch_args = [ebuild_binary, ebuild_path, phase]
2529 debug = settings.get("PORTAGE_DEBUG") == "1"
2531 fetch_args.append("--debug")
2533 self.args = fetch_args
2534 self.env = fetch_env
2535 SpawnProcess._start(self)
2537 def _pipe(self, fd_pipes):
2538 """When appropriate, use a pty so that fetcher progress bars,
2539 like wget has, will work properly."""
2540 if self.background or not sys.stdout.isatty():
2541 # When the output only goes to a log file,
2542 # there's no point in creating a pty.
2544 stdout_pipe = fd_pipes.get(1)
2545 got_pty, master_fd, slave_fd = \
2546 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2547 return (master_fd, slave_fd)
# Post-exit hook: record elog output, clean on success, and release the
# build dir lock plus the pooled config instance.
2549 def _set_returncode(self, wait_retval):
# Let the base class translate the raw wait() status first.
2550 SpawnProcess._set_returncode(self, wait_retval)
2551 # Collect elog messages that might have been
2552 # created by the pkg_nofetch phase.
2553 if self._build_dir is not None:
2554 # Skip elog messages for prefetch, in order to avoid duplicates.
2555 if not self.prefetch and self.returncode != os.EX_OK:
# On failure, append an eerror message (and log path, if any) to the log.
2557 if self.logfile is not None:
2559 elog_out = open(self.logfile, 'a')
2560 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2561 if self.logfile is not None:
2562 msg += ", Log file:"
2563 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2564 if self.logfile is not None:
2565 eerror(" '%s'" % (self.logfile,),
2566 phase="unpack", key=self.pkg.cpv, out=elog_out)
2567 if elog_out is not None:
# NOTE(review): elog_out close is elided from this view (line 2568).
# Prefetch defers elog processing so the real fetch/build reports it once.
2569 if not self.prefetch:
2570 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2571 features = self._build_dir.settings.features
2572 if self.returncode == os.EX_OK:
2573 self._build_dir.clean()
# Always unlock and return the settings instance to the config pool.
2574 self._build_dir.unlock()
2575 self.config_pool.deallocate(self._build_dir.settings)
2576 self._build_dir = None
# Manages locking/cleaning of a package's PORTAGE_BUILDDIR, including the
# shared category directory that contains it.
2578 class EbuildBuildDir(SlotObject):
2580 __slots__ = ("dir_path", "pkg", "settings",
2581 "locked", "_catdir", "_lock_obj")
2583 def __init__(self, **kwargs):
2584 SlotObject.__init__(self, **kwargs)
# NOTE(review): lines between __init__ and the lock() docstring below are
# elided; the following text belongs to the lock() method's docstring.
2589 This raises an AlreadyLocked exception if lock() is called
2590 while a lock is already held. In order to avoid this, call
2591 unlock() or check whether the "locked" attribute is True
2592 or False before calling lock().
2594 if self._lock_obj is not None:
2595 raise self.AlreadyLocked((self._lock_obj,))
2597 dir_path = self.dir_path
# When no explicit dir_path was given, derive PORTAGE_BUILDDIR by running
# doebuild_environment() for the "setup" phase.
2598 if dir_path is None:
2599 root_config = self.pkg.root_config
2600 portdb = root_config.trees["porttree"].dbapi
2601 ebuild_path = portdb.findname(self.pkg.cpv)
2602 settings = self.settings
2603 settings.setcpv(self.pkg)
2604 debug = settings.get("PORTAGE_DEBUG") == "1"
2605 use_cache = 1 # always true
2606 portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2607 self.settings, debug, use_cache, portdb)
2608 dir_path = self.settings["PORTAGE_BUILDDIR"]
2610 catdir = os.path.dirname(dir_path)
2611 self._catdir = catdir
# Lock the category dir while creating it, so concurrent emerges do not
# race on directory creation/removal.
2613 portage.util.ensure_dirs(os.path.dirname(catdir),
2614 gid=portage.portage_gid,
2618 catdir_lock = portage.locks.lockdir(catdir)
2619 portage.util.ensure_dirs(catdir,
2620 gid=portage.portage_gid,
2622 self._lock_obj = portage.locks.lockdir(dir_path)
2624 self.locked = self._lock_obj is not None
2625 if catdir_lock is not None:
2626 portage.locks.unlockdir(catdir_lock)
2629 """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
2630 by keepwork or keeptemp in FEATURES."""
2631 settings = self.settings
2632 features = settings.features
2633 if not ("keepwork" in features or "keeptemp" in features):
2635 shutil.rmtree(settings["PORTAGE_BUILDDIR"])
# Missing build dir is fine; anything else propagates.
2636 except EnvironmentError, e:
2637 if e.errno != errno.ENOENT:
# unlock(): release the build dir lock, then try to remove the (possibly
# empty) category dir under its own lock; ENOENT/ENOTEMPTY/EEXIST are
# expected when other builds still use it.
2642 if self._lock_obj is None:
2645 portage.locks.unlockdir(self._lock_obj)
2646 self._lock_obj = None
2649 catdir = self._catdir
2652 catdir_lock = portage.locks.lockdir(catdir)
2658 if e.errno not in (errno.ENOENT,
2659 errno.ENOTEMPTY, errno.EEXIST):
2662 portage.locks.unlockdir(catdir_lock)
2664 class AlreadyLocked(portage.exception.PortageException):
# Composite task that builds a package from an ebuild: optional prefetch
# wait, fetch, build (EbuildExecuter), optional binary-package creation,
# and finally merge -- releasing the build dir lock at each failure point.
2667 class EbuildBuild(CompositeTask):
2669 __slots__ = ("args_set", "config_pool", "find_blockers",
2670 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2671 "prefetcher", "settings", "world_atom") + \
2672 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
# _start(): resolve the ebuild path and synchronize with any running
# prefetcher before kicking off the fetch.
2676 logger = self.logger
2679 settings = self.settings
2680 world_atom = self.world_atom
2681 root_config = pkg.root_config
2684 portdb = root_config.trees[tree].dbapi
2685 settings.setcpv(pkg)
2686 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2687 ebuild_path = portdb.findname(self.pkg.cpv)
2688 self._ebuild_path = ebuild_path
2690 prefetcher = self.prefetcher
2691 if prefetcher is None:
2693 elif not prefetcher.isAlive():
2695 elif prefetcher.poll() is None:
# Prefetcher still running: tell the user and wait for its exit event.
2697 waiting_msg = "Fetching files " + \
2698 "in the background. " + \
2699 "To view fetch progress, run `tail -f " + \
2700 "/var/log/emerge-fetch.log` in another " + \
2702 msg_prefix = colorize("GOOD", " * ")
2703 from textwrap import wrap
2704 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2705 for line in wrap(waiting_msg, 65))
2706 if not self.background:
2707 writemsg(waiting_msg, noiselevel=-1)
2709 self._current_task = prefetcher
2710 prefetcher.addExitListener(self._prefetch_exit)
# Prefetcher already finished: proceed synchronously.
2713 self._prefetch_exit(prefetcher)
2715 def _prefetch_exit(self, prefetcher):
2719 settings = self.settings
# Pretend mode uses the synchronous EbuildFetchonly helper.
2722 fetcher = EbuildFetchonly(
2723 fetch_all=opts.fetch_all_uri,
2724 pkg=pkg, pretend=opts.pretend,
2726 retval = fetcher.execute()
2727 self.returncode = retval
# Otherwise run the asynchronous EbuildFetcher task.
2731 fetcher = EbuildFetcher(config_pool=self.config_pool,
2732 fetchall=opts.fetch_all_uri,
2733 fetchonly=opts.fetchonly,
2734 background=self.background,
2735 pkg=pkg, scheduler=self.scheduler)
2737 self._start_task(fetcher, self._fetch_exit)
2739 def _fetch_exit(self, fetcher):
2743 fetch_failed = False
2745 fetch_failed = self._final_exit(fetcher) != os.EX_OK
2747 fetch_failed = self._default_exit(fetcher) != os.EX_OK
# Preserve the fetch log path so the failure can be reported later.
2749 if fetch_failed and fetcher.logfile is not None and \
2750 os.path.exists(fetcher.logfile):
2751 self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2753 if not fetch_failed and fetcher.logfile is not None:
2754 # Fetch was successful, so remove the fetch log.
2756 os.unlink(fetcher.logfile)
2760 if fetch_failed or opts.fetchonly:
# _build(): lock the build dir, log the Clean/Compile messages, and start
# the EbuildExecuter.
2764 logger = self.logger
2766 pkg_count = self.pkg_count
2767 scheduler = self.scheduler
2768 settings = self.settings
2769 features = settings.features
2770 ebuild_path = self._ebuild_path
2771 system_set = pkg.root_config.sets["system"]
2773 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2774 self._build_dir.lock()
2776 # Cleaning is triggered before the setup
2777 # phase, in portage.doebuild().
2778 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2779 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2780 short_msg = "emerge: (%s of %s) %s Clean" % \
2781 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2782 logger.log(msg, short_msg=short_msg)
2784 #buildsyspkg: Check if we need to _force_ binary package creation
2785 self._issyspkg = "buildsyspkg" in features and \
2786 system_set.findAtomForPackage(pkg) and \
2789 if opts.buildpkg or self._issyspkg:
2791 self._buildpkg = True
2793 msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2794 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2795 short_msg = "emerge: (%s of %s) %s Compile" % \
2796 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2797 logger.log(msg, short_msg=short_msg)
2800 msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2801 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2802 short_msg = "emerge: (%s of %s) %s Compile" % \
2803 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2804 logger.log(msg, short_msg=short_msg)
2806 build = EbuildExecuter(background=self.background, pkg=pkg,
2807 scheduler=scheduler, settings=settings)
2808 self._start_task(build, self._build_exit)
# Flush pending elog messages before dropping the build dir lock.
2810 def _unlock_builddir(self):
2811 portage.elog.elog_process(self.pkg.cpv, self.settings)
2812 self._build_dir.unlock()
2814 def _build_exit(self, build):
2815 if self._default_exit(build) != os.EX_OK:
2816 self._unlock_builddir()
2821 buildpkg = self._buildpkg
2824 self._final_exit(build)
# buildsyspkg: announce the forced rescue tarball, also into the log file.
2829 msg = ">>> This is a system package, " + \
2830 "let's pack a rescue tarball.\n"
2832 log_path = self.settings.get("PORTAGE_LOG_FILE")
2833 if log_path is not None:
2834 log_file = open(log_path, 'a')
2840 if not self.background:
2841 portage.writemsg_stdout(msg, noiselevel=-1)
2843 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2844 scheduler=self.scheduler, settings=self.settings)
2846 self._start_task(packager, self._buildpkg_exit)
2848 def _buildpkg_exit(self, packager):
2850 Released build dir lock when there is a failure or
2851 when in buildpkgonly mode. Otherwise, the lock will
2852 be released when merge() is called.
2855 if self._default_exit(packager) != os.EX_OK:
2856 self._unlock_builddir()
2860 if self.opts.buildpkgonly:
2861 # Need to call "clean" phase for buildpkgonly mode
2862 portage.elog.elog_process(self.pkg.cpv, self.settings)
2864 clean_phase = EbuildPhase(background=self.background,
2865 pkg=self.pkg, phase=phase,
2866 scheduler=self.scheduler, settings=self.settings,
2868 self._start_task(clean_phase, self._clean_exit)
2871 # Continue holding the builddir lock until
2872 # after the package has been installed.
2873 self._current_task = None
2874 self.returncode = packager.returncode
2877 def _clean_exit(self, clean_phase):
2878 if self._final_exit(clean_phase) != os.EX_OK or \
2879 self.opts.buildpkgonly:
2880 self._unlock_builddir()
# install(): merge the built image; docstring fragment below.
2885 Install the package and then clean up and release locks.
2886 Only call this after the build has completed successfully
2887 and neither fetchonly nor buildpkgonly mode are enabled.
2890 find_blockers = self.find_blockers
2891 ldpath_mtimes = self.ldpath_mtimes
2892 logger = self.logger
2894 pkg_count = self.pkg_count
2895 settings = self.settings
2896 world_atom = self.world_atom
2897 ebuild_path = self._ebuild_path
2900 merge = EbuildMerge(find_blockers=self.find_blockers,
2901 ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2902 pkg_count=pkg_count, pkg_path=ebuild_path,
2903 scheduler=self.scheduler,
2904 settings=settings, tree=tree, world_atom=world_atom)
2906 msg = " === (%s of %s) Merging (%s::%s)" % \
2907 (pkg_count.curval, pkg_count.maxval,
2908 pkg.cpv, ebuild_path)
2909 short_msg = "emerge: (%s of %s) %s Merge" % \
2910 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2911 logger.log(msg, short_msg=short_msg)
# Unlock happens in a finally-style cleanup after merge.execute().
2914 rval = merge.execute()
2916 self._unlock_builddir()
# Runs the ordered build phases of a single ebuild: clean, setup, unpack,
# then the src_* phases as one TaskSequence.
2920 class EbuildExecuter(CompositeTask):
2922 __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2924 _phases = ("prepare", "configure", "compile", "test", "install")
# Eclasses whose ebuilds fetch at unpack time and need $DISTDIR
# serialization (members elided from this view).
2926 _live_eclasses = frozenset([
2936 self._tree = "porttree"
2939 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2940 scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2941 self._start_task(clean_phase, self._clean_phase_exit)
2943 def _clean_phase_exit(self, clean_phase):
2945 if self._default_exit(clean_phase) != os.EX_OK:
2950 scheduler = self.scheduler
2951 settings = self.settings
2954 # This initializes PORTAGE_LOG_FILE.
2955 portage.prepare_build_dirs(pkg.root, settings, cleanup)
# Setup goes through the scheduler's dedicated setup queue rather than
# _start_task().
2957 setup_phase = EbuildPhase(background=self.background,
2958 pkg=pkg, phase="setup", scheduler=scheduler,
2959 settings=settings, tree=self._tree)
2961 setup_phase.addExitListener(self._setup_exit)
2962 self._current_task = setup_phase
2963 self.scheduler.scheduleSetup(setup_phase)
2965 def _setup_exit(self, setup_phase):
2967 if self._default_exit(setup_phase) != os.EX_OK:
2971 unpack_phase = EbuildPhase(background=self.background,
2972 pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2973 settings=self.settings, tree=self._tree)
2975 if self._live_eclasses.intersection(self.pkg.inherited):
2976 # Serialize $DISTDIR access for live ebuilds since
2977 # otherwise they can interfere with each other.
2979 unpack_phase.addExitListener(self._unpack_exit)
2980 self._current_task = unpack_phase
2981 self.scheduler.scheduleUnpack(unpack_phase)
2984 self._start_task(unpack_phase, self._unpack_exit)
2986 def _unpack_exit(self, unpack_phase):
2988 if self._default_exit(unpack_phase) != os.EX_OK:
# Chain the remaining src_* phases as a single sequential task.
2992 ebuild_phases = TaskSequence(scheduler=self.scheduler)
2995 phases = self._phases
2996 eapi = pkg.metadata["EAPI"]
2997 if eapi in ("0", "1"):
2998 # skip src_prepare and src_configure
3001 for phase in phases:
3002 ebuild_phases.add(EbuildPhase(background=self.background,
3003 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3004 settings=self.settings, tree=self._tree))
3006 self._start_task(ebuild_phases, self._default_final_exit)
3008 class EbuildMetadataPhase(SubProcess):
3011 Asynchronous interface for the ebuild "depend" phase which is
3012 used to extract metadata from the ebuild.
3015 __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3016 "ebuild_mtime", "portdb", "repo_path", "settings") + \
3019 _file_names = ("ebuild",)
3020 _files_dict = slot_dict_class(_file_names, prefix="")
# _start(): wire up fd_pipes, create a non-blocking pipe for the metadata
# output, and spawn doebuild("depend") without waiting for it.
3024 settings = self.settings
3026 ebuild_path = self.ebuild_path
3027 debug = settings.get("PORTAGE_DEBUG") == "1"
3031 if self.fd_pipes is not None:
3032 fd_pipes = self.fd_pipes.copy()
3036 fd_pipes.setdefault(0, sys.stdin.fileno())
3037 fd_pipes.setdefault(1, sys.stdout.fileno())
3038 fd_pipes.setdefault(2, sys.stderr.fileno())
3040 # flush any pending output
3041 for fd in fd_pipes.itervalues():
3042 if fd == sys.stdout.fileno():
3044 if fd == sys.stderr.fileno():
3047 fd_pipes_orig = fd_pipes.copy()
3048 self._files = self._files_dict()
# Read end must be non-blocking so the poll loop never stalls on it.
3051 master_fd, slave_fd = os.pipe()
3052 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3053 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
# The child writes its metadata to this dedicated fd.
3055 fd_pipes[self._metadata_fd] = slave_fd
3057 self._raw_metadata = []
3058 files.ebuild = os.fdopen(master_fd, 'r')
3059 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3060 self._registered_events, self._output_handler)
3061 self._registered = True
3063 retval = portage.doebuild(ebuild_path, "depend",
3064 settings["ROOT"], settings, debug,
3065 mydbapi=self.portdb, tree="porttree",
3066 fd_pipes=fd_pipes, returnpid=True)
3070 if isinstance(retval, int):
3071 # doebuild failed before spawning
3073 self.returncode = retval
# On success retval is a pid list; this class reaps the child itself, so
# remove it from portage's global spawned_pids bookkeeping.
3077 self.pid = retval[0]
3078 portage.process.spawned_pids.remove(self.pid)
3080 def _output_handler(self, fd, event):
3082 if event & PollConstants.POLLIN:
3083 self._raw_metadata.append(self._files.ebuild.read())
# An empty read means EOF on the metadata pipe.
3084 if not self._raw_metadata[-1]:
3088 self._unregister_if_appropriate(event)
3089 return self._registered
3091 def _set_returncode(self, wait_retval):
3092 SubProcess._set_returncode(self, wait_retval)
3093 if self.returncode == os.EX_OK:
3094 metadata_lines = "".join(self._raw_metadata).splitlines()
3095 if len(portage.auxdbkeys) != len(metadata_lines):
3096 # Don't trust bash's returncode if the
3097 # number of lines is incorrect.
# One metadata line per auxdb key, in auxdbkeys order.
3100 metadata = izip(portage.auxdbkeys, metadata_lines)
3101 self.metadata_callback(self.cpv, self.ebuild_path,
3102 self.repo_path, metadata, self.ebuild_mtime)
# SpawnProcess subclass that runs a single ebuild phase via
# portage.doebuild().
3104 class EbuildProcess(SpawnProcess):
3106 __slots__ = ("phase", "pkg", "settings", "tree")
3109 # Don't open the log file during the clean phase since the
3110 # open file can result in an nfs lock on $T/build.log which
3111 # prevents the clean phase from removing $T.
3112 if self.phase not in ("clean", "cleanrm"):
3113 self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3114 SpawnProcess._start(self)
# Prefer a pty sized to the terminal so phase output renders correctly.
3116 def _pipe(self, fd_pipes):
3117 stdout_pipe = fd_pipes.get(1)
3118 got_pty, master_fd, slave_fd = \
3119 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3120 return (master_fd, slave_fd)
# Delegate the actual spawn to doebuild() for this phase.
3122 def _spawn(self, args, **kwargs):
3124 root_config = self.pkg.root_config
3126 mydbapi = root_config.trees[tree].dbapi
3127 settings = self.settings
3128 ebuild_path = settings["EBUILD"]
3129 debug = settings.get("PORTAGE_DEBUG") == "1"
3131 rval = portage.doebuild(ebuild_path, self.phase,
3132 root_config.root, settings, debug,
3133 mydbapi=mydbapi, tree=tree, **kwargs)
3137 def _set_returncode(self, wait_retval):
3138 SpawnProcess._set_returncode(self, wait_retval)
# The exit status file is authoritative over the raw process status,
# except for clean phases which remove it with the rest of $T.
3140 if self.phase not in ("clean", "cleanrm"):
3141 self.returncode = portage._doebuild_exit_status_check_and_log(
3142 self.settings, self.phase, self.returncode)
# FEATURES=test-fail-continue turns a failed src_test into success.
3144 if self.phase == "test" and self.returncode != os.EX_OK and \
3145 "test-fail-continue" in self.settings.features:
3146 self.returncode = os.EX_OK
3148 portage._post_phase_userpriv_perms(self.settings)
# Composite task wrapping one EbuildProcess phase plus any associated
# post-phase misc commands (e.g. install hooks).
3150 class EbuildPhase(CompositeTask):
3152 __slots__ = ("background", "pkg", "phase",
3153 "scheduler", "settings", "tree")
3155 _post_phase_cmds = portage._post_phase_cmds
3159 ebuild_process = EbuildProcess(background=self.background,
3160 pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3161 settings=self.settings, tree=self.tree)
3163 self._start_task(ebuild_process, self._ebuild_exit)
3165 def _ebuild_exit(self, ebuild_process):
# After install, scan the build log for QA problems.
3167 if self.phase == "install":
3169 log_path = self.settings.get("PORTAGE_LOG_FILE")
3171 if self.background and log_path is not None:
3172 log_file = open(log_path, 'a')
3175 portage._check_build_log(self.settings, out=out)
3177 if log_file is not None:
3180 if self._default_exit(ebuild_process) != os.EX_OK:
3184 settings = self.settings
3186 if self.phase == "install":
3187 portage._post_src_install_uid_fix(settings)
# Run this phase's post-phase misc commands, if any are configured.
3189 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3190 if post_phase_cmds is not None:
3191 post_phase = MiscFunctionsProcess(background=self.background,
3192 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3193 scheduler=self.scheduler, settings=settings)
3194 self._start_task(post_phase, self._post_phase_exit)
3197 self.returncode = ebuild_process.returncode
3198 self._current_task = None
3201 def _post_phase_exit(self, post_phase):
3202 if self._final_exit(post_phase) != os.EX_OK:
3203 writemsg("!!! post %s failed; exiting.\n" % self.phase,
3205 self._current_task = None
# Runs the "package" phase to produce a .tbz2 and injects it into the
# binary package tree on success.
3209 class EbuildBinpkg(EbuildProcess):
3211 This assumes that src_install() has successfully completed.
3213 __slots__ = ("_binpkg_tmpfile",)
3216 self.phase = "package"
3217 self.tree = "porttree"
3219 root_config = pkg.root_config
3220 portdb = root_config.trees["porttree"].dbapi
3221 bintree = root_config.trees["bintree"]
3222 ebuild_path = portdb.findname(self.pkg.cpv)
3223 settings = self.settings
3224 debug = settings.get("PORTAGE_DEBUG") == "1"
3226 bintree.prevent_collision(pkg.cpv)
# Write to a pid-suffixed temp file so concurrent emerges cannot clobber
# each other's output; inject() moves it into place afterwards.
3227 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3228 pkg.cpv + ".tbz2." + str(os.getpid()))
3229 self._binpkg_tmpfile = binpkg_tmpfile
3230 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3231 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3234 EbuildProcess._start(self)
# Keep the temp-file variable out of the persistent settings afterwards.
3236 settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3238 def _set_returncode(self, wait_retval):
3239 EbuildProcess._set_returncode(self, wait_retval)
3242 bintree = pkg.root_config.trees["bintree"]
3243 binpkg_tmpfile = self._binpkg_tmpfile
3244 if self.returncode == os.EX_OK:
3245 bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
# Synchronous wrapper around portage.merge() for a freshly built image;
# updates the world file and logs success.
3247 class EbuildMerge(SlotObject):
3249 __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3250 "pkg", "pkg_count", "pkg_path", "pretend",
3251 "scheduler", "settings", "tree", "world_atom")
# execute(): merge $D into the live filesystem via portage.merge().
3254 root_config = self.pkg.root_config
3255 settings = self.settings
3256 retval = portage.merge(settings["CATEGORY"],
3257 settings["PF"], settings["D"],
3258 os.path.join(settings["PORTAGE_BUILDDIR"],
3259 "build-info"), root_config.root, settings,
3260 myebuild=settings["EBUILD"],
3261 mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3262 vartree=root_config.trees["vartree"],
3263 prev_mtimes=self.ldpath_mtimes,
3264 scheduler=self.scheduler,
3265 blockers=self.find_blockers)
# Record the package in the world set only after a successful merge.
3267 if retval == os.EX_OK:
3268 self.world_atom(self.pkg)
3273 def _log_success(self):
# NOTE(review): line 3274 is elided here; it presumably binds `pkg`,
# which the statements below rely on.
3275 pkg_count = self.pkg_count
3276 pkg_path = self.pkg_path
3277 logger = self.logger
3278 if "noclean" not in self.settings.features:
3279 short_msg = "emerge: (%s of %s) %s Clean Post" % \
3280 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3281 logger.log((" === (%s of %s) " + \
3282 "Post-Build Cleaning (%s::%s)") % \
3283 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3284 short_msg=short_msg)
3285 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3286 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# Task that unmerges an installed package, routing messages to the log
# file when running in the background.
3288 class PackageUninstall(AsynchronousTask):
3290 __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
# _start(): run unmerge() synchronously; UninstallFailure carries the
# child status, anything else maps to EX_OK.
3294 unmerge(self.pkg.root_config, self.opts, "unmerge",
3295 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3296 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3297 writemsg_level=self._writemsg_level)
3298 except UninstallFailure, e:
3299 self.returncode = e.status
3301 self.returncode = os.EX_OK
3304 def _writemsg_level(self, msg, level=0, noiselevel=0):
3306 log_path = self.settings.get("PORTAGE_LOG_FILE")
3307 background = self.background
3309 if log_path is None:
# Without a log file, suppress low-priority output in background mode.
3310 if not (background and level < logging.WARNING):
3311 portage.util.writemsg_level(msg,
3312 level=level, noiselevel=noiselevel)
3315 portage.util.writemsg_level(msg,
3316 level=level, noiselevel=noiselevel)
# With a log file, the message is also appended to it.
3318 f = open(log_path, 'a')
# Composite task that installs a binary package: optional remote fetch,
# digest verification, unpack/extraction into an image dir, then merge.
3324 class Binpkg(CompositeTask):
3326 __slots__ = ("find_blockers",
3327 "ldpath_mtimes", "logger", "opts",
3328 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3329 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3330 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
# Write a message to stdout (foreground) and/or the log file.
3332 def _writemsg_level(self, msg, level=0, noiselevel=0):
3334 if not self.background:
3335 portage.util.writemsg_level(msg,
3336 level=level, noiselevel=noiselevel)
3338 log_path = self.settings.get("PORTAGE_LOG_FILE")
3339 if log_path is not None:
3340 f = open(log_path, 'a')
# _start(): compute build dir layout, synthesize the environment via
# doebuild_environment(), then synchronize with any running prefetcher.
3349 settings = self.settings
3350 settings.setcpv(pkg)
3351 self._tree = "bintree"
3352 self._bintree = self.pkg.root_config.trees[self._tree]
3353 self._verify = not self.opts.pretend
3355 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3356 "portage", pkg.category, pkg.pf)
3357 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3358 pkg=pkg, settings=settings)
3359 self._image_dir = os.path.join(dir_path, "image")
3360 self._infloc = os.path.join(dir_path, "build-info")
3361 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3362 settings["EBUILD"] = self._ebuild_path
3363 debug = settings.get("PORTAGE_DEBUG") == "1"
3364 portage.doebuild_environment(self._ebuild_path, "setup",
3365 settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3366 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3368 # The prefetcher has already completed or it
3369 # could be running now. If it's running now,
3370 # wait for it to complete since it holds
3371 # a lock on the file being fetched. The
3372 # portage.locks functions are only designed
3373 # to work between separate processes. Since
3374 # the lock is held by the current process,
3375 # use the scheduler and fetcher methods to
3376 # synchronize with the fetcher.
3377 prefetcher = self.prefetcher
3378 if prefetcher is None:
3380 elif not prefetcher.isAlive():
3382 elif prefetcher.poll() is None:
3384 waiting_msg = ("Fetching '%s' " + \
3385 "in the background. " + \
3386 "To view fetch progress, run `tail -f " + \
3387 "/var/log/emerge-fetch.log` in another " + \
3388 "terminal.") % prefetcher.pkg_path
3389 msg_prefix = colorize("GOOD", " * ")
3390 from textwrap import wrap
3391 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3392 for line in wrap(waiting_msg, 65))
3393 if not self.background:
3394 writemsg(waiting_msg, noiselevel=-1)
3396 self._current_task = prefetcher
3397 prefetcher.addExitListener(self._prefetch_exit)
3400 self._prefetch_exit(prefetcher)
3402 def _prefetch_exit(self, prefetcher):
3405 pkg_count = self.pkg_count
# For a real install, take the build dir lock and start from a clean dir.
3406 if not (self.opts.pretend or self.opts.fetchonly):
3407 self._build_dir.lock()
3409 shutil.rmtree(self._build_dir.dir_path)
3410 except EnvironmentError, e:
3411 if e.errno != errno.ENOENT:
3414 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3415 fetcher = BinpkgFetcher(background=self.background,
3416 logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3417 pretend=self.opts.pretend, scheduler=self.scheduler)
3418 pkg_path = fetcher.pkg_path
3419 self._pkg_path = pkg_path
# Only fetch when the package lives on a remote binhost.
3421 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3423 msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3424 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3425 short_msg = "emerge: (%s of %s) %s Fetch" % \
3426 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3427 self.logger.log(msg, short_msg=short_msg)
3428 self._start_task(fetcher, self._fetcher_exit)
3431 self._fetcher_exit(fetcher)
3433 def _fetcher_exit(self, fetcher):
3435 # The fetcher only has a returncode when
3436 # --getbinpkg is enabled.
3437 if fetcher.returncode is not None:
3438 self._fetched_pkg = True
3439 if self._default_exit(fetcher) != os.EX_OK:
3440 self._unlock_builddir()
3444 if self.opts.pretend:
3445 self._current_task = None
3446 self.returncode = os.EX_OK
# Verify the package digest before using it (skipped in pretend mode).
3454 logfile = self.settings.get("PORTAGE_LOG_FILE")
3455 verifier = BinpkgVerifier(background=self.background,
3456 logfile=logfile, pkg=self.pkg)
3457 self._start_task(verifier, self._verifier_exit)
3460 self._verifier_exit(verifier)
3462 def _verifier_exit(self, verifier):
3463 if verifier is not None and \
3464 self._default_exit(verifier) != os.EX_OK:
3465 self._unlock_builddir()
3469 logger = self.logger
3471 pkg_count = self.pkg_count
3472 pkg_path = self._pkg_path
# A freshly fetched package is injected into the local bintree index.
3474 if self._fetched_pkg:
3475 self._bintree.inject(pkg.cpv, filename=pkg_path)
3477 if self.opts.fetchonly:
3478 self._current_task = None
3479 self.returncode = os.EX_OK
3483 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3484 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3485 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3486 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3487 logger.log(msg, short_msg=short_msg)
3490 settings = self.settings
3491 ebuild_phase = EbuildPhase(background=self.background,
3492 pkg=pkg, phase=phase, scheduler=self.scheduler,
3493 settings=settings, tree=self._tree)
3495 self._start_task(ebuild_phase, self._clean_exit)
3497 def _clean_exit(self, clean_phase):
3498 if self._default_exit(clean_phase) != os.EX_OK:
3499 self._unlock_builddir()
# _unpack_metadata-style step: recreate dirs and extract build-info from
# the xpak segment of the .tbz2.
3503 dir_path = self._build_dir.dir_path
3506 shutil.rmtree(dir_path)
3507 except (IOError, OSError), e:
3508 if e.errno != errno.ENOENT:
3512 infloc = self._infloc
3514 pkg_path = self._pkg_path
3517 for mydir in (dir_path, self._image_dir, infloc):
3518 portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3519 gid=portage.data.portage_gid, mode=dir_mode)
3521 # This initializes PORTAGE_LOG_FILE.
3522 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3523 self._writemsg_level(">>> Extracting info\n")
3525 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3526 check_missing_metadata = ("CATEGORY", "PF")
3527 missing_metadata = set()
3528 for k in check_missing_metadata:
3529 v = pkg_xpak.getfile(k)
3531 missing_metadata.add(k)
3533 pkg_xpak.unpackinfo(infloc)
# Backfill CATEGORY/PF files that the xpak data did not provide.
3534 for k in missing_metadata:
3542 f = open(os.path.join(infloc, k), 'wb')
3548 # Store the md5sum in the vdb.
3549 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3551 f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3555 # This gives bashrc users an opportunity to do various things
3556 # such as remove binary packages after they're installed.
3557 settings = self.settings
3558 settings.setcpv(self.pkg)
3559 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3560 settings.backup_changes("PORTAGE_BINPKG_FILE")
# Setup goes through the scheduler's dedicated setup queue.
3563 setup_phase = EbuildPhase(background=self.background,
3564 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3565 settings=settings, tree=self._tree)
3567 setup_phase.addExitListener(self._setup_exit)
3568 self._current_task = setup_phase
3569 self.scheduler.scheduleSetup(setup_phase)
3571 def _setup_exit(self, setup_phase):
3572 if self._default_exit(setup_phase) != os.EX_OK:
3573 self._unlock_builddir()
3577 extractor = BinpkgExtractorAsync(background=self.background,
3578 image_dir=self._image_dir,
3579 pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3580 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3581 self._start_task(extractor, self._extractor_exit)
3583 def _extractor_exit(self, extractor):
3584 if self._final_exit(extractor) != os.EX_OK:
3585 self._unlock_builddir()
3586 writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
# Flush elog messages before releasing the lock; pretend/fetchonly never
# took the lock in the first place.
3590 def _unlock_builddir(self):
3591 if self.opts.pretend or self.opts.fetchonly:
3593 portage.elog.elog_process(self.pkg.cpv, self.settings)
3594 self._build_dir.unlock()
# install(): merge the extracted image, then drop PORTAGE_BINPKG_FILE and
# unlock in a cleanup step after execute().
3598 # This gives bashrc users an opportunity to do various things
3599 # such as remove binary packages after they're installed.
3600 settings = self.settings
3601 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3602 settings.backup_changes("PORTAGE_BINPKG_FILE")
3604 merge = EbuildMerge(find_blockers=self.find_blockers,
3605 ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3606 pkg=self.pkg, pkg_count=self.pkg_count,
3607 pkg_path=self._pkg_path, scheduler=self.scheduler,
3608 settings=settings, tree=self._tree, world_atom=self.world_atom)
3611 retval = merge.execute()
3613 settings.pop("PORTAGE_BINPKG_FILE", None)
3614 self._unlock_builddir()
# Fetches a .tbz2 from the configured binhost by spawning the user's
# FETCHCOMMAND/RESUMECOMMAND; also manages a lock on the target file.
3617 class BinpkgFetcher(SpawnProcess):
3619 __slots__ = ("pkg", "pretend",
3620 "locked", "pkg_path", "_lock_obj")
3622 def __init__(self, **kwargs):
3623 SpawnProcess.__init__(self, **kwargs)
# Destination path inside the local binary package tree.
3625 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
# _start(): build the fetch URI and command, then spawn it.
3633 pretend = self.pretend
3634 bintree = pkg.root_config.trees["bintree"]
3635 settings = bintree.settings
3636 use_locks = "distlocks" in settings.features
3637 pkg_path = self.pkg_path
3640 portage.util.ensure_dirs(os.path.dirname(pkg_path))
# A partial download listed in bintree.invalids may be resumed.
3643 exists = os.path.exists(pkg_path)
3644 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3645 if not (pretend or resume):
3646 # Remove existing file or broken symlink.
3652 # urljoin doesn't work correctly with
3653 # unrecognized protocols like sftp
3654 if bintree._remote_has_index:
3655 rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3657 rel_uri = pkg.cpv + ".tbz2"
3658 uri = bintree._remote_base_uri.rstrip("/") + \
3659 "/" + rel_uri.lstrip("/")
3661 uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3662 "/" + pkg.pf + ".tbz2"
# Pretend mode just prints the URI instead of fetching.
3665 portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3666 self.returncode = os.EX_OK
# Pick a protocol-specific command (e.g. FETCHCOMMAND_HTTP) before
# falling back to the generic one.
3670 protocol = urlparse.urlparse(uri)[0]
3671 fcmd_prefix = "FETCHCOMMAND"
3673 fcmd_prefix = "RESUMECOMMAND"
3674 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3676 fcmd = settings.get(fcmd_prefix)
3679 "DISTDIR" : os.path.dirname(pkg_path),
3681 "FILE" : os.path.basename(pkg_path)
3684 fetch_env = dict(settings.iteritems())
3685 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3686 for x in shlex.split(fcmd)]
3688 if self.fd_pipes is None:
3690 fd_pipes = self.fd_pipes
3692 # Redirect all output to stdout since some fetchers like
3693 # wget pollute stderr (if portage detects a problem then it
3694 # can send its own message to stderr).
3695 fd_pipes.setdefault(0, sys.stdin.fileno())
3696 fd_pipes.setdefault(1, sys.stdout.fileno())
3697 fd_pipes.setdefault(2, sys.stdout.fileno())
3699 self.args = fetch_args
3700 self.env = fetch_env
3701 SpawnProcess._start(self)
3703 def _set_returncode(self, wait_retval):
3704 SpawnProcess._set_returncode(self, wait_retval)
3705 if self.returncode == os.EX_OK:
3706 # If possible, update the mtime to match the remote package if
3707 # the fetcher didn't already do it automatically.
3708 bintree = self.pkg.root_config.trees["bintree"]
3709 if bintree._remote_has_index:
3710 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3711 if remote_mtime is not None:
3713 remote_mtime = long(remote_mtime)
3718 local_mtime = long(os.stat(self.pkg_path).st_mtime)
3722 if remote_mtime != local_mtime:
3724 os.utime(self.pkg_path,
3725 (remote_mtime, remote_mtime))
# lock(): take a lock file on the download target; docstring fragment
# below belongs to it.
3734 This raises an AlreadyLocked exception if lock() is called
3735 while a lock is already held. In order to avoid this, call
3736 unlock() or check whether the "locked" attribute is True
3737 or False before calling lock().
3739 if self._lock_obj is not None:
3740 raise self.AlreadyLocked((self._lock_obj,))
3742 self._lock_obj = portage.locks.lockfile(
3743 self.pkg_path, wantnewlockfile=1)
3746 class AlreadyLocked(portage.exception.PortageException):
# unlock(): no-op when not locked.
3750 if self._lock_obj is None:
3752 portage.locks.unlockfile(self._lock_obj)
3753 self._lock_obj = None
# Verifies a downloaded binary package against its digest; on failure the
# file is moved aside so a re-fetch can replace it.
3756 class BinpkgVerifier(AsynchronousTask):
3757 __slots__ = ("logfile", "pkg",)
3761 Note: Unlike a normal AsynchronousTask.start() method,
3762 this one does all work is synchronously. The returncode
3763 attribute will be set before it returns.
3767 root_config = pkg.root_config
3768 bintree = root_config.trees["bintree"]
# Temporarily redirect stdout/stderr into the log file in background mode.
3770 stdout_orig = sys.stdout
3771 stderr_orig = sys.stderr
3773 if self.background and self.logfile is not None:
3774 log_file = open(self.logfile, 'a')
3776 if log_file is not None:
3777 sys.stdout = log_file
3778 sys.stderr = log_file
3780 bintree.digestCheck(pkg)
3781 except portage.exception.FileNotFound:
3782 writemsg("!!! Fetching Binary failed " + \
3783 "for '%s'\n" % pkg.cpv, noiselevel=-1)
3785 except portage.exception.DigestException, e:
# e.value carries (filename, expected, got, reason)-style details.
3786 writemsg("\n!!! Digest verification failed:\n",
3788 writemsg("!!! %s\n" % e.value[0],
3790 writemsg("!!! Reason: %s\n" % e.value[1],
3792 writemsg("!!! Got: %s\n" % e.value[2],
3794 writemsg("!!! Expected: %s\n" % e.value[3],
# Move the corrupt file out of the way under a checksum-failure name.
3797 if rval != os.EX_OK:
3798 pkg_path = bintree.getname(pkg.cpv)
3799 head, tail = os.path.split(pkg_path)
3800 temp_filename = portage._checksum_failure_temp_file(head, tail)
3801 writemsg("File renamed to '%s'\n" % (temp_filename,),
# Always restore the original stdout/stderr.
3804 sys.stdout = stdout_orig
3805 sys.stderr = stderr_orig
3806 if log_file is not None:
3809 self.returncode = rval
class BinpkgPrefetcher(CompositeTask):
    """Prefetch a binary package: download it, verify its digest, and
    inject the verified file into the bintree.

    NOTE(review): reconstructed from a garbled extraction; the def
    lines of _start() and the early-exit statements were missing from
    the dump and restored as the conventional CompositeTask pattern --
    confirm against version control.
    """

    __slots__ = ("pkg",) + \
        ("pkg_path", "_bintree",)

    def _start(self):
        # Stage 1: fetch.  The exit handler chains to verification.
        self._bintree = self.pkg.root_config.trees["bintree"]
        fetcher = BinpkgFetcher(background=self.background,
            logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
            scheduler=self.scheduler)
        self.pkg_path = fetcher.pkg_path
        self._start_task(fetcher, self._fetcher_exit)

    def _fetcher_exit(self, fetcher):
        # Abort the chain if the fetch failed.
        if self._default_exit(fetcher) != os.EX_OK:
            self.wait()
            return

        # Stage 2: digest verification.
        verifier = BinpkgVerifier(background=self.background,
            logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
        self._start_task(verifier, self._verifier_exit)

    def _verifier_exit(self, verifier):
        if self._default_exit(verifier) != os.EX_OK:
            self.wait()
            return

        # Stage 3: make the verified package visible in the bintree.
        self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)

        self._current_task = None
        self.returncode = os.EX_OK
        self.wait()
class BinpkgExtractorAsync(SpawnProcess):
    """Unpack a binary package into an image directory by spawning a
    bash "bzip2 | tar" pipeline.

    NOTE(review): reconstructed from a garbled extraction; only the
    "def _start(self):" line was missing from the dump.
    """

    __slots__ = ("image_dir", "pkg", "pkg_path")

    _shell_binary = portage.const.BASH_BINARY

    def _start(self):
        # Both paths are shell-quoted before interpolation into the
        # pipeline command line.
        self.args = [self._shell_binary, "-c",
            "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
            (portage._shell_quote(self.pkg_path),
            portage._shell_quote(self.image_dir))]

        # Run the pipeline with the package's configured environment.
        self.env = self.pkg.root_config.settings.environ()
        SpawnProcess._start(self)
# NOTE(review): garbled extraction -- original line numbers fused into the
# text, indentation lost, and the def lines of the methods (_start, poll,
# wait, merge), several assignments (pkg = self.pkg, preposition), and
# early-return branches are missing.  Code left byte-identical.
#
# Purpose (visible): one merge-list entry.  Dispatches to EbuildBuild for
# "ebuild" packages or Binpkg for "binary" packages, prints/logs a status
# line, and for uninstalls runs PackageUninstall synchronously.
3861 class MergeListItem(CompositeTask):
3864 TODO: For parallel scheduling, everything here needs asynchronous
3865 execution support (start, poll, and wait methods).
3868 __slots__ = ("args_set",
3869 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3870 "find_blockers", "logger", "mtimedb", "pkg",
3871 "pkg_count", "pkg_to_replace", "prefetcher",
3872 "settings", "statusMessage", "world_atom") + \
# --- _start() body (def line missing from dump) ---
3878 build_opts = self.build_opts
# Uninstalls short-circuit here; the real work happens in self.merge().
3881 # uninstall, executed by self.merge()
3882 self.returncode = os.EX_OK
3886 args_set = self.args_set
3887 find_blockers = self.find_blockers
3888 logger = self.logger
3889 mtimedb = self.mtimedb
3890 pkg_count = self.pkg_count
3891 scheduler = self.scheduler
3892 settings = self.settings
3893 world_atom = self.world_atom
3894 ldpath_mtimes = mtimedb["ldpath"]
# Build the "(N of M)" progress message.
3896 action_desc = "Emerging"
3898 if pkg.type_name == "binary":
3899 action_desc += " binary"
3901 if build_opts.fetchonly:
3902 action_desc = "Fetching"
3904 msg = "%s (%s of %s) %s" % \
3906 colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3907 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3908 colorize("GOOD", pkg.cpv))
# Annotate the message with the repo name when it differs from PORTDIR's.
3910 portdb = pkg.root_config.trees["porttree"].dbapi
3911 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3912 if portdir_repo_name:
3913 pkg_repo_name = pkg.metadata.get("repository")
3914 if pkg_repo_name != portdir_repo_name:
3915 if not pkg_repo_name:
3916 pkg_repo_name = "unknown repo"
3917 msg += " from %s" % pkg_repo_name
# "preposition" is assigned in a missing line -- TODO confirm.
3920 msg += " %s %s" % (preposition, pkg.root)
3922 if not build_opts.pretend:
3923 self.statusMessage(msg)
3924 logger.log(" >>> emerge (%s of %s) %s to %s" % \
3925 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# Dispatch by package type: source build vs. binary install.
3927 if pkg.type_name == "ebuild":
3929 build = EbuildBuild(args_set=args_set,
3930 background=self.background,
3931 config_pool=self.config_pool,
3932 find_blockers=find_blockers,
3933 ldpath_mtimes=ldpath_mtimes, logger=logger,
3934 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3935 prefetcher=self.prefetcher, scheduler=scheduler,
3936 settings=settings, world_atom=world_atom)
3938 self._install_task = build
3939 self._start_task(build, self._default_final_exit)
3942 elif pkg.type_name == "binary":
3944 binpkg = Binpkg(background=self.background,
3945 find_blockers=find_blockers,
3946 ldpath_mtimes=ldpath_mtimes, logger=logger,
3947 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3948 prefetcher=self.prefetcher, settings=settings,
3949 scheduler=scheduler, world_atom=world_atom)
3951 self._install_task = binpkg
3952 self._start_task(binpkg, self._default_final_exit)
# --- poll()/wait() delegate to the install task (def lines missing) ---
3956 self._install_task.poll()
3957 return self.returncode
3960 self._install_task.wait()
3961 return self.returncode
# --- merge() body (def line missing) ---
3966 build_opts = self.build_opts
3967 find_blockers = self.find_blockers
3968 logger = self.logger
3969 mtimedb = self.mtimedb
3970 pkg_count = self.pkg_count
3971 prefetcher = self.prefetcher
3972 scheduler = self.scheduler
3973 settings = self.settings
3974 world_atom = self.world_atom
3975 ldpath_mtimes = mtimedb["ldpath"]
# Uninstall path: skipped for buildpkgonly/fetchonly/pretend.
3978 if not (build_opts.buildpkgonly or \
3979 build_opts.fetchonly or build_opts.pretend):
3981 uninstall = PackageUninstall(background=self.background,
3982 ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3983 pkg=pkg, scheduler=scheduler, settings=settings)
3986 retval = uninstall.wait()
3987 if retval != os.EX_OK:
3991 if build_opts.fetchonly or \
3992 build_opts.buildpkgonly:
3993 return self.returncode
3995 retval = self._install_task.install()
class PackageMerge(AsynchronousTask):
    """
    TODO: Implement asynchronous merge so that the scheduler can
    run while a merge is executing.
    """

    __slots__ = ("merge",)

    def _start(self):
        # NOTE(review): reconstructed from a garbled extraction; the
        # def line and the pkg.installed / pkg.root branch headers were
        # missing from the dump -- confirm against version control.
        pkg = self.merge.pkg
        pkg_count = self.merge.pkg_count

        if pkg.installed:
            action_desc = "Uninstalling"
            preposition = "from"
        else:
            action_desc = "Installing"
            preposition = "to"

        msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))

        if pkg.root != "/":
            msg += " %s %s" % (preposition, pkg.root)

        # Stay quiet for fetchonly/pretend/buildpkgonly runs.
        if not self.merge.build_opts.fetchonly and \
            not self.merge.build_opts.pretend and \
            not self.merge.build_opts.buildpkgonly:
            self.merge.statusMessage(msg)

        # The merge itself is synchronous; the returncode is final here.
        self.returncode = self.merge.merge()
        self.wait()
class DependencyArg(object):
    """Base class for command-line dependency arguments (atom,
    package, and set arguments)."""

    def __init__(self, arg=None, root_config=None):
        # arg: the raw argument string as given on the command line.
        # root_config: the RootConfig the argument applies to.
        self.arg = arg
        self.root_config = root_config

    def __str__(self):
        # Display the raw argument text.
        return str(self.arg)
class AtomArg(DependencyArg):
    """A dependency argument given as a single atom."""

    def __init__(self, atom=None, **kwargs):
        DependencyArg.__init__(self, **kwargs)
        self.atom = atom
        # Normalize to a portage.dep.Atom instance.
        if not isinstance(self.atom, portage.dep.Atom):
            self.atom = portage.dep.Atom(self.atom)
        # One-element tuple so an AtomArg can be iterated like a set.
        self.set = (self.atom, )
class PackageArg(DependencyArg):
    """A dependency argument that pins one specific package instance."""

    def __init__(self, package=None, **kwargs):
        DependencyArg.__init__(self, **kwargs)
        self.package = package
        # An "=" atom matches exactly this cpv.
        self.atom = portage.dep.Atom("=" + package.cpv)
        self.set = (self.atom, )
class SetArg(DependencyArg):
    """A dependency argument naming a package set (e.g. @world).

    NOTE(review): the "self.set = set" assignment was missing from the
    garbled dump and restored from context -- confirm against VCS.
    """

    def __init__(self, set=None, **kwargs):
        DependencyArg.__init__(self, **kwargs)
        self.set = set
        # Strip the SETPREFIX marker from the argument to get the set name.
        self.name = self.arg[len(SETPREFIX):]
class Dependency(SlotObject):
    # atom/blocker/parent/root describe one edge in the dependency
    # graph; priority and depth get defaults when left unset.
    __slots__ = ("atom", "blocker", "depth",
        "parent", "onlydeps", "priority", "root")

    def __init__(self, **kwargs):
        SlotObject.__init__(self, **kwargs)
        # Fill in defaults for fields the caller did not supply.
        if self.priority is None:
            self.priority = DepPriority()
        if self.depth is None:
            # NOTE(review): default restored from context (line missing
            # in the garbled dump) -- confirm against VCS.
            self.depth = 0
# NOTE(review): garbled extraction -- original line numbers fused into the
# text, indentation lost, and lines missing (the def of the cache-load
# helper, try: lines, counter/atoms unpacking, "locked" secpass check,
# f.close() calls).  Code left byte-identical.
#
# Purpose (visible): a MutableMapping persisting installed packages'
# blocker atoms in $ROOT/var/cache/edb(?)/vdb_blockers.pickle, keyed by
# cpv, validated on load and flushed to disk once enough entries change.
4070 class BlockerCache(portage.cache.mappings.MutableMapping):
4071 """This caches blockers of installed packages so that dep_check does not
4072 have to be done for every single installed package on every invocation of
4073 emerge. The cache is invalidated whenever it is detected that something
4074 has changed that might alter the results of dep_check() calls:
4075 1) the set of installed packages (including COUNTER) has changed
4076 2) the old-style virtuals have changed
4079 # Number of uncached packages to trigger cache update, since
4080 # it's wasteful to update it for every vdb change.
4081 _cache_threshold = 5
# Lightweight per-package record: installed COUNTER plus blocker atoms.
4083 class BlockerData(object):
4085 __slots__ = ("__weakref__", "atoms", "counter")
4087 def __init__(self, counter, atoms):
4088 self.counter = counter
4091 def __init__(self, myroot, vardb):
4093 self._virtuals = vardb.settings.getvirtuals()
4094 self._cache_filename = os.path.join(myroot,
4095 portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4096 self._cache_version = "1"
4097 self._cache_data = None
4098 self._modified = set()
# Load the pickled cache (enclosing try: line missing from dump).
4103 f = open(self._cache_filename, mode='rb')
4104 mypickle = pickle.Unpickler(f)
4105 self._cache_data = mypickle.load()
4108 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4109 if isinstance(e, pickle.UnpicklingError):
4110 writemsg("!!! Error loading '%s': %s\n" % \
4111 (self._cache_filename, str(e)), noiselevel=-1)
# Structural validation of the unpickled payload.
4114 cache_valid = self._cache_data and \
4115 isinstance(self._cache_data, dict) and \
4116 self._cache_data.get("version") == self._cache_version and \
4117 isinstance(self._cache_data.get("blockers"), dict)
4119 # Validate all the atoms and counters so that
4120 # corruption is detected as soon as possible.
4121 invalid_items = set()
4122 for k, v in self._cache_data["blockers"].iteritems():
4123 if not isinstance(k, basestring):
4124 invalid_items.add(k)
4127 if portage.catpkgsplit(k) is None:
4128 invalid_items.add(k)
4130 except portage.exception.InvalidData:
4131 invalid_items.add(k)
4133 if not isinstance(v, tuple) or \
4135 invalid_items.add(k)
# counter/atoms unpacking happens in a missing line -- TODO confirm.
4138 if not isinstance(counter, (int, long)):
4139 invalid_items.add(k)
4141 if not isinstance(atoms, (list, tuple)):
4142 invalid_items.add(k)
4144 invalid_atom = False
4146 if not isinstance(atom, basestring):
4149 if atom[:1] != "!" or \
4150 not portage.isvalidatom(
4151 atom, allow_blockers=True):
4155 invalid_items.add(k)
# Drop every entry flagged as corrupt.
4158 for k in invalid_items:
4159 del self._cache_data["blockers"][k]
4160 if not self._cache_data["blockers"]:
# Invalid/empty cache: start over from a fresh skeleton.
4164 self._cache_data = {"version":self._cache_version}
4165 self._cache_data["blockers"] = {}
4166 self._cache_data["virtuals"] = self._virtuals
4167 self._modified.clear()
# --- flush() (def line missing from dump) ---
4170 """If the current user has permission and the internal blocker cache
4171 been updated, save it to disk and mark it unmodified. This is called
4172 by emerge after it has proccessed blockers for all installed packages.
4173 Currently, the cache is only written if the user has superuser
4174 privileges (since that's required to obtain a lock), but all users
4175 have read access and benefit from faster blocker lookups (as long as
4176 the entire cache is still valid). The cache is stored as a pickled
4177 dict object with the following format:
4181 "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4182 "virtuals" : vardb.settings.getvirtuals()
4185 if len(self._modified) >= self._cache_threshold and \
4188 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
# Protocol -1: highest available pickle protocol.
4189 pickle.dump(self._cache_data, f, -1)
4191 portage.util.apply_secpass_permissions(
4192 self._cache_filename, gid=portage.portage_gid, mode=0644)
# Best-effort write: I/O failures are swallowed (Python 2 except syntax).
4193 except (IOError, OSError), e:
4195 self._modified.clear()
4197 def __setitem__(self, cpv, blocker_data):
4199 Update the cache and mark it as modified for a future call to
4202 @param cpv: Package for which to cache blockers.
4204 @param blocker_data: An object with counter and atoms attributes.
4205 @type blocker_data: BlockerData
# Atoms are stringified so the pickle stays plain-data.
4207 self._cache_data["blockers"][cpv] = \
4208 (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4209 self._modified.add(cpv)
# --- __iter__ (def line missing from dump) ---
4212 if self._cache_data is None:
4213 # triggered by python-trace
4215 return iter(self._cache_data["blockers"])
4217 def __delitem__(self, cpv):
4218 del self._cache_data["blockers"][cpv]
4220 def __getitem__(self, cpv):
4223 @returns: An object with counter and atoms attributes.
4225 return self.BlockerData(*self._cache_data["blockers"][cpv])
# NOTE(review): garbled extraction -- fused line numbers, lost indentation,
# missing lines (try/finally around dep_check, the "if not success:"
# branches, blocker_atoms initialization).  Code left byte-identical.
#
# Purpose (visible): compute the set of installed packages that block, or
# are blocked by, a to-be-installed package, using BlockerCache to avoid
# re-running dep_check for every installed package.
4227 class BlockerDB(object):
4229 def __init__(self, root_config):
4230 self._root_config = root_config
4231 self._vartree = root_config.trees["vartree"]
4232 self._portdb = root_config.trees["porttree"].dbapi
4234 self._dep_check_trees = None
4235 self._fake_vartree = None
# Lazily build (then sync) a FakeVartree plus the tree mapping that
# dep_check() expects; the fake tree stands in for both port and var trees.
4237 def _get_fake_vartree(self, acquire_lock=0):
4238 fake_vartree = self._fake_vartree
4239 if fake_vartree is None:
4240 fake_vartree = FakeVartree(self._root_config,
4241 acquire_lock=acquire_lock)
4242 self._fake_vartree = fake_vartree
4243 self._dep_check_trees = { self._vartree.root : {
4244 "porttree" : fake_vartree,
4245 "vartree" : fake_vartree,
4248 fake_vartree.sync(acquire_lock=acquire_lock)
4251 def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4252 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4253 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4254 settings = self._vartree.settings
# Anything left in stale_cache after the scan no longer exists in the vdb.
4255 stale_cache = set(blocker_cache)
4256 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4257 dep_check_trees = self._dep_check_trees
4258 vardb = fake_vartree.dbapi
4259 installed_pkgs = list(vardb)
4261 for inst_pkg in installed_pkgs:
4262 stale_cache.discard(inst_pkg.cpv)
4263 cached_blockers = blocker_cache.get(inst_pkg.cpv)
# COUNTER mismatch means the installed instance changed: discard cache.
4264 if cached_blockers is not None and \
4265 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4266 cached_blockers = None
4267 if cached_blockers is not None:
4268 blocker_atoms = cached_blockers.atoms
4270 # Use aux_get() to trigger FakeVartree global
4271 # updates on *DEPEND when appropriate.
4272 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
# _dep_check_strict is toggled around the call (try/finally lines missing).
4274 portage.dep._dep_check_strict = False
4275 success, atoms = portage.dep_check(depstr,
4276 vardb, settings, myuse=inst_pkg.use.enabled,
4277 trees=dep_check_trees, myroot=inst_pkg.root)
4279 portage.dep._dep_check_strict = True
# Failure branch (header missing): report the offending vdb entry.
4281 pkg_location = os.path.join(inst_pkg.root,
4282 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4283 portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4284 (pkg_location, atoms), noiselevel=-1)
# Keep only blocker atoms ("!"-prefixed) and cache them with the COUNTER.
4287 blocker_atoms = [atom for atom in atoms \
4288 if atom.startswith("!")]
4289 blocker_atoms.sort()
4290 counter = long(inst_pkg.metadata["COUNTER"])
4291 blocker_cache[inst_pkg.cpv] = \
4292 blocker_cache.BlockerData(counter, blocker_atoms)
4293 for cpv in stale_cache:
4294 del blocker_cache[cpv]
4295 blocker_cache.flush()
# Direction 1: installed packages whose blockers match new_pkg.
4297 blocker_parents = digraph()
4299 for pkg in installed_pkgs:
4300 for blocker_atom in blocker_cache[pkg.cpv].atoms:
4301 blocker_atom = blocker_atom.lstrip("!")
4302 blocker_atoms.append(blocker_atom)
4303 blocker_parents.add(blocker_atom, pkg)
4305 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4306 blocking_pkgs = set()
4307 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4308 blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4310 # Check for blockers in the other direction.
4311 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4313 portage.dep._dep_check_strict = False
4314 success, atoms = portage.dep_check(depstr,
4315 vardb, settings, myuse=new_pkg.use.enabled,
4316 trees=dep_check_trees, myroot=new_pkg.root)
4318 portage.dep._dep_check_strict = True
4320 # We should never get this far with invalid deps.
4321 show_invalid_depstring_notice(new_pkg, depstr, atoms)
# Direction 2: installed packages matched by new_pkg's own blockers.
4324 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4327 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4328 for inst_pkg in installed_pkgs:
# .next() probes for at least one match (enclosing try: line missing).
4330 blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4331 except (portage.exception.InvalidDependString, StopIteration):
4333 blocking_pkgs.add(inst_pkg)
4335 return blocking_pkgs
def show_invalid_depstring_notice(parent_node, depstring, error_msg):
    """Print a diagnostic for an invalid or corrupt *DEPEND string.

    parent_node is a (type, root, key, status) tuple identifying the
    package whose dependency string failed to parse; status "nomerge"
    means it is an installed package, so the user is pointed at the
    vdb entry instead of being told to contact the maintainer.

    NOTE(review): reconstructed from a garbled extraction; the
    "msg = []" and "else:" lines were missing from the dump -- confirm
    against version control.
    """
    msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
        "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
    p_type, p_root, p_key, p_status = parent_node
    msg = []
    if p_status == "nomerge":
        # Installed package: show where its *DEPEND files live.
        category, pf = portage.catsplit(p_key)
        pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
        msg.append("Portage is unable to process the dependencies of the ")
        msg.append("'%s' package. " % p_key)
        msg.append("In order to correct this problem, the package ")
        msg.append("should be uninstalled, reinstalled, or upgraded. ")
        msg.append("As a temporary workaround, the --nodeps option can ")
        msg.append("be used to ignore all dependencies. For reference, ")
        msg.append("the problematic dependencies can be found in the ")
        msg.append("*DEPEND files located in '%s/'." % pkg_location)
    else:
        msg.append("This package can not be installed. ")
        msg.append("Please notify the '%s' package maintainer " % p_key)
        msg.append("about this problem.")

    # Wrap the advice to 72 columns and emit at ERROR level.
    msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
    writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4362 class PackageVirtualDbapi(portage.dbapi):
4364 A dbapi-like interface class that represents the state of the installed
4365 package database as new packages are installed, replacing any packages
4366 that previously existed in the same slot. The main difference between
4367 this class and fakedbapi is that this one uses Package instances
4368 internally (passed in via cpv_inject() and cpv_remove() calls).
4370 def __init__(self, settings):
4371 portage.dbapi.__init__(self)
4372 self.settings = settings
4373 self._match_cache = {}
4379 Remove all packages.
4383 self._cp_map.clear()
4384 self._cpv_map.clear()
4387 obj = PackageVirtualDbapi(self.settings)
4388 obj._match_cache = self._match_cache.copy()
4389 obj._cp_map = self._cp_map.copy()
4390 for k, v in obj._cp_map.iteritems():
4391 obj._cp_map[k] = v[:]
4392 obj._cpv_map = self._cpv_map.copy()
4396 return self._cpv_map.itervalues()
4398 def __contains__(self, item):
4399 existing = self._cpv_map.get(item.cpv)
4400 if existing is not None and \
4405 def get(self, item, default=None):
4406 cpv = getattr(item, "cpv", None)
4410 type_name, root, cpv, operation = item
4412 existing = self._cpv_map.get(cpv)
4413 if existing is not None and \
4418 def match_pkgs(self, atom):
4419 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4421 def _clear_cache(self):
4422 if self._categories is not None:
4423 self._categories = None
4424 if self._match_cache:
4425 self._match_cache = {}
4427 def match(self, origdep, use_cache=1):
4428 result = self._match_cache.get(origdep)
4429 if result is not None:
4431 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4432 self._match_cache[origdep] = result
4435 def cpv_exists(self, cpv):
4436 return cpv in self._cpv_map
4438 def cp_list(self, mycp, use_cache=1):
4439 cachelist = self._match_cache.get(mycp)
4440 # cp_list() doesn't expand old-style virtuals
4441 if cachelist and cachelist[0].startswith(mycp):
4443 cpv_list = self._cp_map.get(mycp)
4444 if cpv_list is None:
4447 cpv_list = [pkg.cpv for pkg in cpv_list]
4448 self._cpv_sort_ascending(cpv_list)
4449 if not (not cpv_list and mycp.startswith("virtual/")):
4450 self._match_cache[mycp] = cpv_list
4454 return list(self._cp_map)
4457 return list(self._cpv_map)
4459 def cpv_inject(self, pkg):
4460 cp_list = self._cp_map.get(pkg.cp)
4463 self._cp_map[pkg.cp] = cp_list
4464 e_pkg = self._cpv_map.get(pkg.cpv)
4465 if e_pkg is not None:
4468 self.cpv_remove(e_pkg)
4469 for e_pkg in cp_list:
4470 if e_pkg.slot_atom == pkg.slot_atom:
4473 self.cpv_remove(e_pkg)
4476 self._cpv_map[pkg.cpv] = pkg
4479 def cpv_remove(self, pkg):
4480 old_pkg = self._cpv_map.get(pkg.cpv)
4483 self._cp_map[pkg.cp].remove(pkg)
4484 del self._cpv_map[pkg.cpv]
4487 def aux_get(self, cpv, wants):
4488 metadata = self._cpv_map[cpv].metadata
4489 return [metadata.get(x, "") for x in wants]
4491 def aux_update(self, cpv, values):
4492 self._cpv_map[cpv].metadata.update(values)
# depgraph: the core dependency-resolution class of emerge.  Its body
# continues well beyond this chunk; only the header and class attributes
# are visible here (garbled extraction, lines left byte-identical).
4495 class depgraph(object):
4497 pkg_tree_map = RootConfig.pkg_tree_map
4499 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
# NOTE(review): garbled extraction -- fused line numbers, lost indentation,
# and missing lines (self.trees/self.roots/self.mydbapi initialization,
# the graph_tree() helper definition, the "dbs = []" line, edebug flag).
# Code left byte-identical; do not edit without the pristine source.
#
# Purpose (visible): build per-root shadow trees (FakeVartree +
# PackageVirtualDbapi) so dependency resolution can model post-install
# state, assemble the db search order, and initialize the many bookkeeping
# structures used during graph construction.
4501 def __init__(self, settings, trees, myopts, myparams, spinner):
4502 self.settings = settings
4503 self.target_root = settings["ROOT"]
4504 self.myopts = myopts
4505 self.myparams = myparams
4507 if settings.get("PORTAGE_DEBUG", "") == "1":
4509 self.spinner = spinner
4510 self._running_root = trees["/"]["root_config"]
4511 self._opts_no_restart = Scheduler._opts_no_restart
4512 self.pkgsettings = {}
4513 # Maps slot atom to package for each Package added to the graph.
4514 self._slot_pkg_map = {}
4515 # Maps nodes to the reasons they were selected for reinstallation.
4516 self._reinstall_nodes = {}
4519 self._trees_orig = trees
4521 # Contains a filtered view of preferred packages that are selected
4522 # from available repositories.
4523 self._filtered_trees = {}
4524 # Contains installed packages and new packages that have been added
4526 self._graph_trees = {}
4527 # All Package instances
4528 self._pkg_cache = {}
4529 for myroot in trees:
4530 self.trees[myroot] = {}
4531 # Create a RootConfig instance that references
4532 # the FakeVartree instead of the real one.
4533 self.roots[myroot] = RootConfig(
4534 trees[myroot]["vartree"].settings,
4536 trees[myroot]["root_config"].setconfig)
4537 for tree in ("porttree", "bintree"):
4538 self.trees[myroot][tree] = trees[myroot][tree]
# The real vartree is shadowed by a FakeVartree sharing the Package cache.
4539 self.trees[myroot]["vartree"] = \
4540 FakeVartree(trees[myroot]["root_config"],
4541 pkg_cache=self._pkg_cache)
4542 self.pkgsettings[myroot] = portage.config(
4543 clone=self.trees[myroot]["vartree"].settings)
4544 self._slot_pkg_map[myroot] = {}
4545 vardb = self.trees[myroot]["vartree"].dbapi
4546 preload_installed_pkgs = "--nodeps" not in self.myopts and \
4547 "--buildpkgonly" not in self.myopts
4548 # This fakedbapi instance will model the state that the vdb will
4549 # have after new packages have been installed.
4550 fakedb = PackageVirtualDbapi(vardb.settings)
4551 if preload_installed_pkgs:
# Loop header over installed packages is missing from the dump.
4553 self.spinner.update()
4554 # This triggers metadata updates via FakeVartree.
4555 vardb.aux_get(pkg.cpv, [])
4556 fakedb.cpv_inject(pkg)
4558 # Now that the vardb state is cached in our FakeVartree,
4559 # we won't be needing the real vartree cache for awhile.
4560 # To make some room on the heap, clear the vardbapi
4562 trees[myroot]["vartree"].dbapi._clear_cache()
4565 self.mydbapi[myroot] = fakedb
# graph_tree is defined in missing lines (a local helper/namespace object).
4568 graph_tree.dbapi = fakedb
4569 self._graph_trees[myroot] = {}
4570 self._filtered_trees[myroot] = {}
4571 # Substitute the graph tree for the vartree in dep_check() since we
4572 # want atom selections to be consistent with package selections
4573 # have already been made.
4574 self._graph_trees[myroot]["porttree"] = graph_tree
4575 self._graph_trees[myroot]["vartree"] = graph_tree
4576 def filtered_tree():
4578 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4579 self._filtered_trees[myroot]["porttree"] = filtered_tree
4581 # Passing in graph_tree as the vartree here could lead to better
4582 # atom selections in some cases by causing atoms for packages that
4583 # have been added to the graph to be preferred over other choices.
4584 # However, it can trigger atom selections that result in
4585 # unresolvable direct circular dependencies. For example, this
4586 # happens with gwydion-dylan which depends on either itself or
4587 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4588 # gwydion-dylan-bin needs to be selected in order to avoid a
4589 # an unresolvable direct circular dependency.
4591 # To solve the problem described above, pass in "graph_db" so that
4592 # packages that have been added to the graph are distinguishable
4593 # from other available packages and installed packages. Also, pass
4594 # the parent package into self._select_atoms() calls so that
4595 # unresolvable direct circular dependencies can be detected and
4596 # avoided when possible.
4597 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4598 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
# Assemble the db search order ("dbs = []" line missing from dump).
4601 portdb = self.trees[myroot]["porttree"].dbapi
4602 bindb = self.trees[myroot]["bintree"].dbapi
4603 vardb = self.trees[myroot]["vartree"].dbapi
4604 # (db, pkg_type, built, installed, db_keys)
4605 if "--usepkgonly" not in self.myopts:
4606 db_keys = list(portdb._aux_cache_keys)
4607 dbs.append((portdb, "ebuild", False, False, db_keys))
4608 if "--usepkg" in self.myopts:
4609 db_keys = list(bindb._aux_cache_keys)
4610 dbs.append((bindb, "binary", True, False, db_keys))
4611 db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4612 dbs.append((vardb, "installed", True, True, db_keys))
4613 self._filtered_trees[myroot]["dbs"] = dbs
4614 if "--usepkg" in self.myopts:
4615 self.trees[myroot]["bintree"].populate(
4616 "--getbinpkg" in self.myopts,
4617 "--getbinpkgonly" in self.myopts)
4620 self.digraph=portage.digraph()
4621 # contains all sets added to the graph
4623 # contains atoms given as arguments
4624 self._sets["args"] = InternalPackageSet()
4625 # contains all atoms from all sets added to the graph, including
4626 # atoms given as arguments
4627 self._set_atoms = InternalPackageSet()
4628 self._atom_arg_map = {}
4629 # contains all nodes pulled in by self._set_atoms
4630 self._set_nodes = set()
4631 # Contains only Blocker -> Uninstall edges
4632 self._blocker_uninstalls = digraph()
4633 # Contains only Package -> Blocker edges
4634 self._blocker_parents = digraph()
4635 # Contains only irrelevant Package -> Blocker edges
4636 self._irrelevant_blockers = digraph()
4637 # Contains only unsolvable Package -> Blocker edges
4638 self._unsolvable_blockers = digraph()
4639 # Contains all Blocker -> Blocked Package edges
4640 self._blocked_pkgs = digraph()
4641 # Contains world packages that have been protected from
4642 # uninstallation but may not have been added to the graph
4643 # if the graph is not complete yet.
4644 self._blocked_world_pkgs = {}
4645 self._slot_collision_info = {}
4646 # Slot collision nodes are not allowed to block other packages since
4647 # blocker validation is only able to account for one package per slot.
4648 self._slot_collision_nodes = set()
4649 self._parent_atoms = {}
4650 self._slot_conflict_parent_atoms = set()
4651 self._serialized_tasks_cache = None
4652 self._scheduler_graph = None
4653 self._displayed_list = None
4654 self._pprovided_args = []
4655 self._missing_args = []
4656 self._masked_installed = set()
4657 self._unsatisfied_deps_for_display = []
4658 self._unsatisfied_blockers_for_display = None
4659 self._circular_deps_for_display = None
4660 self._dep_stack = []
4661 self._unsatisfied_deps = []
4662 self._initially_unsatisfied_deps = []
4663 self._ignored_deps = []
4664 self._required_set_names = set(["system", "world"])
# Strategy hooks: highest-available selection is the default policy.
4665 self._select_atoms = self._select_atoms_highest_available
4666 self._select_package = self._select_pkg_highest_available
4667 self._highest_pkg_cache = {}
# NOTE(review): garbled extraction -- fused line numbers, lost indentation,
# and missing lines (early returns, "msg = []", max_parents/indent values,
# pruned_list initialization, break statements, explanations counter).
# Code left byte-identical.
#
# Purpose (visible): print a slot-conflict report to stderr -- for each
# colliding slot, the conflicting package instances, a pruned list of the
# parent atoms that pulled each one in, and (when derivable) a wrapped
# explanation with a suggested fix.
4669 def _show_slot_collision_notice(self):
4670 """Show an informational message advising the user to mask one of the
4671 the packages. In some cases it may be possible to resolve this
4672 automatically, but support for backtracking (removal nodes that have
4673 already been selected) will be required in order to handle all possible
4677 if not self._slot_collision_info:
4680 self._show_merge_list()
4683 msg.append("\n!!! Multiple package instances within a single " + \
4684 "package slot have been pulled\n")
4685 msg.append("!!! into the dependency graph, resulting" + \
4686 " in a slot conflict:\n\n")
4688 # Max number of parents shown, to avoid flooding the display.
4690 explanation_columns = 70
4692 for (slot_atom, root), slot_nodes \
4693 in self._slot_collision_info.iteritems():
4694 msg.append(str(slot_atom))
4697 for node in slot_nodes:
4699 msg.append(str(node))
4700 parent_atoms = self._parent_atoms.get(node)
# pruned_list is a set built in missing lines -- TODO confirm.
4703 # Prefer conflict atoms over others.
4704 for parent_atom in parent_atoms:
4705 if len(pruned_list) >= max_parents:
4707 if parent_atom in self._slot_conflict_parent_atoms:
4708 pruned_list.add(parent_atom)
4710 # If this package was pulled in by conflict atoms then
4711 # show those alone since those are the most interesting.
4713 # When generating the pruned list, prefer instances
4714 # of DependencyArg over instances of Package.
4715 for parent_atom in parent_atoms:
4716 if len(pruned_list) >= max_parents:
4718 parent, atom = parent_atom
4719 if isinstance(parent, DependencyArg):
4720 pruned_list.add(parent_atom)
4721 # Prefer Packages instances that themselves have been
4722 # pulled into collision slots.
4723 for parent_atom in parent_atoms:
4724 if len(pruned_list) >= max_parents:
4726 parent, atom = parent_atom
4727 if isinstance(parent, Package) and \
4728 (parent.slot_atom, parent.root) \
4729 in self._slot_collision_info:
4730 pruned_list.add(parent_atom)
4731 for parent_atom in parent_atoms:
4732 if len(pruned_list) >= max_parents:
4734 pruned_list.add(parent_atom)
4735 omitted_parents = len(parent_atoms) - len(pruned_list)
4736 parent_atoms = pruned_list
4737 msg.append(" pulled in by\n")
4738 for parent_atom in parent_atoms:
4739 parent, atom = parent_atom
4740 msg.append(2*indent)
4741 if isinstance(parent,
4742 (PackageArg, AtomArg)):
4743 # For PackageArg and AtomArg types, it's
4744 # redundant to display the atom attribute.
4745 msg.append(str(parent))
4747 # Display the specific atom from SetArg or
4749 msg.append("%s required by %s" % (atom, parent))
4752 msg.append(2*indent)
4753 msg.append("(and %d more)\n" % omitted_parents)
4755 msg.append(" (no parents)\n")
4757 explanation = self._slot_conflict_explanation(slot_nodes)
4760 msg.append(indent + "Explanation:\n\n")
4761 for line in textwrap.wrap(explanation, explanation_columns):
4762 msg.append(2*indent + line + "\n")
4765 sys.stderr.write("".join(msg))
# Skip the generic advice when every conflict already got an explanation.
4768 explanations_for_all = explanations == len(self._slot_collision_info)
4770 if explanations_for_all or "--quiet" in self.myopts:
4774 msg.append("It may be possible to solve this problem ")
4775 msg.append("by using package.mask to prevent one of ")
4776 msg.append("those packages from being selected. ")
4777 msg.append("However, it is also possible that conflicting ")
4778 msg.append("dependencies exist such that they are impossible to ")
4779 msg.append("satisfy simultaneously. If such a conflict exists in ")
4780 msg.append("the dependencies of two different packages, then those ")
4781 msg.append("packages can not be installed simultaneously.")
# formatter module is used for word-wrapped stderr output (deprecated in
# later Python versions -- fine for this Python 2 codebase).
4783 from formatter import AbstractFormatter, DumbWriter
4784 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4786 f.add_flowing_data(x)
4790 msg.append("For more information, see MASKED PACKAGES ")
4791 msg.append("section in the emerge man page or refer ")
4792 msg.append("to the Gentoo Handbook.")
4794 f.add_flowing_data(x)
# NOTE(review): garbled extraction -- fused line numbers, lost indentation,
# and missing lines ("return None" statements, matched_node initialization,
# the USE-dep check on atoms, final return).  Code left byte-identical.
#
# Purpose (visible): for a two-package slot conflict caused by USE deps,
# classify it as case 1 or 2 below and return a human-readable suggestion
# string; returns None for every case it cannot explain.
4798 def _slot_conflict_explanation(self, slot_nodes):
4800 When a slot conflict occurs due to USE deps, there are a few
4801 different cases to consider:
4803 1) New USE are correctly set but --newuse wasn't requested so an
4804 installed package with incorrect USE happened to get pulled
4805 into graph before the new one.
4807 2) New USE are incorrectly set but an installed package has correct
4808 USE so it got pulled into the graph, and a new instance also got
4809 pulled in due to --newuse or an upgrade.
4811 3) Multiple USE deps exist that can't be satisfied simultaneously,
4812 and multiple package instances got pulled into the same slot to
4813 satisfy the conflicting deps.
4815 Currently, explanations and suggested courses of action are generated
4816 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4819 if len(slot_nodes) != 2:
4820 # Suggestions are only implemented for
4821 # conflicts between two packages.
4824 all_conflict_atoms = self._slot_conflict_parent_atoms
# matched_node is initialized in a missing line -- TODO confirm.
4826 matched_atoms = None
4827 unmatched_node = None
4828 for node in slot_nodes:
4829 parent_atoms = self._parent_atoms.get(node)
4830 if not parent_atoms:
4831 # Normally, there are always parent atoms. If there are
4832 # none then something unexpected is happening and there's
4833 # currently no suggestion for this case.
4835 conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4836 for parent_atom in conflict_atoms:
4837 parent, atom = parent_atom
4839 # Suggestions are currently only implemented for cases
4840 # in which all conflict atoms have USE deps.
4843 if matched_node is not None:
4844 # If conflict atoms match multiple nodes
4845 # then there's no suggestion.
4848 matched_atoms = conflict_atoms
4850 if unmatched_node is not None:
4851 # Neither node is matched by conflict atoms, and
4852 # there is no suggestion for this case.
4854 unmatched_node = node
4856 if matched_node is None or unmatched_node is None:
4857 # This shouldn't happen.
# Case 1: installed instance with stale USE slipped into the graph.
4860 if unmatched_node.installed and not matched_node.installed:
4861 return "New USE are correctly set, but --newuse wasn't" + \
4862 " requested, so an installed package with incorrect USE " + \
4863 "happened to get pulled into the dependency graph. " + \
4864 "In order to solve " + \
4865 "this, either specify the --newuse option or explicitly " + \
4866 " reinstall '%s'." % matched_node.slot_atom
# Case 2: new instance's USE is wrong; list the atoms to satisfy.
4868 if matched_node.installed and not unmatched_node.installed:
4869 atoms = sorted(set(atom for parent, atom in matched_atoms))
4870 explanation = ("New USE for '%s' are incorrectly set. " + \
4871 "In order to solve this, adjust USE to satisfy '%s'") % \
4872 (matched_node.slot_atom, atoms[0])
4874 for atom in atoms[1:-1]:
4875 explanation += ", '%s'" % (atom,)
4878 explanation += " and '%s'" % (atoms[-1],)
# Populate self._slot_conflict_parent_atoms (and each package's
# _parent_atoms entry) with the specific (parent, atom) pairs that only
# match a subset of the packages competing for a slot.  (Numbering gaps
# mark lines elided from this excerpt.)
4884 def _process_slot_conflicts(self):
4886 Process slot conflict data to identify specific atoms which
4887 lead to conflict. These atoms only match a subset of the
4888 packages that have been pulled into a given slot.
4890 for (slot_atom, root), slot_nodes \
4891 in self._slot_collision_info.iteritems():
# First pass: union of every parent atom over all packages in the slot.
4893 all_parent_atoms = set()
4894 for pkg in slot_nodes:
4895 parent_atoms = self._parent_atoms.get(pkg)
4896 if not parent_atoms:
4898 all_parent_atoms.update(parent_atoms)
# Second pass: any atom from the union that matches this pkg but was
# not already among its own parent atoms is a conflict atom.
4900 for pkg in slot_nodes:
4901 parent_atoms = self._parent_atoms.get(pkg)
4902 if parent_atoms is None:
4903 parent_atoms = set()
4904 self._parent_atoms[pkg] = parent_atoms
4905 for parent_atom in all_parent_atoms:
4906 if parent_atom in parent_atoms:
4908 # Use package set for matching since it will match via
4909 # PROVIDE when necessary, while match_from_list does not.
4910 parent, atom = parent_atom
4911 atom_set = InternalPackageSet(
4912 initial_atoms=(atom,))
4913 if atom_set.findAtomForPackage(pkg):
4914 parent_atoms.add(parent_atom)
4916 self._slot_conflict_parent_atoms.add(parent_atom)
# Compare old vs. current USE/IUSE to decide whether changed flags
# require a reinstall.  (Numbering gaps mark elided lines; the final
# "return flags or None" logic is among them -- TODO confirm.)
4918 def _reinstall_for_flags(self, forced_flags,
4919 orig_use, orig_iuse, cur_use, cur_iuse):
4920 """Return a set of flags that trigger reinstallation, or None if there
4921 are no such flags."""
# --newuse: any IUSE addition/removal (minus forced flags) counts, plus
# any flag whose enabled state changed.
4922 if "--newuse" in self.myopts:
4923 flags = set(orig_iuse.symmetric_difference(
4924 cur_iuse).difference(forced_flags))
4925 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4926 cur_iuse.intersection(cur_use)))
# --reinstall=changed-use: only flags whose enabled state changed.
4929 elif "changed-use" == self.myopts.get("--reinstall"):
4930 flags = orig_iuse.intersection(orig_use).symmetric_difference(
4931 cur_iuse.intersection(cur_use))
# Drain self._dep_stack, dispatching Package entries to _add_pkg_deps
# and other (Dependency) entries to _add_dep.  The loop header and the
# success/failure returns are among the elided lines (numbering gaps).
4936 def _create_graph(self, allow_unsatisfied=False):
4937 dep_stack = self._dep_stack
4939 self.spinner.update()
4940 dep = dep_stack.pop()
4941 if isinstance(dep, Package):
4942 if not self._add_pkg_deps(dep,
4943 allow_unsatisfied=allow_unsatisfied):
4946 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
# Resolve a single Dependency: handle blockers, select a package to
# satisfy the atom, record unsatisfied deps, and hand the chosen
# package to _add_pkg.  (Numbering gaps mark lines elided from this
# excerpt, including several returns and the blocker condition.)
4950 def _add_dep(self, dep, allow_unsatisfied=False):
4951 debug = "--debug" in self.myopts
4952 buildpkgonly = "--buildpkgonly" in self.myopts
4953 nodeps = "--nodeps" in self.myopts
4954 empty = "empty" in self.myparams
4955 deep = "deep" in self.myparams
4956 update = "--update" in self.myopts and dep.depth <= 1
# Blocker handling: register the blocker against its parent unless the
# parent is --onlydeps or already involved in a slot collision.
4958 if not buildpkgonly and \
4960 dep.parent not in self._slot_collision_nodes:
4961 if dep.parent.onlydeps:
4962 # It's safe to ignore blockers if the
4963 # parent is an --onlydeps node.
4965 # The blocker applies to the root where
4966 # the parent is or will be installed.
4967 blocker = Blocker(atom=dep.atom,
4968 eapi=dep.parent.metadata["EAPI"],
4969 root=dep.parent.root)
4970 self._blocker_parents.add(blocker, dep.parent)
4972 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
4973 onlydeps=dep.onlydeps)
# No package satisfies the atom (dep_pkg is None in the elided guard).
4975 if dep.priority.optional:
4976 # This could be an unnecessary build-time dep
4977 # pulled in by --with-bdeps=y.
4979 if allow_unsatisfied:
4980 self._unsatisfied_deps.append(dep)
4982 self._unsatisfied_deps_for_display.append(
4983 ((dep.root, dep.atom), {"myparent":dep.parent}))
4985 # In some cases, dep_check will return deps that shouldn't
4986 # be processed any further, so they are identified and
4987 # discarded here. Try to discard as few as possible since
4988 # discarded dependencies reduce the amount of information
4989 # available for optimization of merge order.
4990 if dep.priority.satisfied and \
4991 not dep_pkg.installed and \
4992 not (existing_node or empty or deep or update):
4994 if dep.root == self.target_root:
4996 myarg = self._iter_atoms_for_pkg(dep_pkg).next()
4997 except StopIteration:
4999 except portage.exception.InvalidDependString:
5000 if not dep_pkg.installed:
5001 # This shouldn't happen since the package
5002 # should have been masked.
5005 self._ignored_deps.append(dep)
5008 if not self._add_pkg(dep_pkg, dep):
# Add a selected package to the dependency graph: detect slot
# collisions, register parent atoms, inject the package into the fake
# dbapi, and queue it for dependency traversal.  (Numbering gaps mark
# lines elided from this excerpt, including try:/return statements.)
5012 def _add_pkg(self, pkg, dep):
5019 myparent = dep.parent
5020 priority = dep.priority
5022 if priority is None:
5023 priority = DepPriority()
5025 Fills the digraph with nodes comprised of packages to merge.
5026 mybigkey is the package spec of the package to merge.
5027 myparent is the package depending on mybigkey ( or None )
5028 addme = Should we add this package to the digraph or are we just looking at it's deps?
5029 Think --onlydeps, we need to ignore packages in that case.
5032 #IUSE-aware emerge -> USE DEP aware depgraph
5033 #"no downgrade" emerge
5035 # Ensure that the dependencies of the same package
5036 # are never processed more than once.
5037 previously_added = pkg in self.digraph
5039 # select the correct /var database that we'll be checking against
5040 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5041 pkgsettings = self.pkgsettings[pkg.root]
# Collect the command-line arguments (if any) that match this package;
# an invalid PROVIDE aborts unless the package is already installed.
5046 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5047 except portage.exception.InvalidDependString, e:
5048 if not pkg.installed:
5049 show_invalid_depstring_notice(
5050 pkg, pkg.metadata["PROVIDE"], str(e))
5054 if not pkg.onlydeps:
5055 if not pkg.installed and \
5056 "empty" not in self.myparams and \
5057 vardbapi.match(pkg.slot_atom):
5058 # Increase the priority of dependencies on packages that
5059 # are being rebuilt. This optimizes merge order so that
5060 # dependencies are rebuilt/updated as soon as possible,
5061 # which is needed especially when emerge is called by
5062 # revdep-rebuild since dependencies may be affected by ABI
5063 # breakage that has rendered them useless. Don't adjust
5064 # priority here when in "empty" mode since all packages
5065 # are being merged in that case.
5066 priority.rebuild = True
# Check whether another package already occupies this slot in the graph.
5068 existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5069 slot_collision = False
5071 existing_node_matches = pkg.cpv == existing_node.cpv
5072 if existing_node_matches and \
5073 pkg != existing_node and \
5074 dep.atom is not None:
5075 # Use package set for matching since it will match via
5076 # PROVIDE when necessary, while match_from_list does not.
5077 atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5078 if not atom_set.findAtomForPackage(existing_node):
5079 existing_node_matches = False
5080 if existing_node_matches:
5081 # The existing node can be reused.
5083 for parent_atom in arg_atoms:
5084 parent, atom = parent_atom
5085 self.digraph.add(existing_node, parent,
5087 self._add_parent_atom(existing_node, parent_atom)
5088 # If a direct circular dependency is not an unsatisfied
5089 # buildtime dependency then drop it here since otherwise
5090 # it can skew the merge order calculation in an unwanted
5092 if existing_node != myparent or \
5093 (priority.buildtime and not priority.satisfied):
5094 self.digraph.addnode(existing_node, myparent,
5096 if dep.atom is not None and dep.parent is not None:
5097 self._add_parent_atom(existing_node,
5098 (dep.parent, dep.atom))
5102 # A slot collision has occurred. Sometimes this coincides
5103 # with unresolvable blockers, so the slot collision will be
5104 # shown later if there are no unresolvable blockers.
5105 self._add_slot_conflict(pkg)
5106 slot_collision = True
5109 # Now add this node to the graph so that self.display()
5110 # can show use flags and --tree portage.output. This node is
5111 # only being partially added to the graph. It must not be
5112 # allowed to interfere with the other nodes that have been
5113 # added. Do not overwrite data for existing nodes in
5114 # self.mydbapi since that data will be used for blocker
5116 # Even though the graph is now invalid, continue to process
5117 # dependencies so that things like --fetchonly can still
5118 # function despite collisions.
# Normal path: record the package in the slot map and fake dbapi.
5120 elif not previously_added:
5121 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5122 self.mydbapi[pkg.root].cpv_inject(pkg)
5123 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5125 if not pkg.installed:
5126 # Allow this package to satisfy old-style virtuals in case it
5127 # doesn't already. Any pre-existing providers will be preferred
5130 pkgsettings.setinst(pkg.cpv, pkg.metadata)
5131 # For consistency, also update the global virtuals.
5132 settings = self.roots[pkg.root].settings
5134 settings.setinst(pkg.cpv, pkg.metadata)
5136 except portage.exception.InvalidDependString, e:
5137 show_invalid_depstring_notice(
5138 pkg, pkg.metadata["PROVIDE"], str(e))
5143 self._set_nodes.add(pkg)
5145 # Do this even when addme is False (--onlydeps) so that the
5146 # parent/child relationship is always known in case
5147 # self._show_slot_collision_notice() needs to be called later.
5148 self.digraph.add(pkg, myparent, priority=priority)
5149 if dep.atom is not None and dep.parent is not None:
5150 self._add_parent_atom(pkg, (dep.parent, dep.atom))
5153 for parent_atom in arg_atoms:
5154 parent, atom = parent_atom
5155 self.digraph.add(pkg, parent, priority=priority)
5156 self._add_parent_atom(pkg, parent_atom)
5158 """ This section determines whether we go deeper into dependencies or not.
5159 We want to go deeper on a few occasions:
5160 Installing package A, we need to make sure package A's deps are met.
5161 emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
5162 If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
# Installed packages without "deep" have their deps deferred to
# self._ignored_deps instead of the active stack.
5164 dep_stack = self._dep_stack
5165 if "recurse" not in self.myparams:
5167 elif pkg.installed and \
5168 "deep" not in self.myparams:
5169 dep_stack = self._ignored_deps
5171 self.spinner.update()
5176 if not previously_added:
5177 dep_stack.append(pkg)
5180 def _add_parent_atom(self, pkg, parent_atom):
5181 parent_atoms = self._parent_atoms.get(pkg)
5182 if parent_atoms is None:
5183 parent_atoms = set()
5184 self._parent_atoms[pkg] = parent_atoms
5185 parent_atoms.add(parent_atom)
# Record pkg as a slot-collision node and group it (with the package
# already occupying the slot) under (slot_atom, root) in
# self._slot_collision_info.  The set initialization for a new slot_key
# is among the elided lines (numbering gap at 5192).
5187 def _add_slot_conflict(self, pkg):
5188 self._slot_collision_nodes.add(pkg)
5189 slot_key = (pkg.slot_atom, pkg.root)
5190 slot_nodes = self._slot_collision_info.get(slot_key)
5191 if slot_nodes is None:
# Also include the package currently holding the slot in the graph.
5193 slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5194 self._slot_collision_info[slot_key] = slot_nodes
# Expand a package's DEPEND/RDEPEND/PDEPEND strings into Dependency
# objects and feed each one to _add_dep.  (Numbering gaps mark lines
# elided from this excerpt, including try:/return/continue statements
# and the definitions of jbigkey/mykey/myroot/edepend/deps.)
5197 def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5199 mytype = pkg.type_name
5202 metadata = pkg.metadata
5203 myuse = pkg.use.enabled
5205 depth = pkg.depth + 1
5206 removal_action = "remove" in self.myparams
5209 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5211 edepend[k] = metadata[k]
# --buildpkgonly without deep/empty: runtime deps are irrelevant since
# nothing will actually be installed.
5213 if not pkg.built and \
5214 "--buildpkgonly" in self.myopts and \
5215 "deep" not in self.myparams and \
5216 "empty" not in self.myparams:
5217 edepend["RDEPEND"] = ""
5218 edepend["PDEPEND"] = ""
5219 bdeps_optional = False
5221 if pkg.built and not removal_action:
5222 if self.myopts.get("--with-bdeps", "n") == "y":
5223 # Pull in build time deps as requested, but mark them as
5224 # "optional" since they are not strictly required. This allows
5225 # more freedom in the merge order calculation for solving
5226 # circular dependencies. Don't convert to PDEPEND since that
5227 # could make --with-bdeps=y less effective if it is used to
5228 # adjust merge order to prevent built_with_use() calls from
5230 bdeps_optional = True
5232 # built packages do not have build time dependencies.
5233 edepend["DEPEND"] = ""
5235 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5236 edepend["DEPEND"] = ""
# Build-time deps resolve against "/", runtime deps against the
# package's own root.
5239 ("/", edepend["DEPEND"],
5240 self._priority(buildtime=(not bdeps_optional),
5241 optional=bdeps_optional)),
5242 (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5243 (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5246 debug = "--debug" in self.myopts
5247 strict = mytype != "installed"
5249 for dep_root, dep_string, dep_priority in deps:
5254 print "Parent: ", jbigkey
5255 print "Depstring:", dep_string
5256 print "Priority:", dep_priority
5257 vardb = self.roots[dep_root].trees["vartree"].dbapi
5259 selected_atoms = self._select_atoms(dep_root,
5260 dep_string, myuse=myuse, parent=pkg, strict=strict,
5261 priority=dep_priority)
5262 except portage.exception.InvalidDependString, e:
5263 show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5266 print "Candidates:", selected_atoms
5268 for atom in selected_atoms:
5271 atom = portage.dep.Atom(atom)
5273 mypriority = dep_priority.copy()
# A non-blocker atom already satisfied by an installed package gets a
# "satisfied" priority, which relaxes merge-order constraints.
5274 if not atom.blocker and vardb.match(atom):
5275 mypriority.satisfied = True
5277 if not self._add_dep(Dependency(atom=atom,
5278 blocker=atom.blocker, depth=depth, parent=pkg,
5279 priority=mypriority, root=dep_root),
5280 allow_unsatisfied=allow_unsatisfied):
5283 except portage.exception.InvalidAtom, e:
5284 show_invalid_depstring_notice(
5285 pkg, dep_string, str(e))
5287 if not pkg.installed:
5291 print "Exiting...", jbigkey
# Unqualified atoms (no category) with multiple matches are fatal.
5292 except portage.exception.AmbiguousPackageName, e:
5294 portage.writemsg("\n\n!!! An atom in the dependencies " + \
5295 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5297 portage.writemsg(" %s\n" % cpv, noiselevel=-1)
5298 portage.writemsg("\n", noiselevel=-1)
5299 if mytype == "binary":
5301 "!!! This binary package cannot be installed: '%s'\n" % \
5302 mykey, noiselevel=-1)
5303 elif mytype == "ebuild":
5304 portdb = self.roots[myroot].trees["porttree"].dbapi
5305 myebuild, mylocation = portdb.findname2(mykey)
5306 portage.writemsg("!!! This ebuild cannot be installed: " + \
5307 "'%s'\n" % myebuild, noiselevel=-1)
5308 portage.writemsg("!!! Please notify the package maintainer " + \
5309 "that atoms must be fully-qualified.\n", noiselevel=-1)
# Construct a dep-priority object appropriate for the current action:
# UnmergeDepPriority during "remove" runs, otherwise DepPriority (the
# `else:` on the original line 5316 is elided from this excerpt).
5313 def _priority(self, **kwargs):
5314 if "remove" in self.myparams:
5315 priority_constructor = UnmergeDepPriority
5317 priority_constructor = DepPriority
5318 return priority_constructor(**kwargs)
# Expand a category-less atom into the list of fully-qualified atoms
# whose category actually contains a matching package.  (Numbering gaps
# mark elided lines, including the categories/deps initialization and
# the final return.)
5320 def _dep_expand(self, root_config, atom_without_category):
5322 @param root_config: a root config instance
5323 @type root_config: RootConfig
5324 @param atom_without_category: an atom without a category component
5325 @type atom_without_category: String
5327 @returns: a list of atoms containing categories (possibly empty)
# Insert a dummy "null" category so the atom parses, then extract the
# package-name part.
5329 null_cp = portage.dep_getkey(insert_category_into_atom(
5330 atom_without_category, "null"))
5331 cat, atom_pn = portage.catsplit(null_cp)
5333 dbs = self._filtered_trees[root_config.root]["dbs"]
# Scan every db's categories for one containing this package name.
5335 for db, pkg_type, built, installed, db_keys in dbs:
5336 for cat in db.categories:
5337 if db.cp_list("%s/%s" % (cat, atom_pn)):
5341 for cat in categories:
5342 deps.append(insert_category_into_atom(
5343 atom_without_category, cat))
# Check whether any filtered-tree db for this root carries a package
# under atom_cp (used by _iter_atoms_for_pkg to detect new-style
# virtuals).  The boolean init/return lines are elided (numbering gaps).
5346 def _have_new_virt(self, root, atom_cp):
5348 for db, pkg_type, built, installed, db_keys in \
5349 self._filtered_trees[root]["dbs"]:
5350 if db.cp_list(atom_cp):
# Yield the command-line argument objects whose atoms match pkg,
# skipping atoms better satisfied by a new-style virtual or a visible
# higher-slot package.  (Numbering gaps mark elided lines, including
# the `return`/`continue`/`yield` statements.)
5355 def _iter_atoms_for_pkg(self, pkg):
5356 # TODO: add multiple $ROOT support
5357 if pkg.root != self.target_root:
5359 atom_arg_map = self._atom_arg_map
5360 root_config = self.roots[pkg.root]
5361 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5362 atom_cp = portage.dep_getkey(atom)
# Old-style virtual matched via PROVIDE, but a new-style virtual
# package exists for the same cp -- prefer the new-style one.
5363 if atom_cp != pkg.cp and \
5364 self._have_new_virt(pkg.root, atom_cp):
5366 visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5367 visible_pkgs.reverse() # descending order
# Look for a visible, strictly higher version in a different slot.
5369 for visible_pkg in visible_pkgs:
5370 if visible_pkg.cp != atom_cp:
5372 if pkg >= visible_pkg:
5373 # This is descending order, and we're not
5374 # interested in any versions <= pkg given.
5376 if pkg.slot_atom != visible_pkg.slot_atom:
5377 higher_slot = visible_pkg
5379 if higher_slot is not None:
5381 for arg in atom_arg_map[(atom, pkg.root)]:
5382 if isinstance(arg, PackageArg) and \
# Entry point for turning command-line arguments (.tbz2 files, .ebuild
# paths, filesystem paths, sets, and atoms) into graph arguments,
# then building the dependency graph.  Returns (success, favorites).
# (Numbering gaps mark lines elided from this excerpt, including many
# for/try/else/return statements.)
5387 def select_files(self, myfiles):
5388 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
5389 appropriate depgraph and return a favorite list."""
5390 debug = "--debug" in self.myopts
5391 root_config = self.roots[self.target_root]
5392 sets = root_config.sets
5393 getSetAtoms = root_config.setconfig.getSetAtoms
5395 myroot = self.target_root
5396 dbs = self._filtered_trees[myroot]["dbs"]
5397 vardb = self.trees[myroot]["vartree"].dbapi
5398 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5399 portdb = self.trees[myroot]["porttree"].dbapi
5400 bindb = self.trees[myroot]["bintree"].dbapi
5401 pkgsettings = self.pkgsettings[myroot]
5403 onlydeps = "--onlydeps" in self.myopts
# --- Argument classification loop (per file/atom x) ---
5406 ext = os.path.splitext(x)[1]
# Case: binary package file (.tbz2); try PKGDIR/All and PKGDIR.
5408 if not os.path.exists(x):
5410 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5411 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5412 elif os.path.exists(
5413 os.path.join(pkgsettings["PKGDIR"], x)):
5414 x = os.path.join(pkgsettings["PKGDIR"], x)
5416 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5417 print "!!! Please ensure the tbz2 exists as specified.\n"
5418 return 0, myfavorites
5419 mytbz2=portage.xpak.tbz2(x)
5420 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5421 if os.path.realpath(x) != \
5422 os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5423 print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5424 return 0, myfavorites
5425 db_keys = list(bindb._aux_cache_keys)
5426 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5427 pkg = Package(type_name="binary", root_config=root_config,
5428 cpv=mykey, built=True, metadata=metadata,
5430 self._pkg_cache[pkg] = pkg
5431 args.append(PackageArg(arg=x, package=pkg,
5432 root_config=root_config))
# Case: ebuild file given by path; validate its tree location.
5433 elif ext==".ebuild":
5434 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5435 pkgdir = os.path.dirname(ebuild_path)
5436 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5437 cp = pkgdir[len(tree_root)+1:]
5438 e = portage.exception.PackageNotFound(
5439 ("%s is not in a valid portage tree " + \
5440 "hierarchy or does not exist") % x)
5441 if not portage.isvalidatom(cp):
5443 cat = portage.catsplit(cp)[0]
5444 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5445 if not portage.isvalidatom("="+mykey):
5447 ebuild_path = portdb.findname(mykey)
5449 if ebuild_path != os.path.join(os.path.realpath(tree_root),
5450 cp, os.path.basename(ebuild_path)):
5451 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5452 return 0, myfavorites
5453 if mykey not in portdb.xmatch(
5454 "match-visible", portage.dep_getkey(mykey)):
5455 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5456 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5457 print colorize("BAD", "*** page for details.")
5458 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5461 raise portage.exception.PackageNotFound(
5462 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5463 db_keys = list(portdb._aux_cache_keys)
5464 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5465 pkg = Package(type_name="ebuild", root_config=root_config,
5466 cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5467 pkgsettings.setcpv(pkg)
5468 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5469 self._pkg_cache[pkg] = pkg
5470 args.append(PackageArg(arg=x, package=pkg,
5471 root_config=root_config))
# Case: absolute filesystem path; resolve its owning package later.
5472 elif x.startswith(os.path.sep):
5473 if not x.startswith(myroot):
5474 portage.writemsg(("\n\n!!! '%s' does not start with" + \
5475 " $ROOT.\n") % x, noiselevel=-1)
5477 # Queue these up since it's most efficient to handle
5478 # multiple files in a single iter_owners() call.
5479 lookup_owners.append(x)
# Case: named set ("system"/"world" or @set syntax).
5481 if x in ("system", "world"):
5483 if x.startswith(SETPREFIX):
5484 s = x[len(SETPREFIX):]
5486 raise portage.exception.PackageSetNotFound(s)
5489 # Recursively expand sets so that containment tests in
5490 # self._get_parent_sets() properly match atoms in nested
5491 # sets (like if world contains system).
5492 expanded_set = InternalPackageSet(
5493 initial_atoms=getSetAtoms(s))
5494 self._sets[s] = expanded_set
5495 args.append(SetArg(arg=x, set=expanded_set,
5496 root_config=root_config))
# Case: plain atom string.
5498 if not is_valid_package_atom(x):
5499 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5501 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5502 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5504 # Don't expand categories or old-style virtuals here unless
5505 # necessary. Expansion of old-style virtuals here causes at
5506 # least the following problems:
5507 # 1) It's more difficult to determine which set(s) an atom
5508 # came from, if any.
5509 # 2) It takes away freedom from the resolver to choose other
5510 # possible expansions when necessary.
5512 args.append(AtomArg(arg=x, atom=x,
5513 root_config=root_config))
# Category-less atom: expand, preferring the cp that is installed.
5515 expanded_atoms = self._dep_expand(root_config, x)
5516 installed_cp_set = set()
5517 for atom in expanded_atoms:
5518 atom_cp = portage.dep_getkey(atom)
5519 if vardb.cp_list(atom_cp):
5520 installed_cp_set.add(atom_cp)
5521 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5522 installed_cp = iter(installed_cp_set).next()
5523 expanded_atoms = [atom for atom in expanded_atoms \
5524 if portage.dep_getkey(atom) == installed_cp]
5526 if len(expanded_atoms) > 1:
5529 ambiguous_package_name(x, expanded_atoms, root_config,
5530 self.spinner, self.myopts)
5531 return False, myfavorites
5533 atom = expanded_atoms[0]
5535 null_atom = insert_category_into_atom(x, "null")
5536 null_cp = portage.dep_getkey(null_atom)
5537 cat, atom_pn = portage.catsplit(null_cp)
5538 virts_p = root_config.settings.get_virts_p().get(atom_pn)
5540 # Allow the depgraph to choose which virtual.
5541 atom = insert_category_into_atom(x, "virtual")
5543 atom = insert_category_into_atom(x, "null")
5545 args.append(AtomArg(arg=x, atom=atom,
5546 root_config=root_config))
# --- Resolve queued filesystem paths to owning packages ---
5550 search_for_multiple = False
5551 if len(lookup_owners) > 1:
5552 search_for_multiple = True
5554 for x in lookup_owners:
5555 if not search_for_multiple and os.path.isdir(x):
5556 search_for_multiple = True
5557 relative_paths.append(x[len(myroot):])
5560 for pkg, relative_path in \
5561 real_vardb._owners.iter_owners(relative_paths):
5562 owners.add(pkg.mycpv)
5563 if not search_for_multiple:
5567 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5568 "by any package.\n") % lookup_owners[0], noiselevel=-1)
5572 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5574 # portage now masks packages with missing slot, but it's
5575 # possible that one was installed by an older version
5576 atom = portage.cpv_getkey(cpv)
5578 atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5579 args.append(AtomArg(arg=atom, atom=atom,
5580 root_config=root_config))
# --- Greedy-slot expansion for --update ---
5582 if "--update" in self.myopts:
5583 # In some cases, the greedy slots behavior can pull in a slot that
5584 # the user would want to uninstall due to it being blocked by a
5585 # newer version in a different slot. Therefore, it's necessary to
5586 # detect and discard any that should be uninstalled. Each time
5587 # that arguments are updated, package selections are repeated in
5588 # order to ensure consistency with the current arguments:
5590 # 1) Initialize args
5591 # 2) Select packages and generate initial greedy atoms
5592 # 3) Update args with greedy atoms
5593 # 4) Select packages and generate greedy atoms again, while
5594 # accounting for any blockers between selected packages
5595 # 5) Update args with revised greedy atoms
5597 self._set_args(args)
5600 greedy_args.append(arg)
5601 if not isinstance(arg, AtomArg):
5603 for atom in self._greedy_slots(arg.root_config, arg.atom):
5605 AtomArg(arg=arg.arg, atom=atom,
5606 root_config=arg.root_config))
5608 self._set_args(greedy_args)
5611 # Revise greedy atoms, accounting for any blockers
5612 # between selected packages.
5613 revised_greedy_args = []
5615 revised_greedy_args.append(arg)
5616 if not isinstance(arg, AtomArg):
5618 for atom in self._greedy_slots(arg.root_config, arg.atom,
5619 blocker_lookahead=True):
5620 revised_greedy_args.append(
5621 AtomArg(arg=arg.arg, atom=atom,
5622 root_config=arg.root_config))
5623 args = revised_greedy_args
5624 del revised_greedy_args
5626 self._set_args(args)
# --- Build favorites from atom/package/set arguments ---
5628 myfavorites = set(myfavorites)
5630 if isinstance(arg, (AtomArg, PackageArg)):
5631 myfavorites.add(arg.atom)
5632 elif isinstance(arg, SetArg):
5633 myfavorites.add(arg.arg)
5634 myfavorites = list(myfavorites)
5636 pprovideddict = pkgsettings.pprovideddict
5638 portage.writemsg("\n", noiselevel=-1)
5639 # Order needs to be preserved since a feature of --nodeps
5640 # is to allow the user to force a specific merge order.
# --- Select and add a package for each argument atom ---
5644 for atom in arg.set:
5645 self.spinner.update()
5646 dep = Dependency(atom=atom, onlydeps=onlydeps,
5647 root=myroot, parent=arg)
5648 atom_cp = portage.dep_getkey(atom)
# package.provided entries satisfy the atom without a merge.
5650 pprovided = pprovideddict.get(portage.dep_getkey(atom))
5651 if pprovided and portage.match_from_list(atom, pprovided):
5652 # A provided package has been specified on the command line.
5653 self._pprovided_args.append((arg, atom))
5655 if isinstance(arg, PackageArg):
5656 if not self._add_pkg(arg.package, dep) or \
5657 not self._create_graph():
5658 sys.stderr.write(("\n\n!!! Problem resolving " + \
5659 "dependencies for %s\n") % arg.arg)
5660 return 0, myfavorites
5663 portage.writemsg(" Arg: %s\n Atom: %s\n" % \
5664 (arg, atom), noiselevel=-1)
5665 pkg, existing_node = self._select_package(
5666 myroot, atom, onlydeps=onlydeps)
# No package found: fatal for ordinary args, tolerated for
# system/world set members (recorded in _missing_args).
5668 if not (isinstance(arg, SetArg) and \
5669 arg.name in ("system", "world")):
5670 self._unsatisfied_deps_for_display.append(
5671 ((myroot, atom), {}))
5672 return 0, myfavorites
5673 self._missing_args.append((arg, atom))
5675 if atom_cp != pkg.cp:
5676 # For old-style virtuals, we need to repeat the
5677 # package.provided check against the selected package.
5678 expanded_atom = atom.replace(atom_cp, pkg.cp)
5679 pprovided = pprovideddict.get(pkg.cp)
5681 portage.match_from_list(expanded_atom, pprovided):
5682 # A provided package has been
5683 # specified on the command line.
5684 self._pprovided_args.append((arg, atom))
5686 if pkg.installed and "selective" not in self.myparams:
5687 self._unsatisfied_deps_for_display.append(
5688 ((myroot, atom), {}))
5689 # Previous behavior was to bail out in this case, but
5690 # since the dep is satisfied by the installed package,
5691 # it's more friendly to continue building the graph
5692 # and just show a warning message. Therefore, only bail
5693 # out here if the atom is not from either the system or
5695 if not (isinstance(arg, SetArg) and \
5696 arg.name in ("system", "world")):
5697 return 0, myfavorites
5699 # Add the selected package to the graph as soon as possible
5700 # so that later dep_check() calls can use it as feedback
5701 # for making more consistent atom selections.
5702 if not self._add_pkg(pkg, dep):
5703 if isinstance(arg, SetArg):
5704 sys.stderr.write(("\n\n!!! Problem resolving " + \
5705 "dependencies for %s from %s\n") % \
5708 sys.stderr.write(("\n\n!!! Problem resolving " + \
5709 "dependencies for %s\n") % atom)
5710 return 0, myfavorites
# Signature failures abort the whole resolution.
5712 except portage.exception.MissingSignature, e:
5713 portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5714 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5715 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5716 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5717 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5718 return 0, myfavorites
5719 except portage.exception.InvalidSignature, e:
5720 portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5721 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5722 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5723 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5724 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5725 return 0, myfavorites
5726 except SystemExit, e:
5727 raise # Needed else can't exit
5728 except Exception, e:
5729 print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5730 print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5733 # Now that the root packages have been added to the graph,
5734 # process the dependencies.
5735 if not self._create_graph():
5736 return 0, myfavorites
# --usepkgonly sanity check: every merge node must be a binary package.
5739 if "--usepkgonly" in self.myopts:
5740 for xs in self.digraph.all_nodes():
5741 if not isinstance(xs, Package):
5743 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5747 print "Missing binary for:",xs[2]
5751 except self._unknown_internal_error:
5752 return False, myfavorites
5754 # We're true here unless we are missing binaries.
5755 return (not missing,myfavorites)
# Rebuild the "args" package set, the combined _set_atoms set, and the
# atom->argument map from the given argument list, then invalidate the
# package selection caches.  (Numbering gaps mark elided lines such as
# the loops over args and the refs-list initialization.)
5757 def _set_args(self, args):
5759 Create the "args" package set from atoms and packages given as
5760 arguments. This method can be called multiple times if necessary.
5761 The package selection cache is automatically invalidated, since
5762 arguments influence package selections.
5764 args_set = self._sets["args"]
5767 if not isinstance(arg, (AtomArg, PackageArg)):
5770 if atom in args_set:
# Recompute the union of atoms over all registered sets.
5774 self._set_atoms.clear()
5775 self._set_atoms.update(chain(*self._sets.itervalues()))
5776 atom_arg_map = self._atom_arg_map
5777 atom_arg_map.clear()
5779 for atom in arg.set:
5780 atom_key = (atom, arg.root_config.root)
5781 refs = atom_arg_map.get(atom_key)
5784 atom_arg_map[atom_key] = refs
5788 # Invalidate the package selection cache, since
5789 # arguments influence package selections.
5790 self._highest_pkg_cache.clear()
5791 for trees in self._filtered_trees.itervalues():
5792 trees["porttree"].dbapi._clear_cache()
# Compute extra slot atoms for installed slots other than the highest
# visible match, optionally discarding slots that would conflict via
# blockers.  (Numbering gaps mark elided lines, including early
# returns and several loop/collection initializations.)
5794 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5796 Return a list of slot atoms corresponding to installed slots that
5797 differ from the slot of the highest visible match. When
5798 blocker_lookahead is True, slot atoms that would trigger a blocker
5799 conflict are automatically discarded, potentially allowing automatic
5800 uninstallation of older slots when appropriate.
5802 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5803 if highest_pkg is None:
5805 vardb = root_config.trees["vartree"].dbapi
# Collect the SLOTs of all installed versions matching the atom.
5807 for cpv in vardb.match(atom):
5808 # don't mix new virtuals with old virtuals
5809 if portage.cpv_getkey(cpv) == highest_pkg.cp:
5810 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5812 slots.add(highest_pkg.metadata["SLOT"])
5816 slots.remove(highest_pkg.metadata["SLOT"])
# For each remaining slot, select its best package; keep only lower
# versions of the same cp as greedy candidates.
5819 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5820 pkg, in_graph = self._select_package(root_config.root, slot_atom)
5821 if pkg is not None and \
5822 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5823 greedy_pkgs.append(pkg)
5826 if not blocker_lookahead:
5827 return [pkg.slot_atom for pkg in greedy_pkgs]
# Blocker lookahead: gather each candidate's blocker atoms from its
# dep strings.
5830 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5831 for pkg in greedy_pkgs + [highest_pkg]:
5832 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5834 atoms = self._select_atoms(
5835 pkg.root, dep_str, pkg.use.enabled,
5836 parent=pkg, strict=True)
5837 except portage.exception.InvalidDependString:
5839 blocker_atoms = (x for x in atoms if x.blocker)
5840 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5842 if highest_pkg not in blockers:
5845 # filter packages with invalid deps
5846 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5848 # filter packages that conflict with highest_pkg
5849 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5850 (blockers[highest_pkg].findAtomForPackage(pkg) or \
5851 blockers[pkg].findAtomForPackage(highest_pkg))]
5856 # If two packages conflict, discard the lower version.
5857 discard_pkgs = set()
5858 greedy_pkgs.sort(reverse=True)
5859 for i in xrange(len(greedy_pkgs) - 1):
5860 pkg1 = greedy_pkgs[i]
5861 if pkg1 in discard_pkgs:
5863 for j in xrange(i + 1, len(greedy_pkgs)):
5864 pkg2 = greedy_pkgs[j]
5865 if pkg2 in discard_pkgs:
5867 if blockers[pkg1].findAtomForPackage(pkg2) or \
5868 blockers[pkg2].findAtomForPackage(pkg1):
5870 discard_pkgs.add(pkg2)
5872 return [pkg.slot_atom for pkg in greedy_pkgs \
5873 if pkg not in discard_pkgs]
# _select_atoms_from_graph: thin wrapper that forces atom selection to use
# self._graph_trees so already-graphed / installed packages are preferred.
5875 def _select_atoms_from_graph(self, *pargs, **kwargs):
5877 Prefer atoms matching packages that have already been
5878 added to the graph or those that are installed and have
5879 not been scheduled for replacement.
5881 kwargs["trees"] = self._graph_trees
5882 return self._select_atoms_highest_available(*pargs, **kwargs)
# _select_atoms_highest_available: run portage.dep_check() on a dep string
# and return the selected atoms; raises InvalidDependString on failure.
# NOTE(review): the try/finally that restores the module-global
# portage.dep._dep_check_strict flag appears to be elided between lines
# 5904 and 5909 — confirm against the full source before relying on it.
5884 def _select_atoms_highest_available(self, root, depstring,
5885 myuse=None, parent=None, strict=True, trees=None, priority=None):
5886 """This will raise InvalidDependString if necessary. If trees is
5887 None then self._filtered_trees is used."""
5888 pkgsettings = self.pkgsettings[root]
5890 trees = self._filtered_trees
5891 if not getattr(priority, "buildtime", False):
5892 # The parent should only be passed to dep_check() for buildtime
5893 # dependencies since that's the only case when it's appropriate
5894 # to trigger the circular dependency avoidance code which uses it.
5895 # It's important not to trigger the same circular dependency
5896 # avoidance code for runtime dependencies since it's not needed
5897 # and it can promote an incorrect package choice.
# Stash the parent in the trees dict so dep_check() can see it.
5901 if parent is not None:
5902 trees[root]["parent"] = parent
# Temporarily relax global strict checking around the dep_check() call.
5904 portage.dep._dep_check_strict = False
5905 mycheck = portage.dep_check(depstring, None,
5906 pkgsettings, myuse=myuse,
5907 myroot=root, trees=trees)
# Cleanup: remove the stashed parent and restore strict mode.
5909 if parent is not None:
5910 trees[root].pop("parent")
5911 portage.dep._dep_check_strict = True
5913 raise portage.exception.InvalidDependString(mycheck[1])
5914 selected_atoms = mycheck[1]
5915 return selected_atoms
# _show_unsatisfied_dep: print a user-facing diagnostic explaining why no
# package satisfies `atom` on `root` — masked packages, missing IUSE,
# required USE changes — then walk the digraph upward to show which
# parent/argument pulled the unsatisfied dep in. Python 2 print syntax.
# NOTE(review): elided listing — several initializations (missing_use,
# missing_iuse, changes, msg, node) and some branch lines are missing.
5917 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5918 atom = portage.dep.Atom(atom)
5919 atom_set = InternalPackageSet(initial_atoms=(atom,))
# Build a USE-free (but slot-preserving) variant of the atom so that
# match lookups below are not constrained by USE deps.
5920 atom_without_use = atom
5922 atom_without_use = portage.dep.remove_slot(atom)
5924 atom_without_use += ":" + atom.slot
5925 atom_without_use = portage.dep.Atom(atom_without_use)
5926 xinfo = '"%s"' % atom
5929 # Discard null/ from failed cpv_expand category expansion.
5930 xinfo = xinfo.replace("null/", "")
5931 masked_packages = []
5933 missing_licenses = []
5934 have_eapi_mask = False
5935 pkgsettings = self.pkgsettings[root]
5936 implicit_iuse = pkgsettings._get_implicit_iuse()
5937 root_config = self.roots[root]
5938 portdb = self.roots[root].trees["porttree"].dbapi
5939 dbs = self._filtered_trees[root]["dbs"]
# Scan every candidate db for matches and classify each as masked or
# merely failing the atom's USE requirements.
5940 for db, pkg_type, built, installed, db_keys in dbs:
5944 if hasattr(db, "xmatch"):
5945 cpv_list = db.xmatch("match-all", atom_without_use)
5947 cpv_list = db.match(atom_without_use)
5950 for cpv in cpv_list:
5951 metadata, mreasons = get_mask_info(root_config, cpv,
5952 pkgsettings, db, pkg_type, built, installed, db_keys)
5953 if metadata is not None:
5954 pkg = Package(built=built, cpv=cpv,
5955 installed=installed, metadata=metadata,
5956 root_config=root_config)
5957 if pkg.cp != atom.cp:
5958 # A cpv can be returned from dbapi.match() as an
5959 # old-style virtual match even in cases when the
5960 # package does not actually PROVIDE the virtual.
5961 # Filter out any such false matches here.
5962 if not atom_set.findAtomForPackage(pkg):
5964 if atom.use and not mreasons:
5965 missing_use.append(pkg)
5967 masked_packages.append(
5968 (root_config, pkgsettings, cpv, metadata, mreasons))
# For each USE-mismatched package, work out whether flags are missing
# from IUSE entirely, or just need to be enabled/disabled.
5970 missing_use_reasons = []
5971 missing_iuse_reasons = []
5972 for pkg in missing_use:
5973 use = pkg.use.enabled
5974 iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
5975 iuse_re = re.compile("^(%s)$" % "|".join(iuse))
5977 for x in atom.use.required:
5978 if iuse_re.match(x) is None:
5979 missing_iuse.append(x)
5982 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
5983 missing_iuse_reasons.append((pkg, mreasons))
5985 need_enable = sorted(atom.use.enabled.difference(use))
5986 need_disable = sorted(atom.use.disabled.intersection(use))
5987 if need_enable or need_disable:
5989 changes.extend(colorize("red", "+" + x) \
5990 for x in need_enable)
5991 changes.extend(colorize("blue", "-" + x) \
5992 for x in need_disable)
5993 mreasons.append("Change USE: %s" % " ".join(changes))
5994 missing_use_reasons.append((pkg, mreasons))
5996 if missing_iuse_reasons and not missing_use_reasons:
5997 missing_use_reasons = missing_iuse_reasons
5998 elif missing_use_reasons:
5999 # Only show the latest version.
6000 del missing_use_reasons[1:]
# Report: USE-flag problems take precedence, then masks, then "no
# ebuilds at all".
6002 if missing_use_reasons:
6003 print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6004 print "!!! One of the following packages is required to complete your request:"
6005 for pkg, mreasons in missing_use_reasons:
6006 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6008 elif masked_packages:
6010 colorize("BAD", "All ebuilds that could satisfy ") + \
6011 colorize("INFORM", xinfo) + \
6012 colorize("BAD", " have been masked.")
6013 print "!!! One of the following masked packages is required to complete your request:"
6014 have_eapi_mask = show_masked_packages(masked_packages)
6017 msg = ("The current version of portage supports " + \
6018 "EAPI '%s'. You must upgrade to a newer version" + \
6019 " of portage before EAPI masked packages can" + \
6020 " be installed.") % portage.const.EAPI
6021 from textwrap import wrap
6022 for line in wrap(msg, 75):
6027 print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6029 # Show parent nodes and the argument that pulled them in.
6030 traversed_nodes = set()
6033 while node is not None:
6034 traversed_nodes.add(node)
6035 msg.append('(dependency required by "%s" [%s])' % \
6036 (colorize('INFORM', str(node.cpv)), node.type_name))
6037 # When traversing to parents, prefer arguments over packages
6038 # since arguments are root nodes. Never traverse the same
6039 # package twice, in order to prevent an infinite loop.
6040 selected_parent = None
6041 for parent in self.digraph.parent_nodes(node):
6042 if isinstance(parent, DependencyArg):
6043 msg.append('(dependency required by "%s" [argument])' % \
6044 (colorize('INFORM', str(parent))))
6045 selected_parent = None
6047 if parent not in traversed_nodes:
6048 selected_parent = parent
6049 node = selected_parent
# _select_pkg_highest_available: memoizing front-end for
# _select_pkg_highest_available_imp, keyed on (root, atom, onlydeps).
# On a cache hit whose package has since been added to the graph, the
# cached entry is refreshed with the in-graph instance.
6055 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6056 cache_key = (root, atom, onlydeps)
6057 ret = self._highest_pkg_cache.get(cache_key)
# (cache-hit unpacking of (pkg, existing) is elided from this listing)
6060 if pkg and not existing:
6061 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6062 if existing and existing == pkg:
6063 # Update the cache to reflect that the
6064 # package has been added to the graph.
6066 self._highest_pkg_cache[cache_key] = ret
# Cache miss: compute and store.
6068 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6069 self._highest_pkg_cache[cache_key] = ret
# NOTE(review): lines 6072-6075 below appear to belong to a *separate*
# method whose `def` line was elided from this listing (they inject a
# visible package into root_config.visible_pkgs) — confirm against the
# full source.
6072 settings = pkg.root_config.settings
6073 if visible(settings, pkg) and not (pkg.installed and \
6074 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6075 pkg.root_config.visible_pkgs.cpv_inject(pkg)
# _select_pkg_highest_available_imp: the core package-selection routine.
# Iterates all candidate databases (binary/ebuild/installed) for `atom`,
# applies visibility, USE, --noreplace/--newuse/--reinstall and
# old-style-virtual filtering, and returns the best match as
# (package, existing_graph_node).
# NOTE(review): heavily elided listing — many guards, `continue`/`break`
# lines and initializations (e.g. reinstall, myeb, atom_cp) are missing.
6078 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6079 root_config = self.roots[root]
6080 pkgsettings = self.pkgsettings[root]
6081 dbs = self._filtered_trees[root]["dbs"]
6082 vardb = self.roots[root].trees["vartree"].dbapi
6083 portdb = self.roots[root].trees["porttree"].dbapi
6084 # List of acceptable packages, ordered by type preference.
6085 matched_packages = []
6086 highest_version = None
6087 if not isinstance(atom, portage.dep.Atom):
6088 atom = portage.dep.Atom(atom)
6090 atom_set = InternalPackageSet(initial_atoms=(atom,))
6091 existing_node = None
6093 usepkgonly = "--usepkgonly" in self.myopts
6094 empty = "empty" in self.myparams
6095 selective = "selective" in self.myparams
6097 noreplace = "--noreplace" in self.myopts
6098 # Behavior of the "selective" parameter depends on
6099 # whether or not a package matches an argument atom.
6100 # If an installed package provides an old-style
6101 # virtual that is no longer provided by an available
6102 # package, the installed package may match an argument
6103 # atom even though none of the available packages do.
6104 # Therefore, "selective" logic does not consider
6105 # whether or not an installed package matches an
6106 # argument atom. It only considers whether or not
6107 # available packages match argument atoms, which is
6108 # represented by the found_available_arg flag.
6109 found_available_arg = False
# Two passes: first look for an existing graph node, then general match.
6110 for find_existing_node in True, False:
6113 for db, pkg_type, built, installed, db_keys in dbs:
6116 if installed and not find_existing_node:
6117 want_reinstall = reinstall or empty or \
6118 (found_available_arg and not selective)
6119 if want_reinstall and matched_packages:
6121 if hasattr(db, "xmatch"):
6122 cpv_list = db.xmatch("match-all", atom)
6124 cpv_list = db.match(atom)
6126 # USE=multislot can make an installed package appear as if
6127 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6128 # won't do any good as long as USE=multislot is enabled since
6129 # the newly built package still won't have the expected slot.
6130 # Therefore, assume that such SLOT dependencies are already
6131 # satisfied rather than forcing a rebuild.
6132 if installed and not cpv_list and atom.slot:
6133 for cpv in db.match(atom.cp):
6134 slot_available = False
6135 for other_db, other_type, other_built, \
6136 other_installed, other_keys in dbs:
6139 other_db.aux_get(cpv, ["SLOT"])[0]:
6140 slot_available = True
6144 if not slot_available:
6146 inst_pkg = self._pkg(cpv, "installed",
6147 root_config, installed=installed)
6148 # Remove the slot from the atom and verify that
6149 # the package matches the resulting atom.
6150 atom_without_slot = portage.dep.remove_slot(atom)
6152 atom_without_slot += str(atom.use)
6153 atom_without_slot = portage.dep.Atom(atom_without_slot)
6154 if portage.match_from_list(
6155 atom_without_slot, [inst_pkg]):
6156 cpv_list = [inst_pkg.cpv]
6161 pkg_status = "merge"
6162 if installed or onlydeps:
6163 pkg_status = "nomerge"
6166 for cpv in cpv_list:
6167 # Make --noreplace take precedence over --newuse.
6168 if not installed and noreplace and \
6169 cpv in vardb.match(atom):
6170 # If the installed version is masked, it may
6171 # be necessary to look at lower versions,
6172 # in case there is a visible downgrade.
6174 reinstall_for_flags = None
6175 cache_key = (pkg_type, root, cpv, pkg_status)
6176 calculated_use = True
6177 pkg = self._pkg_cache.get(cache_key)
6179 calculated_use = False
6181 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6184 pkg = Package(built=built, cpv=cpv,
6185 installed=installed, metadata=metadata,
6186 onlydeps=onlydeps, root_config=root_config,
6188 metadata = pkg.metadata
# Conditional LICENSE/PROVIDE force USE calculation even for ebuilds,
# because it can change visibility.
6189 if not built and ("?" in metadata["LICENSE"] or \
6190 "?" in metadata["PROVIDE"]):
6191 # This is avoided whenever possible because
6192 # it's expensive. It only needs to be done here
6193 # if it has an effect on visibility.
6194 pkgsettings.setcpv(pkg)
6195 metadata["USE"] = pkgsettings["PORTAGE_USE"]
6196 calculated_use = True
6197 self._pkg_cache[pkg] = pkg
6199 if not installed or (built and matched_packages):
6200 # Only enforce visibility on installed packages
6201 # if there is at least one other visible package
6202 # available. By filtering installed masked packages
6203 # here, packages that have been masked since they
6204 # were installed can be automatically downgraded
6205 # to an unmasked version.
6207 if not visible(pkgsettings, pkg):
6209 except portage.exception.InvalidDependString:
6213 # Enable upgrade or downgrade to a version
6214 # with visible KEYWORDS when the installed
6215 # version is masked by KEYWORDS, but never
6216 # reinstall the same exact version only due
6217 # to a KEYWORDS mask.
6218 if built and matched_packages:
6220 different_version = None
6221 for avail_pkg in matched_packages:
6222 if not portage.dep.cpvequal(
6223 pkg.cpv, avail_pkg.cpv):
6224 different_version = avail_pkg
6226 if different_version is not None:
6229 pkgsettings._getMissingKeywords(
6230 pkg.cpv, pkg.metadata):
6233 # If the ebuild no longer exists or it's
6234 # keywords have been dropped, reject built
6235 # instances (installed or binary).
6236 # If --usepkgonly is enabled, assume that
6237 # the ebuild status should be ignored.
6241 pkg.cpv, "ebuild", root_config)
6242 except portage.exception.PackageNotFound:
6245 if not visible(pkgsettings, pkg_eb):
6248 if not pkg.built and not calculated_use:
6249 # This is avoided whenever possible because
6251 pkgsettings.setcpv(pkg)
6252 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6254 if pkg.cp != atom.cp:
6255 # A cpv can be returned from dbapi.match() as an
6256 # old-style virtual match even in cases when the
6257 # package does not actually PROVIDE the virtual.
6258 # Filter out any such false matches here.
6259 if not atom_set.findAtomForPackage(pkg):
# Track whether any *available* (non-installed) package matches an
# argument atom — feeds the "selective" logic above.
6263 if root == self.target_root:
6265 # Ebuild USE must have been calculated prior
6266 # to this point, in case atoms have USE deps.
6267 myarg = self._iter_atoms_for_pkg(pkg).next()
6268 except StopIteration:
6270 except portage.exception.InvalidDependString:
6272 # masked by corruption
6274 if not installed and myarg:
6275 found_available_arg = True
# Reject ebuild candidates whose USE cannot satisfy the atom's USE deps.
6277 if atom.use and not pkg.built:
6278 use = pkg.use.enabled
6279 if atom.use.enabled.difference(use):
6281 if atom.use.disabled.intersection(use):
6283 if pkg.cp == atom_cp:
6284 if highest_version is None:
6285 highest_version = pkg
6286 elif pkg > highest_version:
6287 highest_version = pkg
6288 # At this point, we've found the highest visible
6289 # match from the current repo. Any lower versions
6290 # from this repo are ignored, so this so the loop
6291 # will always end with a break statement below
6293 if find_existing_node:
6294 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6297 if portage.dep.match_from_list(atom, [e_pkg]):
6298 if highest_version and \
6299 e_pkg.cp == atom_cp and \
6300 e_pkg < highest_version and \
6301 e_pkg.slot_atom != highest_version.slot_atom:
6302 # There is a higher version available in a
6303 # different slot, so this existing node is
6307 matched_packages.append(e_pkg)
6308 existing_node = e_pkg
6310 # Compare built package to current config and
6311 # reject the built package if necessary.
6312 if built and not installed and \
6313 ("--newuse" in self.myopts or \
6314 "--reinstall" in self.myopts):
6315 iuses = pkg.iuse.all
6316 old_use = pkg.use.enabled
6318 pkgsettings.setcpv(myeb)
6320 pkgsettings.setcpv(pkg)
6321 now_use = pkgsettings["PORTAGE_USE"].split()
6322 forced_flags = set()
6323 forced_flags.update(pkgsettings.useforce)
6324 forced_flags.update(pkgsettings.usemask)
6326 if myeb and not usepkgonly:
6327 cur_iuse = myeb.iuse.all
6328 if self._reinstall_for_flags(forced_flags,
6332 # Compare current config to installed package
6333 # and do not reinstall if possible.
6334 if not installed and \
6335 ("--newuse" in self.myopts or \
6336 "--reinstall" in self.myopts) and \
6337 cpv in vardb.match(atom):
6338 pkgsettings.setcpv(pkg)
6339 forced_flags = set()
6340 forced_flags.update(pkgsettings.useforce)
6341 forced_flags.update(pkgsettings.usemask)
6342 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6343 old_iuse = set(filter_iuse_defaults(
6344 vardb.aux_get(cpv, ["IUSE"])[0].split()))
6345 cur_use = pkgsettings["PORTAGE_USE"].split()
6346 cur_iuse = pkg.iuse.all
6347 reinstall_for_flags = \
6348 self._reinstall_for_flags(
6349 forced_flags, old_use, old_iuse,
6351 if reinstall_for_flags:
6355 matched_packages.append(pkg)
6356 if reinstall_for_flags:
6357 self._reinstall_nodes[pkg] = \
6361 if not matched_packages:
6364 if "--debug" in self.myopts:
6365 for pkg in matched_packages:
6366 portage.writemsg("%s %s\n" % \
6367 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6369 # Filter out any old-style virtual matches if they are
6370 # mixed with new-style virtual matches.
6371 cp = portage.dep_getkey(atom)
6372 if len(matched_packages) > 1 and \
6373 "virtual" == portage.catsplit(cp)[0]:
6374 for pkg in matched_packages:
6377 # Got a new-style virtual, so filter
6378 # out any old-style virtuals.
6379 matched_packages = [pkg for pkg in matched_packages \
6383 if len(matched_packages) > 1:
6384 bestmatch = portage.best(
6385 [pkg.cpv for pkg in matched_packages])
6386 matched_packages = [pkg for pkg in matched_packages \
6387 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6389 # ordered by type preference ("ebuild" type is the last resort)
6390 return matched_packages[-1], existing_node
# _select_pkg_from_graph: restricted selector used in "complete graph"
# mode — matches only against the graph-backed db so already-graphed or
# installed-and-not-replaced packages are chosen.
# NOTE(review): the empty-match early return is elided from this listing.
6392 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6394 Select packages that have already been added to the graph or
6395 those that are installed and have not been scheduled for
6398 graph_db = self._graph_trees[root]["porttree"].dbapi
6399 matches = graph_db.match_pkgs(atom)
6402 pkg = matches[-1] # highest match
6403 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6404 return pkg, in_graph
# _complete_graph: pull deep dependencies of the required sets (args,
# system, world) into the graph so that initially-satisfied deps are not
# broken by the planned merges. Gated by the --complete-graph option.
# NOTE(review): elided listing — e.g. the `args = []` initialization and
# some early `return`/`continue` lines are missing.
6406 def _complete_graph(self):
6408 Add any deep dependencies of required sets (args, system, world) that
6409 have not been pulled into the graph yet. This ensures that the graph
6410 is consistent such that initially satisfied deep dependencies are not
6411 broken in the new graph. Initially unsatisfied dependencies are
6412 irrelevant since we only want to avoid breaking dependencies that are
6415 Since this method can consume enough time to disturb users, it is
6416 currently only enabled by the --complete-graph option.
6418 if "--buildpkgonly" in self.myopts or \
6419 "recurse" not in self.myparams:
6422 if "complete" not in self.myparams:
6423 # Skip this to avoid consuming enough time to disturb users.
6426 # Put the depgraph into a mode that causes it to only
6427 # select packages that have already been added to the
6428 # graph or those that are installed and have not been
6429 # scheduled for replacement. Also, toggle the "deep"
6430 # parameter so that all dependencies are traversed and
6432 self._select_atoms = self._select_atoms_from_graph
6433 self._select_package = self._select_pkg_from_graph
6434 already_deep = "deep" in self.myparams
6435 if not already_deep:
6436 self.myparams.add("deep")
6438 for root in self.roots:
6439 required_set_names = self._required_set_names.copy()
6440 if root == self.target_root and \
6441 (already_deep or "empty" in self.myparams):
6442 required_set_names.difference_update(self._sets)
6443 if not required_set_names and not self._ignored_deps:
6445 root_config = self.roots[root]
6446 setconfig = root_config.setconfig
6448 # Reuse existing SetArg instances when available.
6449 for arg in self.digraph.root_nodes():
6450 if not isinstance(arg, SetArg):
6452 if arg.root_config != root_config:
6454 if arg.name in required_set_names:
6456 required_set_names.remove(arg.name)
6457 # Create new SetArg instances only when necessary.
6458 for s in required_set_names:
6459 expanded_set = InternalPackageSet(
6460 initial_atoms=setconfig.getSetAtoms(s))
6461 atom = SETPREFIX + s
6462 args.append(SetArg(arg=atom, set=expanded_set,
6463 root_config=root_config))
6464 vardb = root_config.trees["vartree"].dbapi
# Queue every atom from every set arg for dependency traversal.
6466 for atom in arg.set:
6467 self._dep_stack.append(
6468 Dependency(atom=atom, root=root, parent=arg))
6469 if self._ignored_deps:
6470 self._dep_stack.extend(self._ignored_deps)
6471 self._ignored_deps = []
6472 if not self._create_graph(allow_unsatisfied=True):
6474 # Check the unsatisfied deps to see if any initially satisfied deps
6475 # will become unsatisfied due to an upgrade. Initially unsatisfied
6476 # deps are irrelevant since we only want to avoid breaking deps
6477 # that are initially satisfied.
6478 while self._unsatisfied_deps:
6479 dep = self._unsatisfied_deps.pop()
6480 matches = vardb.match_pkgs(dep.atom)
6482 self._initially_unsatisfied_deps.append(dep)
6484 # An scheduled installation broke a deep dependency.
6485 # Add the installed package to the graph so that it
6486 # will be appropriately reported as a slot collision
6487 # (possibly solvable via backtracking).
6488 pkg = matches[-1] # highest match
6489 if not self._add_pkg(pkg, dep):
6491 if not self._create_graph(allow_unsatisfied=True):
# _pkg: fetch a Package instance from self._pkg_cache or build one from
# the appropriate tree's dbapi metadata; for ebuilds, USE is computed via
# pkgsettings. Raises PackageNotFound when aux_get fails.
# NOTE(review): elided listing — the cache-hit early return and the
# try/except around aux_get are partially missing here.
6495 def _pkg(self, cpv, type_name, root_config, installed=False):
6497 Get a package instance from the cache, or create a new
6498 one if necessary. Raises KeyError from aux_get if it
6499 failures for some reason (package does not exist or is
6504 operation = "nomerge"
6505 pkg = self._pkg_cache.get(
6506 (type_name, root_config.root, cpv, operation))
6508 tree_type = self.pkg_tree_map[type_name]
6509 db = root_config.trees[tree_type].dbapi
6510 db_keys = list(self._trees_orig[root_config.root][
6511 tree_type].dbapi._aux_cache_keys)
6513 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6515 raise portage.exception.PackageNotFound(cpv)
6516 pkg = Package(cpv=cpv, metadata=metadata,
6517 root_config=root_config, installed=installed)
6518 if type_name == "ebuild":
6519 settings = self.pkgsettings[root_config.root]
6520 settings.setcpv(pkg)
6521 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6522 self._pkg_cache[pkg] = pkg
# validate_blockers: two-phase blocker resolution. Phase 1 collects
# blocker atoms for all installed packages (using/refreshing
# BlockerCache). Phase 2 walks blocker leaf nodes, expands old-style
# virtuals, matches blocked packages in initial and final dbs, schedules
# "uninstall" tasks with hard ordering deps where resolvable, and records
# unsolvable blockers.
# NOTE(review): heavily elided listing — loop headers (e.g. the per-pkg
# vardb iteration), several `continue` lines and return statements are
# missing between the lines shown.
6525 def validate_blockers(self):
6526 """Remove any blockers from the digraph that do not match any of the
6527 packages within the graph. If necessary, create hard deps to ensure
6528 correct merge order such that mutually blocking packages are never
6529 installed simultaneously."""
6531 if "--buildpkgonly" in self.myopts or \
6532 "--nodeps" in self.myopts:
6535 #if "deep" in self.myparams:
6537 # Pull in blockers from all installed packages that haven't already
6538 # been pulled into the depgraph. This is not enabled by default
6539 # due to the performance penalty that is incurred by all the
6540 # additional dep_check calls that are required.
6542 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6543 for myroot in self.trees:
6544 vardb = self.trees[myroot]["vartree"].dbapi
6545 portdb = self.trees[myroot]["porttree"].dbapi
6546 pkgsettings = self.pkgsettings[myroot]
6547 final_db = self.mydbapi[myroot]
6549 blocker_cache = BlockerCache(myroot, vardb)
6550 stale_cache = set(blocker_cache)
6553 stale_cache.discard(cpv)
6554 pkg_in_graph = self.digraph.contains(pkg)
6556 # Check for masked installed packages. Only warn about
6557 # packages that are in the graph in order to avoid warning
6558 # about those that will be automatically uninstalled during
6559 # the merge process or by --depclean.
6561 if pkg_in_graph and not visible(pkgsettings, pkg):
6562 self._masked_installed.add(pkg)
6564 blocker_atoms = None
# (graph-derived blocker collection lines are elided here)
6570 self._blocker_parents.child_nodes(pkg))
6575 self._irrelevant_blockers.child_nodes(pkg))
6578 if blockers is not None:
6579 blockers = set(str(blocker.atom) \
6580 for blocker in blockers)
6582 # If this node has any blockers, create a "nomerge"
6583 # node for it so that they can be enforced.
6584 self.spinner.update()
6585 blocker_data = blocker_cache.get(cpv)
# Invalidate cache entries whose COUNTER no longer matches the vdb.
6586 if blocker_data is not None and \
6587 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6590 # If blocker data from the graph is available, use
6591 # it to validate the cache and update the cache if
6593 if blocker_data is not None and \
6594 blockers is not None:
6595 if not blockers.symmetric_difference(
6596 blocker_data.atoms):
6600 if blocker_data is None and \
6601 blockers is not None:
6602 # Re-use the blockers from the graph.
6603 blocker_atoms = sorted(blockers)
6604 counter = long(pkg.metadata["COUNTER"])
6606 blocker_cache.BlockerData(counter, blocker_atoms)
6607 blocker_cache[pkg.cpv] = blocker_data
6611 blocker_atoms = blocker_data.atoms
6613 # Use aux_get() to trigger FakeVartree global
6614 # updates on *DEPEND when appropriate.
6615 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6616 # It is crucial to pass in final_db here in order to
6617 # optimize dep_check calls by eliminating atoms via
6618 # dep_wordreduce and dep_eval calls.
# Strict mode is relaxed around dep_check and restored below.
6620 portage.dep._dep_check_strict = False
6622 success, atoms = portage.dep_check(depstr,
6623 final_db, pkgsettings, myuse=pkg.use.enabled,
6624 trees=self._graph_trees, myroot=myroot)
6625 except Exception, e:
6626 if isinstance(e, SystemExit):
6628 # This is helpful, for example, if a ValueError
6629 # is thrown from cpv_expand due to multiple
6630 # matches (this can happen if an atom lacks a
6632 show_invalid_depstring_notice(
6633 pkg, depstr, str(e))
6637 portage.dep._dep_check_strict = True
6639 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6640 if replacement_pkg and \
6641 replacement_pkg[0].operation == "merge":
6642 # This package is being replaced anyway, so
6643 # ignore invalid dependencies so as not to
6644 # annoy the user too much (otherwise they'd be
6645 # forced to manually unmerge it first).
6647 show_invalid_depstring_notice(pkg, depstr, atoms)
6649 blocker_atoms = [myatom for myatom in atoms \
6650 if myatom.startswith("!")]
6651 blocker_atoms.sort()
6652 counter = long(pkg.metadata["COUNTER"])
6653 blocker_cache[cpv] = \
6654 blocker_cache.BlockerData(counter, blocker_atoms)
6657 for atom in blocker_atoms:
6658 blocker = Blocker(atom=portage.dep.Atom(atom),
6659 eapi=pkg.metadata["EAPI"], root=myroot)
6660 self._blocker_parents.add(blocker, pkg)
6661 except portage.exception.InvalidAtom, e:
6662 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6663 show_invalid_depstring_notice(
6664 pkg, depstr, "Invalid Atom: %s" % (e,))
# Drop cache entries for packages no longer installed, then persist.
6666 for cpv in stale_cache:
6667 del blocker_cache[cpv]
6668 blocker_cache.flush()
6671 # Discard any "uninstall" tasks scheduled by previous calls
6672 # to this method, since those tasks may not make sense given
6673 # the current graph state.
6674 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6675 if previous_uninstall_tasks:
6676 self._blocker_uninstalls = digraph()
6677 self.digraph.difference_update(previous_uninstall_tasks)
6679 for blocker in self._blocker_parents.leaf_nodes():
6680 self.spinner.update()
6681 root_config = self.roots[blocker.root]
6682 virtuals = root_config.settings.getvirtuals()
6683 myroot = blocker.root
6684 initial_db = self.trees[myroot]["vartree"].dbapi
6685 final_db = self.mydbapi[myroot]
6687 provider_virtual = False
6688 if blocker.cp in virtuals and \
6689 not self._have_new_virt(blocker.root, blocker.cp):
6690 provider_virtual = True
# Expand an old-style virtual blocker into one atom per provider.
6692 if provider_virtual:
6694 for provider_entry in virtuals[blocker.cp]:
6696 portage.dep_getkey(provider_entry)
6697 atoms.append(blocker.atom.replace(
6698 blocker.cp, provider_cp))
6700 atoms = [blocker.atom]
6702 blocked_initial = []
6704 blocked_initial.extend(initial_db.match_pkgs(atom))
6708 blocked_final.extend(final_db.match_pkgs(atom))
6710 if not blocked_initial and not blocked_final:
6711 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6712 self._blocker_parents.remove(blocker)
6713 # Discard any parents that don't have any more blockers.
6714 for pkg in parent_pkgs:
6715 self._irrelevant_blockers.add(blocker, pkg)
6716 if not self._blocker_parents.child_nodes(pkg):
6717 self._blocker_parents.remove(pkg)
6719 for parent in self._blocker_parents.parent_nodes(blocker):
6720 unresolved_blocks = False
6721 depends_on_order = set()
6722 for pkg in blocked_initial:
6723 if pkg.slot_atom == parent.slot_atom:
6724 # TODO: Support blocks within slots in cases where it
6725 # might make sense. For example, a new version might
6726 # require that the old version be uninstalled at build
6729 if parent.installed:
6730 # Two currently installed packages conflict with
6731 # eachother. Ignore this case since the damage
6732 # is already done and this would be likely to
6733 # confuse users if displayed like a normal blocker.
6736 self._blocked_pkgs.add(pkg, blocker)
6738 if parent.operation == "merge":
6739 # Maybe the blocked package can be replaced or simply
6740 # unmerged to resolve this block.
6741 depends_on_order.add((pkg, parent))
6743 # None of the above blocker resolutions techniques apply,
6744 # so apparently this one is unresolvable.
6745 unresolved_blocks = True
6746 for pkg in blocked_final:
6747 if pkg.slot_atom == parent.slot_atom:
6748 # TODO: Support blocks within slots.
6750 if parent.operation == "nomerge" and \
6751 pkg.operation == "nomerge":
6752 # This blocker will be handled the next time that a
6753 # merge of either package is triggered.
6756 self._blocked_pkgs.add(pkg, blocker)
6758 # Maybe the blocking package can be
6759 # unmerged to resolve this block.
6760 if parent.operation == "merge" and pkg.installed:
6761 depends_on_order.add((pkg, parent))
6763 elif parent.operation == "nomerge":
6764 depends_on_order.add((parent, pkg))
6766 # None of the above blocker resolutions techniques apply,
6767 # so apparently this one is unresolvable.
6768 unresolved_blocks = True
6770 # Make sure we don't unmerge any package that have been pulled
6772 if not unresolved_blocks and depends_on_order:
6773 for inst_pkg, inst_task in depends_on_order:
6774 if self.digraph.contains(inst_pkg) and \
6775 self.digraph.parent_nodes(inst_pkg):
6776 unresolved_blocks = True
6779 if not unresolved_blocks and depends_on_order:
6780 for inst_pkg, inst_task in depends_on_order:
# Clone the installed package as an explicit "uninstall" task.
6781 uninst_task = Package(built=inst_pkg.built,
6782 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6783 metadata=inst_pkg.metadata,
6784 operation="uninstall",
6785 root_config=inst_pkg.root_config,
6786 type_name=inst_pkg.type_name)
6787 self._pkg_cache[uninst_task] = uninst_task
6788 # Enforce correct merge order with a hard dep.
6789 self.digraph.addnode(uninst_task, inst_task,
6790 priority=BlockerDepPriority.instance)
6791 # Count references to this blocker so that it can be
6792 # invalidated after nodes referencing it have been
6794 self._blocker_uninstalls.addnode(uninst_task, blocker)
6795 if not unresolved_blocks and not depends_on_order:
6796 self._irrelevant_blockers.add(blocker, parent)
6797 self._blocker_parents.remove_edge(blocker, parent)
6798 if not self._blocker_parents.parent_nodes(blocker):
6799 self._blocker_parents.remove(blocker)
6800 if not self._blocker_parents.child_nodes(parent):
6801 self._blocker_parents.remove(parent)
6802 if unresolved_blocks:
6803 self._unsolvable_blockers.add(blocker, parent)
# _accept_blocker_conflicts: blocker conflicts are tolerable when nothing
# will actually be merged to the live filesystem (fetch/build-only or
# --nodeps modes). Return statements are elided from this listing.
6807 def _accept_blocker_conflicts(self):
6809 for x in ("--buildpkgonly", "--fetchonly",
6810 "--fetch-all-uri", "--nodeps"):
6811 if x in self.myopts:
# _merge_order_bias: sort mygraph.order so uninstalls sink, deep system
# runtime deps are promoted, and higher-reference-count nodes come first.
# Uses cmp_sort_key to adapt the 3-way comparator (Python 2 style).
# NOTE(review): elided listing — the comparator's return statements for
# the uninstall and system-dep branches are missing here.
6816 def _merge_order_bias(self, mygraph):
6818 For optimal leaf node selection, promote deep system runtime deps and
6819 order nodes from highest to lowest overall reference count.
# Precompute parent counts once; the comparator reads this dict.
6823 for node in mygraph.order:
6824 node_info[node] = len(mygraph.parent_nodes(node))
6825 deep_system_deps = (_find_deep_system_runtime_deps(mygraph))
6827 def cmp_merge_preference(node1, node2):
6829 if node1.operation == 'uninstall':
6830 if node2.operation == 'uninstall':
6834 if node2.operation == 'uninstall':
6835 if node1.operation == 'uninstall':
6839 node1_sys = node1 in deep_system_deps
6840 node2_sys = node2 in deep_system_deps
6841 if node1_sys != node2_sys:
# Fall back to descending reference count.
6846 return node_info[node2] - node_info[node1]
6848 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
def altlist(self, reversed=False):
	# Compute (and cache) the serialized merge list, retrying while
	# _serialize_tasks() signals _serialize_tasks_retry.
	# NOTE(review): parameter ``reversed`` shadows the builtin of the
	# same name; the ``try:`` and final return lines are elided here.
	while self._serialized_tasks_cache is None:
		self._resolve_conflicts()
		self._serialized_tasks_cache, self._scheduler_graph = \
			self._serialize_tasks()
		except self._serialize_tasks_retry:
	# Copy so callers cannot mutate the internal cache.
	retlist = self._serialized_tasks_cache[:]
def schedulerGraph(self):
	"""
	The scheduler graph is identical to the normal one except that
	uninstall edges are reversed in specific cases that require
	conflicting packages to be temporarily installed simultaneously.
	This is intended for use by the Scheduler in its parallelization
	logic. It ensures that temporary simultaneous installation of
	conflicting packages is avoided when appropriate (especially for
	!!atom blockers), but allowed in specific cases that require it.

	Note that this method calls break_refs() which alters the state of
	internal Package instances such that this depgraph instance should
	not be used to perform any more calculations.
	"""
	# Lazily build the graph on first call (the line that triggers the
	# build is elided in this excerpt).
	if self._scheduler_graph is None:
	self.break_refs(self._scheduler_graph.order)
	return self._scheduler_graph
def break_refs(self, nodes):
	"""
	Take a mergelist like that returned from self.altlist() and
	break any references that lead back to the depgraph. This is
	useful if you want to hold references to packages without
	also holding the depgraph on the heap.
	"""
	# NOTE(review): the loop header over ``nodes`` is elided in this
	# excerpt.
		if hasattr(node, "root_config"):
			# The FakeVartree references the _package_cache which
			# references the depgraph. So that Package instances don't
			# hold the depgraph and FakeVartree on the heap, replace
			# the RootConfig that references the FakeVartree with the
			# original RootConfig instance which references the actual
			# vartree.
			node.root_config = \
				self._trees_orig[node.root_config.root]["root_config"]
6902 def _resolve_conflicts(self):
6903 if not self._complete_graph():
6904 raise self._unknown_internal_error()
6906 if not self.validate_blockers():
6907 raise self._unknown_internal_error()
6909 if self._slot_collision_info:
6910 self._process_slot_conflicts()
def _serialize_tasks(self):
	"""
	Flatten self.digraph into an ordered list of merge/uninstall tasks
	(retlist) plus a scheduler graph, iteratively selecting leaf nodes
	and progressively loosening the selection criteria when stuck.
	NOTE(review): many interior lines are elided in this excerpt; all
	code tokens shown below are unchanged from the original.
	"""

	if "--debug" in self.myopts:
		writemsg("\ndigraph:\n\n", noiselevel=-1)
		self.digraph.debug_print()
		writemsg("\n", noiselevel=-1)

	scheduler_graph = self.digraph.copy()
	mygraph=self.digraph.copy()
	# Prune "nomerge" root nodes if nothing depends on them, since
	# otherwise they slow down merge order calculation. Don't remove
	# non-root nodes since they help optimize merge order in some cases
	# such as revdep-rebuild.
	removed_nodes = set()
		for node in mygraph.root_nodes():
			if not isinstance(node, Package) or \
				node.installed or node.onlydeps:
				removed_nodes.add(node)
		self.spinner.update()
		mygraph.difference_update(removed_nodes)
		if not removed_nodes:
		removed_nodes.clear()
	self._merge_order_bias(mygraph)
	def cmp_circular_bias(n1, n2):
		"""
		RDEPEND is stronger than PDEPEND and this function
		measures such a strength bias within a circular
		dependency relationship.
		"""
		n1_n2_medium = n2 in mygraph.child_nodes(n1,
			ignore_priority=priority_range.ignore_medium_soft)
		n2_n1_medium = n1 in mygraph.child_nodes(n2,
			ignore_priority=priority_range.ignore_medium_soft)
		if n1_n2_medium == n2_n1_medium:
	myblocker_uninstalls = self._blocker_uninstalls.copy()

	# Contains uninstall tasks that have been scheduled to
	# occur after overlapping blockers have been installed.
	scheduled_uninstalls = set()
	# Contains any Uninstall tasks that have been ignored
	# in order to avoid the circular deps code path. These
	# correspond to blocker conflicts that could not be
	ignored_uninstall_tasks = set()
	have_uninstall_task = False
	complete = "complete" in self.myparams

	def get_nodes(**kwargs):
		"""
		Returns leaf nodes excluding Uninstall instances
		since those should be executed as late as possible.
		"""
		return [node for node in mygraph.leaf_nodes(**kwargs) \
			if isinstance(node, Package) and \
			(node.operation != "uninstall" or \
			node in scheduled_uninstalls)]

	# sys-apps/portage needs special treatment if ROOT="/"
	running_root = self._running_root.root
	from portage.const import PORTAGE_PACKAGE_ATOM
	runtime_deps = InternalPackageSet(
		initial_atoms=[PORTAGE_PACKAGE_ATOM])
	running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
		PORTAGE_PACKAGE_ATOM)
	replacement_portage = self.mydbapi[running_root].match_pkgs(
		PORTAGE_PACKAGE_ATOM)

		running_portage = running_portage[0]
		running_portage = None

	if replacement_portage:
		replacement_portage = replacement_portage[0]
		replacement_portage = None

	if replacement_portage == running_portage:
		replacement_portage = None

	if replacement_portage is not None:
		# update from running_portage to replacement_portage asap
		asap_nodes.append(replacement_portage)

	if running_portage is not None:
			portage_rdepend = self._select_atoms_highest_available(
				running_root, running_portage.metadata["RDEPEND"],
				myuse=running_portage.use.enabled,
				parent=running_portage, strict=False)
		except portage.exception.InvalidDependString, e:
			portage.writemsg("!!! Invalid RDEPEND in " + \
				"'%svar/db/pkg/%s/RDEPEND': %s\n" % \
				(running_root, running_portage.cpv, e), noiselevel=-1)
			portage_rdepend = []
		runtime_deps.update(atom for atom in portage_rdepend \
			if not atom.startswith("!"))

	def gather_deps(ignore_priority, mergeable_nodes,
		selected_nodes, node):
		"""
		Recursively gather a group of nodes that RDEPEND on
		eachother. This ensures that they are merged as a group
		and get their RDEPENDs satisfied as soon as possible.
		"""
		if node in selected_nodes:
		if node not in mergeable_nodes:
		if node == replacement_portage and \
			mygraph.child_nodes(node,
			ignore_priority=priority_range.ignore_medium_soft):
			# Make sure that portage always has all of it's
			# RDEPENDs installed first.
		selected_nodes.add(node)
		for child in mygraph.child_nodes(node,
			ignore_priority=ignore_priority):
			if not gather_deps(ignore_priority,
				mergeable_nodes, selected_nodes, child):

	def ignore_uninst_or_med(priority):
		# Blocker edges are always ignorable for leaf-node selection.
		if priority is BlockerDepPriority.instance:
		return priority_range.ignore_medium(priority)

	def ignore_uninst_or_med_soft(priority):
		# Blocker edges are always ignorable for leaf-node selection.
		if priority is BlockerDepPriority.instance:
		return priority_range.ignore_medium_soft(priority)

	tree_mode = "--tree" in self.myopts
	# Tracks whether or not the current iteration should prefer asap_nodes
	# if available. This is set to False when the previous iteration
	# failed to select any nodes. It is reset whenever nodes are
	# successfully selected.

	# Controls whether or not the current iteration should drop edges that
	# are "satisfied" by installed packages, in order to solve circular
	# dependencies. The deep runtime dependencies of installed packages are
	# not checked in this case (bug #199856), so it must be avoided
	# whenever possible.
	drop_satisfied = False

	# State of variables for successive iterations that loosen the
	# criteria for node selection.
	# iteration prefer_asap drop_satisfied
	# If no nodes are selected on the last iteration, it is due to
	# unresolved blockers or circular dependencies.

	while not mygraph.empty():
		self.spinner.update()
		selected_nodes = None
		ignore_priority = None
		if drop_satisfied or (prefer_asap and asap_nodes):
			priority_range = DepPrioritySatisfiedRange
			priority_range = DepPriorityNormalRange
		if prefer_asap and asap_nodes:
			# ASAP nodes are merged before their soft deps. Go ahead and
			# select root nodes here if necessary, since it's typical for
			# the parent to have been removed from the graph already.
			asap_nodes = [node for node in asap_nodes \
				if mygraph.contains(node)]
			for node in asap_nodes:
				if not mygraph.child_nodes(node,
					ignore_priority=priority_range.ignore_soft):
					selected_nodes = [node]
					asap_nodes.remove(node)

		if not selected_nodes and \
			not (prefer_asap and asap_nodes):
			for i in xrange(priority_range.NONE,
				priority_range.MEDIUM_SOFT + 1):
				ignore_priority = priority_range.ignore_priority[i]
				nodes = get_nodes(ignore_priority=ignore_priority)
					# If there is a mix of uninstall nodes with other
					# types, save the uninstall nodes for later since
					# sometimes a merge node will render an uninstall
					# node unnecessary (due to occupying the same slot),
					# and we want to avoid executing a separate uninstall
					# task in that case.
						good_uninstalls = []
						with_some_uninstalls_excluded = []
							if node.operation == "uninstall":
								slot_node = self.mydbapi[node.root
									].match_pkgs(node.slot_atom)
									slot_node[0].operation == "merge":
									good_uninstalls.append(node)
								with_some_uninstalls_excluded.append(node)
							nodes = good_uninstalls
						elif with_some_uninstalls_excluded:
							nodes = with_some_uninstalls_excluded

					if ignore_priority is None and not tree_mode:
						# Greedily pop all of these nodes since no
						# relationship has been ignored. This optimization
						# destroys --tree output, so it's disabled in tree
						selected_nodes = nodes
						# For optimal merge order:
						# * Only pop one node.
						# * Removing a root node (node without a parent)
						#   will not produce a leaf node, so avoid it.
						# * It's normal for a selected uninstall to be a
						#   root node, so don't check them for parents.
							if node.operation == "uninstall" or \
								mygraph.parent_nodes(node):
								selected_nodes = [node]

		if not selected_nodes:
			nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
				mergeable_nodes = set(nodes)
				if prefer_asap and asap_nodes:
				for i in xrange(priority_range.SOFT,
					priority_range.MEDIUM_SOFT + 1):
					ignore_priority = priority_range.ignore_priority[i]
						if not mygraph.parent_nodes(node):
						selected_nodes = set()
						if gather_deps(ignore_priority,
							mergeable_nodes, selected_nodes, node):
							selected_nodes = None

			if prefer_asap and asap_nodes and not selected_nodes:
				# We failed to find any asap nodes to merge, so ignore
				# them for the next iteration.

		if selected_nodes and ignore_priority is not None:
			# Try to merge ignored medium_soft deps as soon as possible
			# if they're not satisfied by installed packages.
			for node in selected_nodes:
				children = set(mygraph.child_nodes(node))
				soft = children.difference(
					mygraph.child_nodes(node,
					ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
				medium_soft = children.difference(
					mygraph.child_nodes(node,
						DepPrioritySatisfiedRange.ignore_medium_soft))
				medium_soft.difference_update(soft)
				for child in medium_soft:
					if child in selected_nodes:
					if child in asap_nodes:
					asap_nodes.append(child)

		if selected_nodes and len(selected_nodes) > 1:
			if not isinstance(selected_nodes, list):
				selected_nodes = list(selected_nodes)
			selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))

		if not selected_nodes and not myblocker_uninstalls.is_empty():
			# An Uninstall task needs to be executed in order to
			# avoid conflict if possible.

				priority_range = DepPrioritySatisfiedRange
				priority_range = DepPriorityNormalRange

			mergeable_nodes = get_nodes(
				ignore_priority=ignore_uninst_or_med)

			min_parent_deps = None
			for task in myblocker_uninstalls.leaf_nodes():
				# Do some sanity checks so that system or world packages
				# don't get uninstalled inappropriately here (only really
				# necessary when --complete-graph has not been enabled).

				if task in ignored_uninstall_tasks:

				if task in scheduled_uninstalls:
					# It's been scheduled but it hasn't
					# been executed yet due to dependence
					# on installation of blocking packages.

				root_config = self.roots[task.root]
				inst_pkg = self._pkg_cache[
					("installed", task.root, task.cpv, "nomerge")]

				if self.digraph.contains(inst_pkg):

				forbid_overlap = False
				heuristic_overlap = False
				for blocker in myblocker_uninstalls.parent_nodes(task):
					if blocker.eapi in ("0", "1"):
						heuristic_overlap = True
					elif blocker.atom.blocker.overlap.forbid:
						forbid_overlap = True
				if forbid_overlap and running_root == task.root:

				if heuristic_overlap and running_root == task.root:
					# Never uninstall sys-apps/portage or it's essential
					# dependencies, except through replacement.
						runtime_dep_atoms = \
							list(runtime_deps.iterAtomsForPackage(task))
					except portage.exception.InvalidDependString, e:
						portage.writemsg("!!! Invalid PROVIDE in " + \
							"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
							(task.root, task.cpv, e), noiselevel=-1)

					# Don't uninstall a runtime dep if it appears
					# to be the only suitable one installed.
					vardb = root_config.trees["vartree"].dbapi
					for atom in runtime_dep_atoms:
						other_version = None
						for pkg in vardb.match_pkgs(atom):
							if pkg.cpv == task.cpv and \
								pkg.metadata["COUNTER"] == \
								task.metadata["COUNTER"]:
						if other_version is None:

					# For packages in the system set, don't take
					# any chances. If the conflict can't be resolved
					# by a normal replacement operation then abort.
						for atom in root_config.sets[
							"system"].iterAtomsForPackage(task):
					except portage.exception.InvalidDependString, e:
						portage.writemsg("!!! Invalid PROVIDE in " + \
							"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
							(task.root, task.cpv, e), noiselevel=-1)

				# Note that the world check isn't always
				# necessary since self._complete_graph() will
				# add all packages from the system and world sets to the
				# graph. This just allows unresolved conflicts to be
				# detected as early as possible, which makes it possible
				# to avoid calling self._complete_graph() when it is
				# unnecessary due to blockers triggering an abortion.

					# For packages in the world set, go ahead an uninstall
					# when necessary, as long as the atom will be satisfied
					# in the final state.
					graph_db = self.mydbapi[task.root]
						for atom in root_config.sets[
							"world"].iterAtomsForPackage(task):
							for pkg in graph_db.match_pkgs(atom):
								self._blocked_world_pkgs[inst_pkg] = atom
					except portage.exception.InvalidDependString, e:
						portage.writemsg("!!! Invalid PROVIDE in " + \
							"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
							(task.root, task.cpv, e), noiselevel=-1)

				# Check the deps of parent nodes to ensure that
				# the chosen task produces a leaf node. Maybe
				# this can be optimized some more to make the
				# best possible choice, but the current algorithm
				# is simple and should be near optimal for most
				mergeable_parent = False
				for parent in mygraph.parent_nodes(task):
					parent_deps.update(mygraph.child_nodes(parent,
						ignore_priority=priority_range.ignore_medium_soft))
					if parent in mergeable_nodes and \
						gather_deps(ignore_uninst_or_med_soft,
						mergeable_nodes, set(), parent):
						mergeable_parent = True

				if not mergeable_parent:

				parent_deps.remove(task)
				if min_parent_deps is None or \
					len(parent_deps) < min_parent_deps:
					min_parent_deps = len(parent_deps)

			if uninst_task is not None:
				# The uninstall is performed only after blocking
				# packages have been merged on top of it. File
				# collisions between blocking packages are detected
				# and removed from the list of files to be uninstalled.
				scheduled_uninstalls.add(uninst_task)
				parent_nodes = mygraph.parent_nodes(uninst_task)

				# Reverse the parent -> uninstall edges since we want
				# to do the uninstall after blocking packages have
				# been merged on top of it.
				mygraph.remove(uninst_task)
				for blocked_pkg in parent_nodes:
					mygraph.add(blocked_pkg, uninst_task,
						priority=BlockerDepPriority.instance)
					scheduler_graph.remove_edge(uninst_task, blocked_pkg)
					scheduler_graph.add(blocked_pkg, uninst_task,
						priority=BlockerDepPriority.instance)

				# Reset the state variables for leaf node selection and
				# continue trying to select leaf nodes.
				drop_satisfied = False

		if not selected_nodes:
			# Only select root nodes as a last resort. This case should
			# only trigger when the graph is nearly empty and the only
			# remaining nodes are isolated (no parents or children). Since
			# the nodes must be isolated, ignore_priority is not needed.
			selected_nodes = get_nodes()

		if not selected_nodes and not drop_satisfied:
			drop_satisfied = True

		if not selected_nodes and not myblocker_uninstalls.is_empty():
			# If possible, drop an uninstall task here in order to avoid
			# the circular deps code path. The corresponding blocker will
			# still be counted as an unresolved conflict.
			for node in myblocker_uninstalls.leaf_nodes():
					mygraph.remove(node)
					ignored_uninstall_tasks.add(node)

			if uninst_task is not None:
				# Reset the state variables for leaf node selection and
				# continue trying to select leaf nodes.
				drop_satisfied = False

		if not selected_nodes:
			self._circular_deps_for_display = mygraph
			raise self._unknown_internal_error()

		# At this point, we've succeeded in selecting one or more nodes, so
		# reset state variables for leaf node selection.
		drop_satisfied = False

		mygraph.difference_update(selected_nodes)

		for node in selected_nodes:
			if isinstance(node, Package) and \
				node.operation == "nomerge":

			# Handle interactions between blockers
			# and uninstallation tasks.
			solved_blockers = set()
			if isinstance(node, Package) and \
				"uninstall" == node.operation:
				have_uninstall_task = True
				vardb = self.trees[node.root]["vartree"].dbapi
				previous_cpv = vardb.match(node.slot_atom)
					# The package will be replaced by this one, so remove
					# the corresponding Uninstall task if necessary.
					previous_cpv = previous_cpv[0]
						("installed", node.root, previous_cpv, "uninstall")
						mygraph.remove(uninst_task)

			if uninst_task is not None and \
				uninst_task not in ignored_uninstall_tasks and \
				myblocker_uninstalls.contains(uninst_task):
				blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
				myblocker_uninstalls.remove(uninst_task)
				# Discard any blockers that this Uninstall solves.
				for blocker in blocker_nodes:
					if not myblocker_uninstalls.child_nodes(blocker):
						myblocker_uninstalls.remove(blocker)
						solved_blockers.add(blocker)

			retlist.append(node)

			if (isinstance(node, Package) and \
				"uninstall" == node.operation) or \
				(uninst_task is not None and \
				uninst_task in scheduled_uninstalls):
				# Include satisfied blockers in the merge list
				# since the user might be interested and also
				# it serves as an indicator that blocking packages
				# will be temporarily installed simultaneously.
				for blocker in solved_blockers:
					retlist.append(Blocker(atom=blocker.atom,
						root=blocker.root, eapi=blocker.eapi,

	unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
	for node in myblocker_uninstalls.root_nodes():
		unsolvable_blockers.add(node)

	for blocker in unsolvable_blockers:
		retlist.append(blocker)

	# If any Uninstall tasks need to be executed in order
	# to avoid a conflict, complete the graph with any
	# dependencies that may have been initially
	# neglected (to ensure that unsafe Uninstall tasks
	# are properly identified and blocked from execution).
	if have_uninstall_task and \
		not unsolvable_blockers:
		self.myparams.add("complete")
		raise self._serialize_tasks_retry("")

	if unsolvable_blockers and \
		not self._accept_blocker_conflicts():
		self._unsatisfied_blockers_for_display = unsolvable_blockers
		self._serialized_tasks_cache = retlist[:]
		self._scheduler_graph = scheduler_graph
		raise self._unknown_internal_error()

	if self._slot_collision_info and \
		not self._accept_blocker_conflicts():
		self._serialized_tasks_cache = retlist[:]
		self._scheduler_graph = scheduler_graph
		raise self._unknown_internal_error()

	return retlist, scheduler_graph
def _show_circular_deps(self, mygraph):
	# Print diagnostic output for a circular-dependency failure: prune
	# acyclic root nodes, display the remaining cycle members with
	# --tree formatting, then print the digraph and advice.
	# No leaf nodes are available, so we have a circular
	# dependency panic situation. Reduce the noise level to a
	# minimum via repeated elimination of root nodes since they
	# have no parents and thus can not be part of a cycle.
		root_nodes = mygraph.root_nodes(
			ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
		mygraph.difference_update(root_nodes)
	# Display the USE flags that are enabled on nodes that are part
	# of dependency cycles in case that helps the user decide to
	# disable some of them.
	tempgraph = mygraph.copy()
	while not tempgraph.empty():
		nodes = tempgraph.leaf_nodes()
			# Fall back to graph order when no leaf exists (cycle).
			node = tempgraph.order[0]
		display_order.append(node)
		tempgraph.remove(node)
	display_order.reverse()
	# Force a verbose tree display for the cycle members.
	self.myopts.pop("--quiet", None)
	self.myopts.pop("--verbose", None)
	self.myopts["--tree"] = True
	portage.writemsg("\n\n", noiselevel=-1)
	self.display(display_order)
	prefix = colorize("BAD", " * ")
	portage.writemsg("\n", noiselevel=-1)
	portage.writemsg(prefix + "Error: circular dependencies:\n",
	portage.writemsg("\n", noiselevel=-1)
	mygraph.debug_print()
	portage.writemsg("\n", noiselevel=-1)
	portage.writemsg(prefix + "Note that circular dependencies " + \
		"can often be avoided by temporarily\n", noiselevel=-1)
	portage.writemsg(prefix + "disabling USE flags that trigger " + \
		"optional dependencies.\n", noiselevel=-1)
7558 def _show_merge_list(self):
7559 if self._serialized_tasks_cache is not None and \
7560 not (self._displayed_list and \
7561 (self._displayed_list == self._serialized_tasks_cache or \
7562 self._displayed_list == \
7563 list(reversed(self._serialized_tasks_cache)))):
7564 display_list = self._serialized_tasks_cache[:]
7565 if "--tree" in self.myopts:
7566 display_list.reverse()
7567 self.display(display_list)
def _show_unsatisfied_blockers(self, blockers):
	# Report blocker conflicts: show the merge list, print an error
	# banner, then list each conflicting package together with (up to
	# max_parents of) the parents/atoms that pulled it in.
	self._show_merge_list()
	msg = "Error: The above package list contains " + \
		"packages which cannot be installed " + \
		"at the same time on the same system."
	prefix = colorize("BAD", " * ")
	from textwrap import wrap
	portage.writemsg("\n", noiselevel=-1)
	for line in wrap(msg, 70):
		portage.writemsg(prefix + line + "\n", noiselevel=-1)

	# Display the conflicting packages along with the packages
	# that pulled them in. This is helpful for troubleshooting
	# cases in which blockers don't solve automatically and
	# the reasons are not apparent from the normal merge list
	for blocker in blockers:
		for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
			self._blocker_parents.parent_nodes(blocker)):
			parent_atoms = self._parent_atoms.get(pkg)
			if not parent_atoms:
				# Fall back to a world-set attribution recorded earlier.
				atom = self._blocked_world_pkgs.get(pkg)
				if atom is not None:
					parent_atoms = set([("@world", atom)])
				conflict_pkgs[pkg] = parent_atoms

		# Reduce noise by pruning packages that are only
		# pulled in by other conflict packages.
		for pkg, parent_atoms in conflict_pkgs.iteritems():
			relevant_parent = False
			for parent, atom in parent_atoms:
				if parent not in conflict_pkgs:
					relevant_parent = True
			if not relevant_parent:
				pruned_pkgs.add(pkg)
		for pkg in pruned_pkgs:
			del conflict_pkgs[pkg]

	# Max number of parents shown, to avoid flooding the display.
	for pkg, parent_atoms in conflict_pkgs.iteritems():

		# Prefer packages that are not directly involved in a conflict.
		for parent_atom in parent_atoms:
			if len(pruned_list) >= max_parents:
			parent, atom = parent_atom
			if parent not in conflict_pkgs:
				pruned_list.add(parent_atom)

		for parent_atom in parent_atoms:
			if len(pruned_list) >= max_parents:
			pruned_list.add(parent_atom)

		omitted_parents = len(parent_atoms) - len(pruned_list)
		msg.append(indent + "%s pulled in by\n" % pkg)

		for parent_atom in pruned_list:
			parent, atom = parent_atom
			msg.append(2*indent)
			if isinstance(parent,
				(PackageArg, AtomArg)):
				# For PackageArg and AtomArg types, it's
				# redundant to display the atom attribute.
				msg.append(str(parent))
				# Display the specific atom from SetArg or
				msg.append("%s required by %s" % (atom, parent))

			msg.append(2*indent)
			msg.append("(and %d more)\n" % omitted_parents)

	sys.stderr.write("".join(msg))

	if "--quiet" not in self.myopts:
		show_blocker_docs_link()
7665 def display(self, mylist, favorites=[], verbosity=None):
7667 # This is used to prevent display_problems() from
7668 # redundantly displaying this exact same merge list
7669 # again via _show_merge_list().
7670 self._displayed_list = mylist
7672 if verbosity is None:
7673 verbosity = ("--quiet" in self.myopts and 1 or \
7674 "--verbose" in self.myopts and 3 or 2)
7675 favorites_set = InternalPackageSet(favorites)
7676 oneshot = "--oneshot" in self.myopts or \
7677 "--onlydeps" in self.myopts
7678 columns = "--columns" in self.myopts
7683 counters = PackageCounters()
7685 if verbosity == 1 and "--verbose" not in self.myopts:
7686 def create_use_string(*args):
7689 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7691 is_new, reinst_flags,
7692 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7693 alphabetical=("--alphabetical" in self.myopts)):
7701 cur_iuse = set(cur_iuse)
7702 enabled_flags = cur_iuse.intersection(cur_use)
7703 removed_iuse = set(old_iuse).difference(cur_iuse)
7704 any_iuse = cur_iuse.union(old_iuse)
7705 any_iuse = list(any_iuse)
7707 for flag in any_iuse:
7710 reinst_flag = reinst_flags and flag in reinst_flags
7711 if flag in enabled_flags:
7713 if is_new or flag in old_use and \
7714 (all_flags or reinst_flag):
7715 flag_str = red(flag)
7716 elif flag not in old_iuse:
7717 flag_str = yellow(flag) + "%*"
7718 elif flag not in old_use:
7719 flag_str = green(flag) + "*"
7720 elif flag in removed_iuse:
7721 if all_flags or reinst_flag:
7722 flag_str = yellow("-" + flag) + "%"
7725 flag_str = "(" + flag_str + ")"
7726 removed.append(flag_str)
7729 if is_new or flag in old_iuse and \
7730 flag not in old_use and \
7731 (all_flags or reinst_flag):
7732 flag_str = blue("-" + flag)
7733 elif flag not in old_iuse:
7734 flag_str = yellow("-" + flag)
7735 if flag not in iuse_forced:
7737 elif flag in old_use:
7738 flag_str = green("-" + flag) + "*"
7740 if flag in iuse_forced:
7741 flag_str = "(" + flag_str + ")"
7743 enabled.append(flag_str)
7745 disabled.append(flag_str)
7748 ret = " ".join(enabled)
7750 ret = " ".join(enabled + disabled + removed)
7752 ret = '%s="%s" ' % (name, ret)
7755 repo_display = RepoDisplay(self.roots)
7759 mygraph = self.digraph.copy()
7761 # If there are any Uninstall instances, add the corresponding
7762 # blockers to the digraph (useful for --tree display).
7764 executed_uninstalls = set(node for node in mylist \
7765 if isinstance(node, Package) and node.operation == "unmerge")
7767 for uninstall in self._blocker_uninstalls.leaf_nodes():
7768 uninstall_parents = \
7769 self._blocker_uninstalls.parent_nodes(uninstall)
7770 if not uninstall_parents:
7773 # Remove the corresponding "nomerge" node and substitute
7774 # the Uninstall node.
7775 inst_pkg = self._pkg_cache[
7776 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7778 mygraph.remove(inst_pkg)
7783 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7785 inst_pkg_blockers = []
7787 # Break the Package -> Uninstall edges.
7788 mygraph.remove(uninstall)
7790 # Resolution of a package's blockers
7791 # depend on it's own uninstallation.
7792 for blocker in inst_pkg_blockers:
7793 mygraph.add(uninstall, blocker)
7795 # Expand Package -> Uninstall edges into
7796 # Package -> Blocker -> Uninstall edges.
7797 for blocker in uninstall_parents:
7798 mygraph.add(uninstall, blocker)
7799 for parent in self._blocker_parents.parent_nodes(blocker):
7800 if parent != inst_pkg:
7801 mygraph.add(blocker, parent)
7803 # If the uninstall task did not need to be executed because
7804 # of an upgrade, display Blocker -> Upgrade edges since the
7805 # corresponding Blocker -> Uninstall edges will not be shown.
7807 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7808 if upgrade_node is not None and \
7809 uninstall not in executed_uninstalls:
7810 for blocker in uninstall_parents:
7811 mygraph.add(upgrade_node, blocker)
7813 unsatisfied_blockers = []
7818 if isinstance(x, Blocker) and not x.satisfied:
7819 unsatisfied_blockers.append(x)
7822 if "--tree" in self.myopts:
7823 depth = len(tree_nodes)
7824 while depth and graph_key not in \
7825 mygraph.child_nodes(tree_nodes[depth-1]):
7828 tree_nodes = tree_nodes[:depth]
7829 tree_nodes.append(graph_key)
7830 display_list.append((x, depth, True))
7831 shown_edges.add((graph_key, tree_nodes[depth-1]))
7833 traversed_nodes = set() # prevent endless circles
7834 traversed_nodes.add(graph_key)
7835 def add_parents(current_node, ordered):
7837 # Do not traverse to parents if this node is an
7838 # an argument or a direct member of a set that has
7839 # been specified as an argument (system or world).
7840 if current_node not in self._set_nodes:
7841 parent_nodes = mygraph.parent_nodes(current_node)
7843 child_nodes = set(mygraph.child_nodes(current_node))
7844 selected_parent = None
7845 # First, try to avoid a direct cycle.
7846 for node in parent_nodes:
7847 if not isinstance(node, (Blocker, Package)):
7849 if node not in traversed_nodes and \
7850 node not in child_nodes:
7851 edge = (current_node, node)
7852 if edge in shown_edges:
7854 selected_parent = node
7856 if not selected_parent:
7857 # A direct cycle is unavoidable.
7858 for node in parent_nodes:
7859 if not isinstance(node, (Blocker, Package)):
7861 if node not in traversed_nodes:
7862 edge = (current_node, node)
7863 if edge in shown_edges:
7865 selected_parent = node
7868 shown_edges.add((current_node, selected_parent))
7869 traversed_nodes.add(selected_parent)
7870 add_parents(selected_parent, False)
7871 display_list.append((current_node,
7872 len(tree_nodes), ordered))
7873 tree_nodes.append(current_node)
7875 add_parents(graph_key, True)
7877 display_list.append((x, depth, True))
7878 mylist = display_list
7879 for x in unsatisfied_blockers:
7880 mylist.append((x, 0, True))
7882 last_merge_depth = 0
7883 for i in xrange(len(mylist)-1,-1,-1):
7884 graph_key, depth, ordered = mylist[i]
7885 if not ordered and depth == 0 and i > 0 \
7886 and graph_key == mylist[i-1][0] and \
7887 mylist[i-1][1] == 0:
7888 # An ordered node got a consecutive duplicate when the tree was
7892 if ordered and graph_key[-1] != "nomerge":
7893 last_merge_depth = depth
7895 if depth >= last_merge_depth or \
7896 i < len(mylist) - 1 and \
7897 depth >= mylist[i+1][1]:
7900 from portage import flatten
7901 from portage.dep import use_reduce, paren_reduce
7902 # files to fetch list - avoids counting a same file twice
7903 # in size display (verbose mode)
7906 # Use this set to detect when all the "repoadd" strings are "[0]"
7907 # and disable the entire repo display in this case.
7910 for mylist_index in xrange(len(mylist)):
7911 x, depth, ordered = mylist[mylist_index]
7915 portdb = self.trees[myroot]["porttree"].dbapi
7916 bindb = self.trees[myroot]["bintree"].dbapi
7917 vardb = self.trees[myroot]["vartree"].dbapi
7918 vartree = self.trees[myroot]["vartree"]
7919 pkgsettings = self.pkgsettings[myroot]
7922 indent = " " * depth
7924 if isinstance(x, Blocker):
7926 blocker_style = "PKG_BLOCKER_SATISFIED"
7927 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
7929 blocker_style = "PKG_BLOCKER"
7930 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
7932 counters.blocks += 1
7934 counters.blocks_satisfied += 1
7935 resolved = portage.key_expand(
7936 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7937 if "--columns" in self.myopts and "--quiet" in self.myopts:
7938 addl += " " + colorize(blocker_style, resolved)
7940 addl = "[%s %s] %s%s" % \
7941 (colorize(blocker_style, "blocks"),
7942 addl, indent, colorize(blocker_style, resolved))
7943 block_parents = self._blocker_parents.parent_nodes(x)
7944 block_parents = set([pnode[2] for pnode in block_parents])
7945 block_parents = ", ".join(block_parents)
7947 addl += colorize(blocker_style,
7948 " (\"%s\" is blocking %s)") % \
7949 (str(x.atom).lstrip("!"), block_parents)
7951 addl += colorize(blocker_style,
7952 " (is blocking %s)") % block_parents
7953 if isinstance(x, Blocker) and x.satisfied:
7958 blockers.append(addl)
7961 pkg_merge = ordered and pkg_status == "merge"
7962 if not pkg_merge and pkg_status == "merge":
7963 pkg_status = "nomerge"
7964 built = pkg_type != "ebuild"
7965 installed = pkg_type == "installed"
7967 metadata = pkg.metadata
7969 repo_name = metadata["repository"]
7970 if pkg_type == "ebuild":
7971 ebuild_path = portdb.findname(pkg_key)
7972 if not ebuild_path: # shouldn't happen
7973 raise portage.exception.PackageNotFound(pkg_key)
7974 repo_path_real = os.path.dirname(os.path.dirname(
7975 os.path.dirname(ebuild_path)))
7977 repo_path_real = portdb.getRepositoryPath(repo_name)
7978 pkg_use = list(pkg.use.enabled)
7980 restrict = flatten(use_reduce(paren_reduce(
7981 pkg.metadata["RESTRICT"]), uselist=pkg_use))
7982 except portage.exception.InvalidDependString, e:
7983 if not pkg.installed:
7984 show_invalid_depstring_notice(x,
7985 pkg.metadata["RESTRICT"], str(e))
7989 if "ebuild" == pkg_type and x[3] != "nomerge" and \
7990 "fetch" in restrict:
7993 counters.restrict_fetch += 1
7994 if portdb.fetch_check(pkg_key, pkg_use):
7997 counters.restrict_fetch_satisfied += 1
7999 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
8000 #param is used for -u, where you still *do* want to see when something is being upgraded.
8003 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8004 if vardb.cpv_exists(pkg_key):
8005 addl=" "+yellow("R")+fetch+" "
8008 counters.reinst += 1
8009 elif pkg_status == "uninstall":
8010 counters.uninst += 1
8011 # filter out old-style virtual matches
8012 elif installed_versions and \
8013 portage.cpv_getkey(installed_versions[0]) == \
8014 portage.cpv_getkey(pkg_key):
8015 myinslotlist = vardb.match(pkg.slot_atom)
8016 # If this is the first install of a new-style virtual, we
8017 # need to filter out old-style virtual matches.
8018 if myinslotlist and \
8019 portage.cpv_getkey(myinslotlist[0]) != \
8020 portage.cpv_getkey(pkg_key):
8023 myoldbest = myinslotlist[:]
8025 if not portage.dep.cpvequal(pkg_key,
8026 portage.best([pkg_key] + myoldbest)):
8028 addl += turquoise("U")+blue("D")
8030 counters.downgrades += 1
8033 addl += turquoise("U") + " "
8035 counters.upgrades += 1
8037 # New slot, mark it new.
8038 addl = " " + green("NS") + fetch + " "
8039 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8041 counters.newslot += 1
8043 if "--changelog" in self.myopts:
8044 inst_matches = vardb.match(pkg.slot_atom)
8046 changelogs.extend(self.calc_changelog(
8047 portdb.findname(pkg_key),
8048 inst_matches[0], pkg_key))
8050 addl = " " + green("N") + " " + fetch + " "
8059 forced_flags = set()
8060 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8061 forced_flags.update(pkgsettings.useforce)
8062 forced_flags.update(pkgsettings.usemask)
8064 cur_use = [flag for flag in pkg.use.enabled \
8065 if flag in pkg.iuse.all]
8066 cur_iuse = sorted(pkg.iuse.all)
8068 if myoldbest and myinslotlist:
8069 previous_cpv = myoldbest[0]
8071 previous_cpv = pkg.cpv
8072 if vardb.cpv_exists(previous_cpv):
8073 old_iuse, old_use = vardb.aux_get(
8074 previous_cpv, ["IUSE", "USE"])
8075 old_iuse = list(set(
8076 filter_iuse_defaults(old_iuse.split())))
8078 old_use = old_use.split()
8085 old_use = [flag for flag in old_use if flag in old_iuse]
8087 use_expand = pkgsettings["USE_EXPAND"].lower().split()
8089 use_expand.reverse()
8090 use_expand_hidden = \
8091 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8093 def map_to_use_expand(myvals, forcedFlags=False,
8097 for exp in use_expand:
8100 for val in myvals[:]:
8101 if val.startswith(exp.lower()+"_"):
8102 if val in forced_flags:
8103 forced[exp].add(val[len(exp)+1:])
8104 ret[exp].append(val[len(exp)+1:])
8107 forced["USE"] = [val for val in myvals \
8108 if val in forced_flags]
8110 for exp in use_expand_hidden:
8116 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8117 # are the only thing that triggered reinstallation.
8118 reinst_flags_map = {}
8119 reinstall_for_flags = self._reinstall_nodes.get(pkg)
8120 reinst_expand_map = None
8121 if reinstall_for_flags:
8122 reinst_flags_map = map_to_use_expand(
8123 list(reinstall_for_flags), removeHidden=False)
8124 for k in list(reinst_flags_map):
8125 if not reinst_flags_map[k]:
8126 del reinst_flags_map[k]
8127 if not reinst_flags_map.get("USE"):
8128 reinst_expand_map = reinst_flags_map.copy()
8129 reinst_expand_map.pop("USE", None)
8130 if reinst_expand_map and \
8131 not set(reinst_expand_map).difference(
8133 use_expand_hidden = \
8134 set(use_expand_hidden).difference(
8137 cur_iuse_map, iuse_forced = \
8138 map_to_use_expand(cur_iuse, forcedFlags=True)
8139 cur_use_map = map_to_use_expand(cur_use)
8140 old_iuse_map = map_to_use_expand(old_iuse)
8141 old_use_map = map_to_use_expand(old_use)
8144 use_expand.insert(0, "USE")
8146 for key in use_expand:
8147 if key in use_expand_hidden:
8149 verboseadd += create_use_string(key.upper(),
8150 cur_iuse_map[key], iuse_forced[key],
8151 cur_use_map[key], old_iuse_map[key],
8152 old_use_map[key], is_new,
8153 reinst_flags_map.get(key))
8158 if pkg_type == "ebuild" and pkg_merge:
8160 myfilesdict = portdb.getfetchsizes(pkg_key,
8161 useflags=pkg_use, debug=self.edebug)
8162 except portage.exception.InvalidDependString, e:
8163 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8164 show_invalid_depstring_notice(x, src_uri, str(e))
8167 if myfilesdict is None:
8168 myfilesdict="[empty/missing/bad digest]"
8170 for myfetchfile in myfilesdict:
8171 if myfetchfile not in myfetchlist:
8172 mysize+=myfilesdict[myfetchfile]
8173 myfetchlist.append(myfetchfile)
8175 counters.totalsize += mysize
8176 verboseadd += format_size(mysize)
8179 # assign index for a previous version in the same slot
8180 has_previous = False
8181 repo_name_prev = None
8182 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8184 slot_matches = vardb.match(slot_atom)
8187 repo_name_prev = vardb.aux_get(slot_matches[0],
8190 # now use the data to generate output
8191 if pkg.installed or not has_previous:
8192 repoadd = repo_display.repoStr(repo_path_real)
8194 repo_path_prev = None
8196 repo_path_prev = portdb.getRepositoryPath(
8198 if repo_path_prev == repo_path_real:
8199 repoadd = repo_display.repoStr(repo_path_real)
8201 repoadd = "%s=>%s" % (
8202 repo_display.repoStr(repo_path_prev),
8203 repo_display.repoStr(repo_path_real))
8205 repoadd_set.add(repoadd)
8207 xs = [portage.cpv_getkey(pkg_key)] + \
8208 list(portage.catpkgsplit(pkg_key)[2:])
8215 if "COLUMNWIDTH" in self.settings:
8217 mywidth = int(self.settings["COLUMNWIDTH"])
8218 except ValueError, e:
8219 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8221 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8222 self.settings["COLUMNWIDTH"], noiselevel=-1)
8224 oldlp = mywidth - 30
8227 # Convert myoldbest from a list to a string.
8231 for pos, key in enumerate(myoldbest):
8232 key = portage.catpkgsplit(key)[2] + \
8233 "-" + portage.catpkgsplit(key)[3]
8234 if key[-3:] == "-r0":
8236 myoldbest[pos] = key
8237 myoldbest = blue("["+", ".join(myoldbest)+"]")
8240 root_config = self.roots[myroot]
8241 system_set = root_config.sets["system"]
8242 world_set = root_config.sets["world"]
8247 pkg_system = system_set.findAtomForPackage(pkg)
8248 pkg_world = world_set.findAtomForPackage(pkg)
8249 if not (oneshot or pkg_world) and \
8250 myroot == self.target_root and \
8251 favorites_set.findAtomForPackage(pkg):
8252 # Maybe it will be added to world now.
8253 if create_world_atom(pkg, favorites_set, root_config):
8255 except portage.exception.InvalidDependString:
8256 # This is reported elsewhere if relevant.
8259 def pkgprint(pkg_str):
8262 return colorize("PKG_MERGE_SYSTEM", pkg_str)
8264 return colorize("PKG_MERGE_WORLD", pkg_str)
8266 return colorize("PKG_MERGE", pkg_str)
8267 elif pkg_status == "uninstall":
8268 return colorize("PKG_UNINSTALL", pkg_str)
8271 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8273 return colorize("PKG_NOMERGE_WORLD", pkg_str)
8275 return colorize("PKG_NOMERGE", pkg_str)
8278 properties = flatten(use_reduce(paren_reduce(
8279 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8280 except portage.exception.InvalidDependString, e:
8281 if not pkg.installed:
8282 show_invalid_depstring_notice(pkg,
8283 pkg.metadata["PROPERTIES"], str(e))
8287 interactive = "interactive" in properties
8288 if interactive and pkg.operation == "merge":
8289 addl = colorize("WARN", "I") + addl[1:]
8291 counters.interactive += 1
8296 if "--columns" in self.myopts:
8297 if "--quiet" in self.myopts:
8298 myprint=addl+" "+indent+pkgprint(pkg_cp)
8299 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8300 myprint=myprint+myoldbest
8301 myprint=myprint+darkgreen("to "+x[1])
8305 myprint = "[%s] %s%s" % \
8306 (pkgprint(pkg_status.ljust(13)),
8307 indent, pkgprint(pkg.cp))
8309 myprint = "[%s %s] %s%s" % \
8310 (pkgprint(pkg.type_name), addl,
8311 indent, pkgprint(pkg.cp))
8312 if (newlp-nc_len(myprint)) > 0:
8313 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8314 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8315 if (oldlp-nc_len(myprint)) > 0:
8316 myprint=myprint+" "*(oldlp-nc_len(myprint))
8317 myprint=myprint+myoldbest
8318 myprint += darkgreen("to " + pkg.root)
8321 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8323 myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8324 myprint += indent + pkgprint(pkg_key) + " " + \
8325 myoldbest + darkgreen("to " + myroot)
8327 if "--columns" in self.myopts:
8328 if "--quiet" in self.myopts:
8329 myprint=addl+" "+indent+pkgprint(pkg_cp)
8330 myprint=myprint+" "+green(xs[1]+xs[2])+" "
8331 myprint=myprint+myoldbest
8335 myprint = "[%s] %s%s" % \
8336 (pkgprint(pkg_status.ljust(13)),
8337 indent, pkgprint(pkg.cp))
8339 myprint = "[%s %s] %s%s" % \
8340 (pkgprint(pkg.type_name), addl,
8341 indent, pkgprint(pkg.cp))
8342 if (newlp-nc_len(myprint)) > 0:
8343 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8344 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8345 if (oldlp-nc_len(myprint)) > 0:
8346 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8347 myprint += myoldbest
8350 myprint = "[%s] %s%s %s" % \
8351 (pkgprint(pkg_status.ljust(13)),
8352 indent, pkgprint(pkg.cpv),
8355 myprint = "[%s %s] %s%s %s" % \
8356 (pkgprint(pkg_type), addl, indent,
8357 pkgprint(pkg.cpv), myoldbest)
8359 if columns and pkg.operation == "uninstall":
8361 p.append((myprint, verboseadd, repoadd))
8363 if "--tree" not in self.myopts and \
8364 "--quiet" not in self.myopts and \
8365 not self._opts_no_restart.intersection(self.myopts) and \
8366 pkg.root == self._running_root.root and \
8367 portage.match_from_list(
8368 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8369 not vardb.cpv_exists(pkg.cpv) and \
8370 "--quiet" not in self.myopts:
8371 if mylist_index < len(mylist) - 1:
8372 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8373 p.append(colorize("WARN", " then resume the merge."))
8376 show_repos = repoadd_set and repoadd_set != set(["0"])
8379 if isinstance(x, basestring):
8380 out.write("%s\n" % (x,))
8383 myprint, verboseadd, repoadd = x
8386 myprint += " " + verboseadd
8388 if show_repos and repoadd:
8389 myprint += " " + teal("[%s]" % repoadd)
8391 out.write("%s\n" % (myprint,))
8400 sys.stdout.write(str(repo_display))
8402 if "--changelog" in self.myopts:
8404 for revision,text in changelogs:
8405 print bold('*'+revision)
8406 sys.stdout.write(text)
8411 def display_problems(self):
8413 Display problems with the dependency graph such as slot collisions.
8414 This is called internally by display() to show the problems _after_
8415 the merge list where it is most likely to be seen, but if display()
8416 is not going to be called then this method should be called explicitly
8417 to ensure that the user is notified of problems with the graph.
8419 All output goes to stderr, except for unsatisfied dependencies which
8420 go to stdout for parsing by programs such as autounmask.
8423 # Note that show_masked_packages() sends it's output to
8424 # stdout, and some programs such as autounmask parse the
8425 # output in cases when emerge bails out. However, when
8426 # show_masked_packages() is called for installed packages
8427 # here, the message is a warning that is more appropriate
8428 # to send to stderr, so temporarily redirect stdout to
8429 # stderr. TODO: Fix output code so there's a cleaner way
8430 # to redirect everything to stderr.
8435 sys.stdout = sys.stderr
8436 self._display_problems()
8442 # This goes to stdout for parsing by programs like autounmask.
8443 for pargs, kwargs in self._unsatisfied_deps_for_display:
8444 self._show_unsatisfied_dep(*pargs, **kwargs)
8446 def _display_problems(self):
8447 if self._circular_deps_for_display is not None:
8448 self._show_circular_deps(
8449 self._circular_deps_for_display)
8451 # The user is only notified of a slot conflict if
8452 # there are no unresolvable blocker conflicts.
8453 if self._unsatisfied_blockers_for_display is not None:
8454 self._show_unsatisfied_blockers(
8455 self._unsatisfied_blockers_for_display)
8457 self._show_slot_collision_notice()
8459 # TODO: Add generic support for "set problem" handlers so that
8460 # the below warnings aren't special cases for world only.
8462 if self._missing_args:
8463 world_problems = False
8464 if "world" in self._sets:
8465 # Filter out indirect members of world (from nested sets)
8466 # since only direct members of world are desired here.
8467 world_set = self.roots[self.target_root].sets["world"]
8468 for arg, atom in self._missing_args:
8469 if arg.name == "world" and atom in world_set:
8470 world_problems = True
8474 sys.stderr.write("\n!!! Problems have been " + \
8475 "detected with your world file\n")
8476 sys.stderr.write("!!! Please run " + \
8477 green("emaint --check world")+"\n\n")
8479 if self._missing_args:
8480 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8481 " Ebuilds for the following packages are either all\n")
8482 sys.stderr.write(colorize("BAD", "!!!") + \
8483 " masked or don't exist:\n")
8484 sys.stderr.write(" ".join(str(atom) for arg, atom in \
8485 self._missing_args) + "\n")
8487 if self._pprovided_args:
8489 for arg, atom in self._pprovided_args:
8490 if isinstance(arg, SetArg):
8492 arg_atom = (atom, atom)
8495 arg_atom = (arg.arg, atom)
8496 refs = arg_refs.setdefault(arg_atom, [])
8497 if parent not in refs:
8500 msg.append(bad("\nWARNING: "))
8501 if len(self._pprovided_args) > 1:
8502 msg.append("Requested packages will not be " + \
8503 "merged because they are listed in\n")
8505 msg.append("A requested package will not be " + \
8506 "merged because it is listed in\n")
8507 msg.append("package.provided:\n\n")
8508 problems_sets = set()
8509 for (arg, atom), refs in arg_refs.iteritems():
8512 problems_sets.update(refs)
8514 ref_string = ", ".join(["'%s'" % name for name in refs])
8515 ref_string = " pulled in by " + ref_string
8516 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8518 if "world" in problems_sets:
8519 msg.append("This problem can be solved in one of the following ways:\n\n")
8520 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
8521 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
8522 msg.append(" C) Remove offending entries from package.provided.\n\n")
8523 msg.append("The best course of action depends on the reason that an offending\n")
8524 msg.append("package.provided entry exists.\n\n")
8525 sys.stderr.write("".join(msg))
8527 masked_packages = []
8528 for pkg in self._masked_installed:
8529 root_config = pkg.root_config
8530 pkgsettings = self.pkgsettings[pkg.root]
8531 mreasons = get_masking_status(pkg, pkgsettings, root_config)
8532 masked_packages.append((root_config, pkgsettings,
8533 pkg.cpv, pkg.metadata, mreasons))
8535 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8536 " The following installed packages are masked:\n")
8537 show_masked_packages(masked_packages)
8541 def calc_changelog(self,ebuildpath,current,next):
8542 if ebuildpath == None or not os.path.exists(ebuildpath):
8544 current = '-'.join(portage.catpkgsplit(current)[1:])
8545 if current.endswith('-r0'):
8546 current = current[:-3]
8547 next = '-'.join(portage.catpkgsplit(next)[1:])
8548 if next.endswith('-r0'):
8550 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8552 changelog = open(changelogpath).read()
8553 except SystemExit, e:
8554 raise # Needed else can't exit
8557 divisions = self.find_changelog_tags(changelog)
8558 #print 'XX from',current,'to',next
8559 #for div,text in divisions: print 'XX',div
8560 # skip entries for all revisions above the one we are about to emerge
8561 for i in range(len(divisions)):
8562 if divisions[i][0]==next:
8563 divisions = divisions[i:]
8565 # find out how many entries we are going to display
8566 for i in range(len(divisions)):
8567 if divisions[i][0]==current:
8568 divisions = divisions[:i]
8571 # couldnt find the current revision in the list. display nothing
8575 def find_changelog_tags(self,changelog):
8579 match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8581 if release is not None:
8582 divs.append((release,changelog))
8584 if release is not None:
8585 divs.append((release,changelog[:match.start()]))
8586 changelog = changelog[match.end():]
8587 release = match.group(1)
8588 if release.endswith('.ebuild'):
8589 release = release[:-7]
8590 if release.endswith('-r0'):
8591 release = release[:-3]
8593 def saveNomergeFavorites(self):
8594 """Find atoms in favorites that are not in the mergelist and add them
8595 to the world file if necessary."""
8596 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8597 "--oneshot", "--onlydeps", "--pretend"):
8598 if x in self.myopts:
8600 root_config = self.roots[self.target_root]
8601 world_set = root_config.sets["world"]
8603 world_locked = False
8604 if hasattr(world_set, "lock"):
8608 if hasattr(world_set, "load"):
8609 world_set.load() # maybe it's changed on disk
8611 args_set = self._sets["args"]
8612 portdb = self.trees[self.target_root]["porttree"].dbapi
8613 added_favorites = set()
8614 for x in self._set_nodes:
8615 pkg_type, root, pkg_key, pkg_status = x
8616 if pkg_status != "nomerge":
8620 myfavkey = create_world_atom(x, args_set, root_config)
8622 if myfavkey in added_favorites:
8624 added_favorites.add(myfavkey)
8625 except portage.exception.InvalidDependString, e:
8626 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8627 (pkg_key, str(e)), noiselevel=-1)
8628 writemsg("!!! see '%s'\n\n" % os.path.join(
8629 root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8632 for k in self._sets:
8633 if k in ("args", "world") or not root_config.sets[k].world_candidate:
8638 all_added.append(SETPREFIX + k)
8639 all_added.extend(added_favorites)
8642 print ">>> Recording %s in \"world\" favorites file..." % \
8643 colorize("INFORM", str(a))
8645 world_set.update(all_added)
8650 def loadResumeCommand(self, resume_data, skip_masked=False):
8652 Add a resume command to the graph and validate it in the process. This
8653 will raise a PackageNotFound exception if a package is not available.
8656 if not isinstance(resume_data, dict):
8659 mergelist = resume_data.get("mergelist")
8660 if not isinstance(mergelist, list):
8663 fakedb = self.mydbapi
8665 serialized_tasks = []
8668 if not (isinstance(x, list) and len(x) == 4):
8670 pkg_type, myroot, pkg_key, action = x
8671 if pkg_type not in self.pkg_tree_map:
8673 if action != "merge":
8675 tree_type = self.pkg_tree_map[pkg_type]
8676 mydb = trees[myroot][tree_type].dbapi
8677 db_keys = list(self._trees_orig[myroot][
8678 tree_type].dbapi._aux_cache_keys)
8680 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8682 # It does no exist or it is corrupt.
8683 if action == "uninstall":
8685 raise portage.exception.PackageNotFound(pkg_key)
8686 installed = action == "uninstall"
8687 built = pkg_type != "ebuild"
8688 root_config = self.roots[myroot]
8689 pkg = Package(built=built, cpv=pkg_key,
8690 installed=installed, metadata=metadata,
8691 operation=action, root_config=root_config,
8693 if pkg_type == "ebuild":
8694 pkgsettings = self.pkgsettings[myroot]
8695 pkgsettings.setcpv(pkg)
8696 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8697 self._pkg_cache[pkg] = pkg
8699 root_config = self.roots[pkg.root]
8700 if "merge" == pkg.operation and \
8701 not visible(root_config.settings, pkg):
8703 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8705 self._unsatisfied_deps_for_display.append(
8706 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8708 fakedb[myroot].cpv_inject(pkg)
8709 serialized_tasks.append(pkg)
8710 self.spinner.update()
8712 if self._unsatisfied_deps_for_display:
8715 if not serialized_tasks or "--nodeps" in self.myopts:
8716 self._serialized_tasks_cache = serialized_tasks
8717 self._scheduler_graph = self.digraph
8719 self._select_package = self._select_pkg_from_graph
8720 self.myparams.add("selective")
8721 # Always traverse deep dependencies in order to account for
8722 # potentially unsatisfied dependencies of installed packages.
8723 # This is necessary for correct --keep-going or --resume operation
8724 # in case a package from a group of circularly dependent packages
8725 # fails. In this case, a package which has recently been installed
8726 # may have an unsatisfied circular dependency (pulled in by
8727 # PDEPEND, for example). So, even though a package is already
8728 # installed, it may not have all of it's dependencies satisfied, so
8729 # it may not be usable. If such a package is in the subgraph of
8730 # deep depenedencies of a scheduled build, that build needs to
8731 # be cancelled. In order for this type of situation to be
8732 # recognized, deep traversal of dependencies is required.
8733 self.myparams.add("deep")
8735 favorites = resume_data.get("favorites")
8736 args_set = self._sets["args"]
8737 if isinstance(favorites, list):
8738 args = self._load_favorites(favorites)
8742 for task in serialized_tasks:
8743 if isinstance(task, Package) and \
8744 task.operation == "merge":
8745 if not self._add_pkg(task, None):
8748 # Packages for argument atoms need to be explicitly
8749 # added via _add_pkg() so that they are included in the
8750 # digraph (needed at least for --tree display).
8752 for atom in arg.set:
8753 pkg, existing_node = self._select_package(
8754 arg.root_config.root, atom)
8755 if existing_node is None and \
8757 if not self._add_pkg(pkg, Dependency(atom=atom,
8758 root=pkg.root, parent=arg)):
8761 # Allow unsatisfied deps here to avoid showing a masking
8762 # message for an unsatisfied dep that isn't necessarily
8764 if not self._create_graph(allow_unsatisfied=True):
8767 unsatisfied_deps = []
8768 for dep in self._unsatisfied_deps:
8769 if not isinstance(dep.parent, Package):
8771 if dep.parent.operation == "merge":
8772 unsatisfied_deps.append(dep)
8775 # For unsatisfied deps of installed packages, only account for
8776 # them if they are in the subgraph of dependencies of a package
8777 # which is scheduled to be installed.
8778 unsatisfied_install = False
8780 dep_stack = self.digraph.parent_nodes(dep.parent)
8782 node = dep_stack.pop()
8783 if not isinstance(node, Package):
8785 if node.operation == "merge":
8786 unsatisfied_install = True
8788 if node in traversed:
8791 dep_stack.extend(self.digraph.parent_nodes(node))
8793 if unsatisfied_install:
8794 unsatisfied_deps.append(dep)
8796 if masked_tasks or unsatisfied_deps:
8797 # This probably means that a required package
8798 # was dropped via --skipfirst. It makes the
8799 # resume list invalid, so convert it to a
8800 # UnsatisfiedResumeDep exception.
8801 raise self.UnsatisfiedResumeDep(self,
8802 masked_tasks + unsatisfied_deps)
8803 self._serialized_tasks_cache = None
8806 except self._unknown_internal_error:
8811 def _load_favorites(self, favorites):
8813 Use a list of favorites to resume state from a
8814 previous select_files() call. This creates similar
8815 DependencyArg instances to those that would have
8816 been created by the original select_files() call.
8817 This allows Package instances to be matched with
8818 DependencyArg instances during graph creation.
8820 root_config = self.roots[self.target_root]
8821 getSetAtoms = root_config.setconfig.getSetAtoms
8822 sets = root_config.sets
8825 if not isinstance(x, basestring):
8827 if x in ("system", "world"):
8829 if x.startswith(SETPREFIX):
8830 s = x[len(SETPREFIX):]
8835 # Recursively expand sets so that containment tests in
8836 # self._get_parent_sets() properly match atoms in nested
8837 # sets (like if world contains system).
8838 expanded_set = InternalPackageSet(
8839 initial_atoms=getSetAtoms(s))
8840 self._sets[s] = expanded_set
8841 args.append(SetArg(arg=x, set=expanded_set,
8842 root_config=root_config))
8844 if not portage.isvalidatom(x):
8846 args.append(AtomArg(arg=x, atom=x,
8847 root_config=root_config))
8849 self._set_args(args)
class UnsatisfiedResumeDep(portage.exception.PortageException):
	"""
	A dependency of a resume list is not installed. This
	can occur when a required package is dropped from the
	merge list via --skipfirst.
	"""
	def __init__(self, depgraph, value):
		# Keep a reference to the depgraph so the code that catches this
		# exception can inspect the graph state that produced the failure.
		portage.exception.PortageException.__init__(self, value)
		self.depgraph = depgraph
class _internal_exception(portage.exception.PortageException):
	"""
	Base class for exceptions used internally by the depgraph
	(see the _unknown_internal_error and _serialize_tasks_retry
	subclasses below).
	"""
	def __init__(self, value=""):
		# Default the message to an empty string so subclasses can be
		# raised without arguments.
		portage.exception.PortageException.__init__(self, value)
class _unknown_internal_error(_internal_exception):
	"""
	Used by the depgraph internally to terminate graph creation.
	The specific reason for the failure should have been dumped
	to stderr, unfortunately, the exact reason for the failure
	is not known at the point where this exception is caught.
	"""
class _serialize_tasks_retry(_internal_exception):
	"""
	This is raised by the _serialize_tasks() method when it needs to
	be called again for some reason. The only case that it's currently
	used for is when neglected dependencies need to be added to the
	graph in order to avoid making a potentially unsafe decision.
	"""
8882 class _dep_check_composite_db(portage.dbapi):
8884 A dbapi-like interface that is optimized for use in dep_check() calls.
8885 This is built on top of the existing depgraph package selection logic.
8886 Some packages that have been added to the graph may be masked from this
8887 view in order to influence the atom preference selection that occurs
8890 def __init__(self, depgraph, root):
8891 portage.dbapi.__init__(self)
8892 self._depgraph = depgraph
8894 self._match_cache = {}
8895 self._cpv_pkg_map = {}
def _clear_cache(self):
	"""Discard all memoized match() results and the cpv -> Package map."""
	for cache in (self._match_cache, self._cpv_pkg_map):
		cache.clear()
8901 def match(self, atom):
8902 ret = self._match_cache.get(atom)
8907 atom = self._dep_expand(atom)
8908 pkg, existing = self._depgraph._select_package(self._root, atom)
8912 # Return the highest available from select_package() as well as
8913 # any matching slots in the graph db.
8915 slots.add(pkg.metadata["SLOT"])
8916 atom_cp = portage.dep_getkey(atom)
8917 if pkg.cp.startswith("virtual/"):
8918 # For new-style virtual lookahead that occurs inside
8919 # dep_check(), examine all slots. This is needed
8920 # so that newer slots will not unnecessarily be pulled in
8921 # when a satisfying lower slot is already installed. For
8922 # example, if virtual/jdk-1.4 is satisfied via kaffe then
8923 # there's no need to pull in a newer slot to satisfy a
8924 # virtual/jdk dependency.
8925 for db, pkg_type, built, installed, db_keys in \
8926 self._depgraph._filtered_trees[self._root]["dbs"]:
8927 for cpv in db.match(atom):
8928 if portage.cpv_getkey(cpv) != pkg.cp:
8930 slots.add(db.aux_get(cpv, ["SLOT"])[0])
8932 if self._visible(pkg):
8933 self._cpv_pkg_map[pkg.cpv] = pkg
8935 slots.remove(pkg.metadata["SLOT"])
8937 slot_atom = "%s:%s" % (atom_cp, slots.pop())
8938 pkg, existing = self._depgraph._select_package(
8939 self._root, slot_atom)
8942 if not self._visible(pkg):
8944 self._cpv_pkg_map[pkg.cpv] = pkg
8947 self._cpv_sort_ascending(ret)
8948 self._match_cache[orig_atom] = ret
8951 def _visible(self, pkg):
8952 if pkg.installed and "selective" not in self._depgraph.myparams:
8954 arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
8955 except (StopIteration, portage.exception.InvalidDependString):
8962 self._depgraph.pkgsettings[pkg.root], pkg):
8964 except portage.exception.InvalidDependString:
8966 in_graph = self._depgraph._slot_pkg_map[
8967 self._root].get(pkg.slot_atom)
8968 if in_graph is None:
8969 # Mask choices for packages which are not the highest visible
8970 # version within their slot (since they usually trigger slot
8972 highest_visible, in_graph = self._depgraph._select_package(
8973 self._root, pkg.slot_atom)
8974 if pkg != highest_visible:
8976 elif in_graph != pkg:
8977 # Mask choices for packages that would trigger a slot
8978 # conflict with a previously selected package.
8982 def _dep_expand(self, atom):
8984 This is only needed for old installed packages that may
8985 contain atoms that are not fully qualified with a specific
8986 category. Emulate the cpv_expand() function that's used by
8987 dbapi.match() in cases like this. If there are multiple
8988 matches, it's often due to a new-style virtual that has
8989 been added, so try to filter those out to avoid raising
8992 root_config = self._depgraph.roots[self._root]
8994 expanded_atoms = self._depgraph._dep_expand(root_config, atom)
8995 if len(expanded_atoms) > 1:
8996 non_virtual_atoms = []
8997 for x in expanded_atoms:
8998 if not portage.dep_getkey(x).startswith("virtual/"):
8999 non_virtual_atoms.append(x)
9000 if len(non_virtual_atoms) == 1:
9001 expanded_atoms = non_virtual_atoms
9002 if len(expanded_atoms) > 1:
9003 # compatible with portage.cpv_expand()
9004 raise portage.exception.AmbiguousPackageName(
9005 [portage.dep_getkey(x) for x in expanded_atoms])
9007 atom = expanded_atoms[0]
9009 null_atom = insert_category_into_atom(atom, "null")
9010 null_cp = portage.dep_getkey(null_atom)
9011 cat, atom_pn = portage.catsplit(null_cp)
9012 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9014 # Allow the resolver to choose which virtual.
9015 atom = insert_category_into_atom(atom, "virtual")
9017 atom = insert_category_into_atom(atom, "null")
def aux_get(self, cpv, wants):
	"""Emulate dbapi.aux_get() using the cached Package metadata.

	Keys in *wants* that are absent from the package's metadata are
	returned as empty strings.
	"""
	pkg_metadata = self._cpv_pkg_map[cpv].metadata
	values = []
	for key in wants:
		values.append(pkg_metadata.get(key, ""))
	return values
class RepoDisplay(object):
	"""Map repository paths to short display indices ("[0]", "[1]", ...)
	for the merge-list display; PORTDIR is pre-allocated index 0.

	NOTE(review): several lines appear to be missing from this source
	dump (a repo_paths set initializer and guard conditionals in
	__init__, guards in repoStr, and the __str__ method header) --
	TODO restore from upstream before relying on this code.
	"""

	def __init__(self, roots):
		self._shown_repos = {}
		self._unknown_repo = False
		# presumably a `repo_paths = set()` initializer belongs here -- verify
		for root_config in roots.itervalues():
			portdir = root_config.settings.get("PORTDIR")
			repo_paths.add(portdir)
			overlays = root_config.settings.get("PORTDIR_OVERLAY")
			repo_paths.update(overlays.split())
		repo_paths = list(repo_paths)
		self._repo_paths = repo_paths
		self._repo_paths_real = [ os.path.realpath(repo_path) \
			for repo_path in repo_paths ]

		# pre-allocate index for PORTDIR so that it always has index 0.
		for root_config in roots.itervalues():
			portdb = root_config.trees["porttree"].dbapi
			portdir = portdb.porttree_root
			self.repoStr(portdir)

	def repoStr(self, repo_path_real):
		# Return a short index string for the given realpath, assigning a
		# fresh index on first sight.
		real_index = self._repo_paths_real.index(repo_path_real)
		if real_index == -1:
			# NOTE(review): list.index() raises ValueError rather than
			# returning -1; the surrounding guard logic appears truncated.
			self._unknown_repo = True
		shown_repos = self._shown_repos
		repo_paths = self._repo_paths
		repo_path = repo_paths[real_index]
		index = shown_repos.get(repo_path)
		index = len(shown_repos)
		shown_repos[repo_path] = index

		# NOTE(review): a `def __str__(self):` header (and its `output`
		# list initializer) appears to be missing here; the lines below
		# render the repository legend.
		shown_repos = self._shown_repos
		unknown_repo = self._unknown_repo
		if shown_repos or self._unknown_repo:
			output.append("Portage tree and overlays:\n")
		# Invert the path->index map into an index-ordered list.
		show_repo_paths = list(shown_repos)
		for repo_path, repo_index in shown_repos.iteritems():
			show_repo_paths[repo_index] = repo_path
		for index, repo_path in enumerate(show_repo_paths):
			output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
		output.append(" "+teal("[?]") + \
			" indicates that the source repository could not be determined\n")
		return "".join(output)
class PackageCounters(object):
	"""Tallies of merge-list entry kinds (upgrades, downgrades, new
	slots, reinstalls, uninstalls, blocks, fetch restrictions, ...)
	plus a summary renderer.

	NOTE(review): this source dump is missing many lines in this class
	(most of __init__, the __str__ method header, and several
	pluralization appends) -- TODO restore from upstream.
	"""
	self.blocks_satisfied = 0
	self.restrict_fetch = 0
	self.restrict_fetch_satisfied = 0
	self.interactive = 0

	# Summary rendering (apparently the body of a truncated __str__).
	total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
	myoutput.append("Total: %s package" % total_installs)
	if total_installs != 1:
		myoutput.append("s")
	if total_installs != 0:
		myoutput.append(" (")
	if self.upgrades > 0:
		details.append("%s upgrade" % self.upgrades)
		if self.upgrades > 1:
	if self.downgrades > 0:
		details.append("%s downgrade" % self.downgrades)
		if self.downgrades > 1:
	details.append("%s new" % self.new)
	if self.newslot > 0:
		details.append("%s in new slot" % self.newslot)
		if self.newslot > 1:
	details.append("%s reinstall" % self.reinst)
	details.append("%s uninstall" % self.uninst)
	if self.interactive > 0:
		details.append("%s %s" % (self.interactive,
			colorize("WARN", "interactive")))
	myoutput.append(", ".join(details))
	if total_installs != 0:
		myoutput.append(")")
	myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
	if self.restrict_fetch:
		myoutput.append("\nFetch Restriction: %s package" % \
			self.restrict_fetch)
		if self.restrict_fetch > 1:
			myoutput.append("s")
	if self.restrict_fetch_satisfied < self.restrict_fetch:
		myoutput.append(bad(" (%s unsatisfied)") % \
			(self.restrict_fetch - self.restrict_fetch_satisfied))
	# NOTE(review): the `if self.blocks:` guard and the %-argument line
	# of the next append appear truncated.
	myoutput.append("\nConflict: %s block" % \
	myoutput.append("s")
	if self.blocks_satisfied < self.blocks:
		myoutput.append(bad(" (%s unsatisfied)") % \
			(self.blocks - self.blocks_satisfied))
	return "".join(myoutput)
class PollSelectAdapter(PollConstants):
	"""
	Use select to emulate a poll object, for
	systems that don't support poll().
	"""
	# NOTE(review): lines are missing from this source dump throughout
	# this class (the __init__ header, the TypeError argument-count
	# wrappers, the timeout unpacking in poll(), and the poll_events
	# initializer/return) -- TODO restore from upstream.

	# fd -> eventmask; _select_args caches the three select() lists.
	self._registered = {}
	self._select_args = [[], [], []]

	def register(self, fd, *args):
		"""
		Only POLLIN is currently supported!
		"""
		"register expected at most 2 arguments, got " + \
		repr(1 + len(args)))

		eventmask = PollConstants.POLLIN | \
			PollConstants.POLLPRI | PollConstants.POLLOUT

		self._registered[fd] = eventmask
		# Invalidate the cached select() argument lists.
		self._select_args = None

	def unregister(self, fd):
		# Invalidate the cache and forget the descriptor.
		self._select_args = None
		del self._registered[fd]

	def poll(self, *args):
		"poll expected at most 2 arguments, got " + \
		repr(1 + len(args)))

		select_args = self._select_args
		if select_args is None:
			select_args = [self._registered.keys(), [], []]

		if timeout is not None:
			select_args = select_args[:]
			# Translate poll() timeout args to select() timeout args:
			#
			#          | units        | value(s) for indefinite block
			# ---------|--------------|------------------------------
			#   poll   | milliseconds | omitted, negative, or None
			# ---------|--------------|------------------------------
			#   select | seconds      | omitted
			# ---------|--------------|------------------------------
			if timeout is not None and timeout < 0:
			if timeout is not None:
				select_args.append(timeout / 1000)

		select_events = select.select(*select_args)
		# Readable descriptors are reported as POLLIN events.
		for fd in select_events[0]:
			poll_events.append((fd, PollConstants.POLLIN))
class SequentialTaskQueue(SlotObject):
	"""FIFO of asynchronous tasks; schedule() starts queued tasks until
	max_jobs are running, and exit listeners prune running_tasks.

	NOTE(review): lines are missing from this source dump in this class
	(tail of __init__, the schedule()/clear()/__len__ method headers,
	and several dirty-flag updates) -- TODO restore from upstream.
	"""

	__slots__ = ("max_jobs", "running_tasks") + \
		("_dirty", "_scheduling", "_task_queue")

	def __init__(self, **kwargs):
		SlotObject.__init__(self, **kwargs)
		self._task_queue = deque()
		self.running_tasks = set()
		if self.max_jobs is None:

	def add(self, task):
		self._task_queue.append(task)

	def addFront(self, task):
		# Front of the queue: scheduled before previously added tasks.
		self._task_queue.appendleft(task)

		# NOTE(review): the schedule() method header appears to be
		# missing here; the lines below are its body.
		if self._scheduling:
			# Ignore any recursive schedule() calls triggered via
			# self._task_exit().
		self._scheduling = True

		task_queue = self._task_queue
		running_tasks = self.running_tasks
		max_jobs = self.max_jobs
		state_changed = False

		# max_jobs is True means "unlimited".
		while task_queue and \
			(max_jobs is True or len(running_tasks) < max_jobs):
			task = task_queue.popleft()
			cancelled = getattr(task, "cancelled", None)
			running_tasks.add(task)
			task.addExitListener(self._task_exit)
			state_changed = True

		self._scheduling = False

		return state_changed

	def _task_exit(self, task):
		"""
		Since we can always rely on exit listeners being called, the set of
		running tasks is always pruned automatically and there is never any need
		to actively prune it.
		"""
		self.running_tasks.remove(task)
		if self._task_queue:

		# NOTE(review): a clear() method header appears to be missing
		# here; the lines below empty the queue and detach listeners.
		self._task_queue.clear()
		running_tasks = self.running_tasks
		while running_tasks:
			task = running_tasks.pop()
			task.removeExitListener(self._task_exit)

	def __nonzero__(self):
		# Truthy while anything is queued or running.
		return bool(self._task_queue or self.running_tasks)

		# NOTE(review): a __len__ method header appears to be missing here.
		return len(self._task_queue) + len(self.running_tasks)
# Memoized result of can_poll_device(); None means "not probed yet".
_can_poll_device = None

def can_poll_device():
	"""
	Test if it's possible to use poll() on a device such as a pty. This
	is known to fail on Darwin.
	@returns: True if poll() on a device succeeds, False otherwise.
	"""
	# NOTE(review): the try/except around open('/dev/null'), the poll
	# object construction, and a break/close appear to be missing from
	# this source dump -- TODO restore from upstream.
	global _can_poll_device
	if _can_poll_device is not None:
		# Return the memoized probe result.
		return _can_poll_device

	if not hasattr(select, "poll"):
		_can_poll_device = False
		return _can_poll_device

	dev_null = open('/dev/null', 'rb')
	_can_poll_device = False
	return _can_poll_device

	p.register(dev_null.fileno(), PollConstants.POLLIN)

	invalid_request = False
	for f, event in p.poll():
		if event & PollConstants.POLLNVAL:
			# The kernel rejected polling this descriptor.
			invalid_request = True

	_can_poll_device = not invalid_request
	return _can_poll_device
def create_poll_instance():
	"""
	Return a poll-like object: select.poll() when polling devices is
	known to work on this platform, otherwise a PollSelectAdapter that
	emulates poll() on top of select().
	"""
	if not can_poll_device():
		return PollSelectAdapter()
	return select.poll()
# Fall back to parsing /proc/loadavg on platforms (or Python builds)
# that lack os.getloadavg().
getloadavg = getattr(os, "getloadavg", None)
if getloadavg is None:
	# NOTE(review): the `def getloadavg():` header and the try/except
	# scaffolding around the file read and float conversions appear to
	# be missing from this source dump -- TODO restore from upstream.
	"""
	Uses /proc/loadavg to emulate os.getloadavg().
	Raises OSError if the load average was unobtainable.
	"""
	loadavg_str = open('/proc/loadavg').readline()
	# getloadavg() is only supposed to raise OSError, so convert
	raise OSError('unknown')
	loadavg_split = loadavg_str.split()
	if len(loadavg_split) < 3:
		raise OSError('unknown')
	loadavg_floats.append(float(loadavg_split[i]))
	raise OSError('unknown')
	return tuple(loadavg_floats)
class PollScheduler(object):
	"""Base class providing a poll()-driven event loop: handlers are
	registered per file descriptor and serviced from a shared event
	queue; subclasses implement _schedule_tasks()/_running_job_count().

	NOTE(review): lines are missing from this source dump throughout
	this class (the __init__ header, try/finally and try/except
	openers, handler dispatch calls, and several returns) -- TODO
	restore from upstream before relying on the code below.
	"""

	class _sched_iface_class(SlotObject):
		# Narrow interface handed to tasks: register/schedule/unregister.
		__slots__ = ("register", "schedule", "unregister")

	self._max_load = None
	# Shared FIFO of (fd, event) pairs produced by _poll().
	self._poll_event_queue = []
	self._poll_event_handlers = {}
	self._poll_event_handler_ids = {}
	# Increment id for each new handler.
	self._event_handler_id = 0
	self._poll_obj = create_poll_instance()
	self._scheduling = False

	def _schedule(self):
		"""
		Calls _schedule_tasks() and automatically returns early from
		any recursive calls to this method that the _schedule_tasks()
		call might trigger. This makes _schedule() safe to call from
		inside exit listeners.
		"""
		if self._scheduling:
		self._scheduling = True
		return self._schedule_tasks()
		self._scheduling = False

	def _running_job_count(self):
		# Abstract: subclasses report the number of running jobs.

	def _can_add_job(self):
		# Enforce both the job-count limit and the load-average limit.
		max_jobs = self._max_jobs
		max_load = self._max_load

		if self._max_jobs is not True and \
			self._running_job_count() >= self._max_jobs:

		# Only throttle on load when parallelism is actually enabled
		# and at least one job is already running.
		if max_load is not None and \
			(max_jobs is True or max_jobs > 1) and \
			self._running_job_count() >= 1:
			avg1, avg5, avg15 = getloadavg()

			if avg1 >= max_load:

	def _poll(self, timeout=None):
		"""
		All poll() calls pass through here. The poll events
		are added directly to self._poll_event_queue.
		In order to avoid endless blocking, this raises
		StopIteration if timeout is None and there are
		no file descriptors to poll.
		"""
		if not self._poll_event_handlers:
			if timeout is None and \
				not self._poll_event_handlers:
				raise StopIteration(
					"timeout is None and there are no poll() event handlers")

		# The following error is known to occur with Linux kernel versions
		#
		#   select.error: (4, 'Interrupted system call')
		#
		# This error has been observed after a SIGSTOP, followed by SIGCONT.
		# Treat it similar to EAGAIN if timeout is None, otherwise just return
		# without any events.
		self._poll_event_queue.extend(self._poll_obj.poll(timeout))
		except select.error, e:
			writemsg_level("\n!!! select error: %s\n" % (e,),
				level=logging.ERROR, noiselevel=-1)
			if timeout is not None:

	def _next_poll_event(self, timeout=None):
		"""
		Since the _schedule_wait() loop is called by event
		handlers from _poll_loop(), maintain a central event
		queue for both of them to share events from a single
		poll() call. In order to avoid endless blocking, this
		raises StopIteration if timeout is None and there are
		no file descriptors to poll.
		"""
		if not self._poll_event_queue:
		return self._poll_event_queue.pop()

	def _poll_loop(self):
		# Service events until every handler has unregistered itself.
		event_handlers = self._poll_event_handlers
		event_handled = False

		while event_handlers:
			f, event = self._next_poll_event()
			handler, reg_id = event_handlers[f]
			event_handled = True
		except StopIteration:
			event_handled = True

		if not event_handled:
			raise AssertionError("tight loop")

	def _schedule_yield(self):
		"""
		Schedule for a short period of time chosen by the scheduler based
		on internal state. Synchronous tasks should call this periodically
		in order to allow the scheduler to service pending poll events. The
		scheduler will call poll() exactly once, without blocking, and any
		resulting poll events will be serviced.
		"""
		event_handlers = self._poll_event_handlers

		if not event_handlers:
			return bool(events_handled)

		if not self._poll_event_queue:
		while event_handlers and self._poll_event_queue:
			f, event = self._next_poll_event()
			handler, reg_id = event_handlers[f]
		except StopIteration:
		return bool(events_handled)

	def _register(self, f, eventmask, handler):
		"""
		@return: A unique registration id, for use in schedule() or
		"""
		if f in self._poll_event_handlers:
			raise AssertionError("fd %d is already registered" % f)
		self._event_handler_id += 1
		reg_id = self._event_handler_id
		self._poll_event_handler_ids[reg_id] = f
		self._poll_event_handlers[f] = (handler, reg_id)
		self._poll_obj.register(f, eventmask)

	def _unregister(self, reg_id):
		# Drop the descriptor from the poll object and both maps.
		f = self._poll_event_handler_ids[reg_id]
		self._poll_obj.unregister(f)
		del self._poll_event_handlers[f]
		del self._poll_event_handler_ids[reg_id]

	def _schedule_wait(self, wait_ids):
		"""
		Schedule until wait_id is not longer registered
		@param wait_id: a task id to wait for
		"""
		event_handlers = self._poll_event_handlers
		handler_ids = self._poll_event_handler_ids
		event_handled = False

		# Accept either a single id or a collection of ids.
		if isinstance(wait_ids, int):
			wait_ids = frozenset([wait_ids])

		while wait_ids.intersection(handler_ids):
			f, event = self._next_poll_event()
			handler, reg_id = event_handlers[f]
			event_handled = True
		except StopIteration:
			event_handled = True

		return event_handled
class QueueScheduler(PollScheduler):
	"""
	Add instances of SequentialTaskQueue and then call run(). The
	run() method returns when no tasks remain.
	"""
	# NOTE(review): lines are missing from this source dump in this
	# class (the _queues initializer, the add()/run() method headers,
	# and several returns/breaks) -- TODO restore from upstream.

	def __init__(self, max_jobs=None, max_load=None):
		PollScheduler.__init__(self)

		if max_jobs is None:

		self._max_jobs = max_jobs
		self._max_load = max_load

		self.sched_iface = self._sched_iface_class(
			register=self._register,
			schedule=self._schedule_wait,
			unregister=self._unregister)

		self._schedule_listeners = []

		# NOTE(review): an add(self, q) method header appears to be
		# missing here.
		self._queues.append(q)

	def remove(self, q):
		self._queues.remove(q)

		# NOTE(review): a run() method header appears to be missing
		# here; the loops below drain scheduling and running jobs.
		while self._schedule():

		while self._running_job_count():

	def _schedule_tasks(self):
		"""
		@returns: True if there may be remaining tasks to schedule,
		"""
		while self._can_add_job():
			# Headroom remaining under the job limit.
			n = self._max_jobs - self._running_job_count()

			if not self._start_next_job(n):

		for q in self._queues:

	def _running_job_count(self):
		# Sum running tasks across all queues (cached in self._jobs).
		for q in self._queues:
			job_count += len(q.running_tasks)
		self._jobs = job_count

	def _start_next_job(self, n=1):
		# Try each queue in turn until n new jobs have been started.
		for q in self._queues:
			initial_job_count = len(q.running_tasks)
			final_job_count = len(q.running_tasks)
			if final_job_count > initial_job_count:
				started_count += (final_job_count - initial_job_count)
			if started_count >= n:
		return started_count
class TaskScheduler(object):
	"""
	A simple way to handle scheduling of AsynchrousTask instances. Simply
	add tasks and call run(). The run() method returns when no tasks remain.
	"""

	def __init__(self, max_jobs=None, max_load=None):
		# One task queue driven by a QueueScheduler.
		self._queue = SequentialTaskQueue(max_jobs=max_jobs)
		self._scheduler = QueueScheduler(
			max_jobs=max_jobs, max_load=max_load)
		# Re-export the scheduler's interface and run() entry point.
		self.sched_iface = self._scheduler.sched_iface
		self.run = self._scheduler.run
		self._scheduler.add(self._queue)

	def add(self, task):
		self._queue.add(task)
class JobStatusDisplay(object):
	"""Single-line "Jobs: x of y ... Load avg: ..." status display with
	optional terminal control codes; writes to the attributes listed in
	_bound_properties trigger redisplay via the __setattr__ hook.

	NOTE(review): many lines are missing from this source dump in this
	class (parts of the termcap tables, the _erase/reset/display method
	headers, flush calls and various guards) -- TODO restore from
	upstream before relying on the code below.
	"""

	# Attributes whose mutation triggers _property_change().
	_bound_properties = ("curval", "failed", "running")
	_jobs_column_width = 48

	# Don't update the display unless at least this much
	# time has passed, in units of seconds.
	_min_display_latency = 2

	_default_term_codes = {

	_termcap_name_map = {
		'carriage_return' : 'cr',

	def __init__(self, out=sys.stdout, quiet=False):
		# object.__setattr__ bypasses this class's own __setattr__ hook
		# during initialization.
		object.__setattr__(self, "out", out)
		object.__setattr__(self, "quiet", quiet)
		object.__setattr__(self, "maxval", 0)
		object.__setattr__(self, "merges", 0)
		object.__setattr__(self, "_changed", False)
		object.__setattr__(self, "_displayed", False)
		object.__setattr__(self, "_last_display_time", 0)
		object.__setattr__(self, "width", 80)

		isatty = hasattr(out, "isatty") and out.isatty()
		object.__setattr__(self, "_isatty", isatty)
		if not isatty or not self._init_term():
			# Fall back to the hard-coded default control codes.
			for k, capname in self._termcap_name_map.iteritems():
				term_codes[k] = self._default_term_codes[capname]
			object.__setattr__(self, "_term_codes", term_codes)
		# Normalize any unicode codes to str.
		encoding = sys.getdefaultencoding()
		for k, v in self._term_codes.items():
			if not isinstance(v, str):
				self._term_codes[k] = v.decode(encoding, 'replace')

	def _init_term(self):
		"""
		Initialize term control codes.
		@returns: True if term codes were successfully initialized,
		"""
		term_type = os.environ.get("TERM", "vt100")
		curses.setupterm(term_type, self.out.fileno())
		tigetstr = curses.tigetstr
		except curses.error:

		if tigetstr is None:

		for k, capname in self._termcap_name_map.iteritems():
			code = tigetstr(capname)
			# Fall back to the default when the capability is absent.
			code = self._default_term_codes[capname]
			term_codes[k] = code
		object.__setattr__(self, "_term_codes", term_codes)

	def _format_msg(self, msg):
		return ">>> %s" % msg

		# NOTE(review): an _erase() method header appears to be missing
		# here; these codes rewind and clear the current line.
		self._term_codes['carriage_return'] + \
		self._term_codes['clr_eol'])
		self._displayed = False

	def _display(self, line):
		self.out.write(line)
		self._displayed = True

	def _update(self, msg):
		# Non-tty output gets a trailing newline instead of in-place
		# line rewriting.
		if not self._isatty:
			out.write(self._format_msg(msg) + self._term_codes['newline'])
			self._displayed = True

		self._display(self._format_msg(msg))

	def displayMessage(self, msg):
		# Print a one-off message, erasing the status line first when
		# running on a terminal.
		was_displayed = self._displayed

		if self._isatty and self._displayed:

		self.out.write(self._format_msg(msg) + self._term_codes['newline'])
		self._displayed = False
		self._changed = True

		# NOTE(review): a reset() method header appears to be missing
		# here; the lines below zero the bound properties.
		for name in self._bound_properties:
			object.__setattr__(self, name, 0)
		self.out.write(self._term_codes['newline'])
		self._displayed = False

	def __setattr__(self, name, value):
		old_value = getattr(self, name)
		if value == old_value:
		object.__setattr__(self, name, value)
		if name in self._bound_properties:
			self._property_change(name, old_value, value)

	def _property_change(self, name, old_value, new_value):
		self._changed = True

	def _load_avg_str(self):
		return ", ".join(("%%.%df" % digits ) % x for x in avg)

		"""
		Display status on stdout, but only if something has
		changed since the last call.
		"""
		# NOTE(review): the display() method header appears to be
		# missing above this docstring.
		current_time = time.time()
		time_delta = current_time - self._last_display_time
		if self._displayed and \
			if not self._isatty:
			if time_delta < self._min_display_latency:

		self._last_display_time = current_time
		self._changed = False
		self._display_status()

	def _display_status(self):
		# Don't use len(self._completed_tasks) here since that also
		# can include uninstall tasks.
		curval_str = str(self.curval)
		maxval_str = str(self.maxval)
		running_str = str(self.running)
		failed_str = str(self.failed)
		load_avg_str = self._load_avg_str()

		# Render once through a style-aware writer; plain_output gets
		# an uncolored copy of everything written to style_file.
		color_output = StringIO()
		plain_output = StringIO()
		style_file = portage.output.ConsoleStyleFile(color_output)
		style_file.write_listener = plain_output
		style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
		style_writer.style_listener = style_file.new_styles
		f = formatter.AbstractFormatter(style_writer)

		number_style = "INFORM"
		f.add_literal_data("Jobs: ")
		f.push_style(number_style)
		f.add_literal_data(curval_str)
		f.add_literal_data(" of ")
		f.push_style(number_style)
		f.add_literal_data(maxval_str)
		f.add_literal_data(" complete")

		f.add_literal_data(", ")
		f.push_style(number_style)
		f.add_literal_data(running_str)
		f.add_literal_data(" running")

		f.add_literal_data(", ")
		f.push_style(number_style)
		f.add_literal_data(failed_str)
		f.add_literal_data(" failed")

		# Pad the jobs column to a fixed width before the load average.
		padding = self._jobs_column_width - len(plain_output.getvalue())
		f.add_literal_data(padding * " ")

		f.add_literal_data("Load avg: ")
		f.add_literal_data(load_avg_str)

		# Truncate to fit width, to avoid making the terminal scroll if the
		# line overflows (happens when the load average is large).
		plain_output = plain_output.getvalue()
		if self._isatty and len(plain_output) > self.width:
			# Use plain_output here since it's easier to truncate
			# properly than the color output which contains console
			self._update(plain_output[:self.width])
		self._update(color_output.getvalue())

		xtermTitle(" ".join(plain_output.split()))
class Scheduler(PollScheduler):

	# Options under which installed-blocker checks are skipped.
	_opts_ignore_blockers = \
		frozenset(["--buildpkgonly",
		"--fetchonly", "--fetch-all-uri",
		"--nodeps", "--pretend"])

	# Options that force foreground output (no background mode).
	_opts_no_background = \
		frozenset(["--pretend",
		"--fetchonly", "--fetch-all-uri"])

	# Options under which no restart is performed -- presumably the
	# post-portage-upgrade re-exec; TODO confirm against callers.
	_opts_no_restart = frozenset(["--buildpkgonly",
		"--fetchonly", "--fetch-all-uri", "--pretend"])

	# Options that make a saved resume list unreliable.
	_bad_resume_opts = set(["--ask", "--changelog",
		"--resume", "--skipfirst"])

	# Shared log file for parallel-fetch output.
	_fetch_log = "/var/log/emerge-fetch.log"
	# --- Small SlotObject helper types used by the Scheduler. ---
	# NOTE(review): lines are missing from this source dump here (the
	# closing of the _iface_class __slots__ tuple, the _ConfigPool
	# allocate() header and _root assignment, and some docstring
	# delimiters) -- TODO restore from upstream.

	class _iface_class(SlotObject):
		# Interface handed to merge tasks (dblink callbacks, fetch,
		# scheduling hooks).
		__slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
			"dblinkElog", "fetch", "register", "schedule",
			"scheduleSetup", "scheduleUnpack", "scheduleYield",

	class _fetch_iface_class(SlotObject):
		__slots__ = ("log_file", "schedule")

	# Attribute-style container with one SequentialTaskQueue per kind.
	_task_queues_class = slot_dict_class(
		("merge", "jobs", "fetch", "unpack"), prefix="")

	class _build_opts_class(SlotObject):
		# Booleans derived from the corresponding --long-options.
		__slots__ = ("buildpkg", "buildpkgonly",
			"fetch_all_uri", "fetchonly", "pretend")

	class _binpkg_opts_class(SlotObject):
		__slots__ = ("fetchonly", "getbinpkg", "pretend")

	class _pkg_count_class(SlotObject):
		__slots__ = ("curval", "maxval")

	class _emerge_log_class(SlotObject):
		__slots__ = ("xterm_titles",)

		def log(self, *pargs, **kwargs):
			if not self.xterm_titles:
				# Avoid interference with the scheduler's status display.
				kwargs.pop("short_msg", None)
			emergelog(self.xterm_titles, *pargs, **kwargs)

	class _failed_pkg(SlotObject):
		__slots__ = ("build_dir", "build_log", "pkg", "returncode")

	class _ConfigPool(object):
		"""Interface for a task to temporarily allocate a config
		instance from a pool. This allows a task to be constructed
		long before the config instance actually becomes needed, like
		when prefetchers are constructed for the whole merge list."""
		__slots__ = ("_root", "_allocate", "_deallocate")
		def __init__(self, root, allocate, deallocate):
			self._allocate = allocate
			self._deallocate = deallocate
			# NOTE(review): the `def allocate(self):` header appears to
			# be missing above the next line.
			return self._allocate(self._root)
		def deallocate(self, settings):
			self._deallocate(settings)

	class _unknown_internal_error(portage.exception.PortageException):
		"""
		Used internally to terminate scheduling. The specific reason for
		the failure should have been dumped to stderr.
		"""
		def __init__(self, value=""):
			portage.exception.PortageException.__init__(self, value)
	def __init__(self, settings, trees, mtimedb, myopts,
		spinner, mergelist, favorites, digraph):
		# NOTE(review): lines are missing from this source dump in this
		# method (self.trees assignment, a for-root loop header, several
		# guards/try blocks and noiselevel arguments) -- TODO restore
		# from upstream before relying on the code below.
		PollScheduler.__init__(self)
		self.settings = settings
		self.target_root = settings["ROOT"]
		self.myopts = myopts
		self._spinner = spinner
		self._mtimedb = mtimedb
		self._mergelist = mergelist
		self._favorites = favorites
		self._args_set = InternalPackageSet(favorites)

		# Derive boolean option holders from the command-line options.
		self._build_opts = self._build_opts_class()
		for k in self._build_opts.__slots__:
			setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
		self._binpkg_opts = self._binpkg_opts_class()
		for k in self._binpkg_opts.__slots__:
			setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)

		self._logger = self._emerge_log_class()
		self._task_queues = self._task_queues_class()
		for k in self._task_queues.allowed_keys:
			setattr(self._task_queues, k,
				SequentialTaskQueue())

		# Holds merges that will wait to be executed when no builds are
		# executing. This is useful for system packages since dependencies
		# on system packages are frequently unspecified.
		self._merge_wait_queue = []
		# Holds merges that have been transfered from the merge_wait_queue to
		# the actual merge queue. They are removed from this list upon
		# completion. Other packages can start building only when this list is
		self._merge_wait_scheduled = []

		# Holds system packages and their deep runtime dependencies. Before
		# being merged, these packages go to merge_wait_queue, to be merged
		# when no other packages are building.
		self._deep_system_deps = set()

		self._status_display = JobStatusDisplay()
		self._max_load = myopts.get("--load-average")
		max_jobs = myopts.get("--jobs")
		if max_jobs is None:
		self._set_max_jobs(max_jobs)

		# The root where the currently running
		# portage instance is installed.
		self._running_root = trees["/"]["root_config"]
		if settings.get("PORTAGE_DEBUG", "") == "1":

		# Per-root bookkeeping: pooled config instances and blocker dbs.
		self.pkgsettings = {}
		self._config_pool = {}
		self._blocker_db = {}
		self._config_pool[root] = []
		self._blocker_db[root] = BlockerDB(trees[root]["root_config"])

		fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
			schedule=self._schedule_fetch)
		self._sched_iface = self._iface_class(
			dblinkEbuildPhase=self._dblink_ebuild_phase,
			dblinkDisplayMerge=self._dblink_display_merge,
			dblinkElog=self._dblink_elog,
			fetch=fetch_iface, register=self._register,
			schedule=self._schedule_wait,
			scheduleSetup=self._schedule_setup,
			scheduleUnpack=self._schedule_unpack,
			scheduleYield=self._schedule_yield,
			unregister=self._unregister)

		self._prefetchers = weakref.WeakValueDictionary()
		self._pkg_queue = []
		self._completed_tasks = set()

		self._failed_pkgs = []
		self._failed_pkgs_all = []
		self._failed_pkgs_die_msgs = []
		self._post_mod_echo_msgs = []
		self._parallel_fetch = False
		merge_count = len([x for x in mergelist \
			if isinstance(x, Package) and x.operation == "merge"])
		self._pkg_count = self._pkg_count_class(
			curval=0, maxval=merge_count)
		self._status_display.maxval = self._pkg_count.maxval

		# The load average takes some time to respond when new
		# jobs are added, so we need to limit the rate of adding
		self._job_delay_max = 10
		self._job_delay_factor = 1.0
		self._job_delay_exp = 1.5
		self._previous_job_start_time = None

		self._set_digraph(digraph)

		# This is used to memoize the _choose_pkg() result when
		# no packages can be chosen until one of the existing
		self._choose_pkg_return_early = False

		features = self.settings.features
		if "parallel-fetch" in features and \
			not ("--pretend" in self.myopts or \
			"--fetch-all-uri" in self.myopts or \
			"--fetchonly" in self.myopts):
			if "distlocks" not in features:
				# parallel-fetch requires distlocks; warn and disable.
				portage.writemsg(red("!!!")+"\n", noiselevel=-1)
				portage.writemsg(red("!!!")+" parallel-fetching " + \
					"requires the distlocks feature enabled"+"\n",
				portage.writemsg(red("!!!")+" you have it disabled, " + \
					"thus parallel-fetching is being disabled"+"\n",
				portage.writemsg(red("!!!")+"\n", noiselevel=-1)
			elif len(mergelist) > 1:
				self._parallel_fetch = True

		if self._parallel_fetch:
			# clear out existing fetch log if it exists
			open(self._fetch_log, 'w')
			except EnvironmentError:

		# Identify the currently installed portage package, if any.
		self._running_portage = None
		portage_match = self._running_root.trees["vartree"].dbapi.match(
			portage.const.PORTAGE_PACKAGE_ATOM)
		cpv = portage_match.pop()
		self._running_portage = self._pkg(cpv, "installed",
			self._running_root, installed=True)
	def _poll(self, timeout=None):
		# NOTE(review): a line appears to be missing from this source
		# dump before the delegation below (presumably a
		# self._schedule() call) -- TODO confirm against upstream.
		PollScheduler._poll(self, timeout=timeout)
10122 def _set_max_jobs(self, max_jobs):
10123 self._max_jobs = max_jobs
10124 self._task_queues.jobs.max_jobs = max_jobs
	def _background_mode(self):
		"""
		Check if background mode is enabled and adjust states as necessary.
		@returns: True if background mode is enabled, False otherwise.
		"""
		# NOTE(review): lines are missing from this source dump in this
		# method (an `if background:` guard, the msg initializer, and
		# the final return) -- TODO restore from upstream.
		background = (self._max_jobs is True or \
			self._max_jobs > 1 or "--quiet" in self.myopts) and \
			not bool(self._opts_no_background.intersection(self.myopts))

		interactive_tasks = self._get_interactive_tasks()
		if interactive_tasks:
			# Interactive packages need stdio, so drop to one job.
			writemsg_level(">>> Sending package output to stdio due " + \
				"to interactive package(s):\n",
				level=logging.INFO, noiselevel=-1)
			for pkg in interactive_tasks:
				pkg_str = " " + colorize("INFORM", str(pkg.cpv))
				if pkg.root != "/":
					pkg_str += " for " + pkg.root
				msg.append(pkg_str)

			writemsg_level("".join("%s\n" % (l,) for l in msg),
				level=logging.INFO, noiselevel=-1)
			if self._max_jobs is True or self._max_jobs > 1:
				self._set_max_jobs(1)
				writemsg_level(">>> Setting --jobs=1 due " + \
					"to the above interactive package(s)\n",
					level=logging.INFO, noiselevel=-1)

		# Quiet status display unless foreground and verbose enough.
		self._status_display.quiet = \
			not background or \
			("--quiet" in self.myopts and \
			"--verbose" not in self.myopts)

		self._logger.xterm_titles = \
			"notitles" not in self.settings.features and \
			self._status_display.quiet
	def _get_interactive_tasks(self):
		"""Return merge-list Packages whose PROPERTIES include
		"interactive"."""
		# NOTE(review): the `continue` and `try:` lines appear to be
		# missing from this source dump -- TODO restore from upstream.
		from portage import flatten
		from portage.dep import use_reduce, paren_reduce
		interactive_tasks = []
		for task in self._mergelist:
			if not (isinstance(task, Package) and \
				task.operation == "merge"):
			properties = flatten(use_reduce(paren_reduce(
				task.metadata["PROPERTIES"]), uselist=task.use.enabled))
			except portage.exception.InvalidDependString, e:
				# A broken PROPERTIES string aborts scheduling entirely.
				show_invalid_depstring_notice(task,
					task.metadata["PROPERTIES"], str(e))
				raise self._unknown_internal_error()
			if "interactive" in properties:
				interactive_tasks.append(task)
		return interactive_tasks
	def _set_digraph(self, digraph):
		# With --nodeps or no parallelism the graph is not needed.
		# NOTE(review): a `return` appears to be missing from this
		# source dump after the None assignment -- TODO confirm.
		if "--nodeps" in self.myopts or \
			(self._max_jobs is not True and self._max_jobs < 2):
			self._digraph = None

		self._digraph = digraph
		self._find_system_deps()
		self._prune_digraph()
		self._prevent_builddir_collisions()
10201 def _find_system_deps(self):
10203 Find system packages and their deep runtime dependencies. Before being
10204 merged, these packages go to merge_wait_queue, to be merged when no
10205 other packages are building.
10207 deep_system_deps = self._deep_system_deps
10208 deep_system_deps.clear()
10209 deep_system_deps.update(
10210 _find_deep_system_runtime_deps(self._digraph))
10211 deep_system_deps.difference_update([pkg for pkg in \
10212 deep_system_deps if pkg.operation != "merge"])
	def _prune_digraph(self):
		"""
		Prune any root nodes that are irrelevant.
		"""
		# NOTE(review): the enclosing loop header, part of the root-node
		# condition, and the loop-exit break appear to be missing from
		# this source dump -- TODO restore from upstream.
		graph = self._digraph
		completed_tasks = self._completed_tasks
		removed_nodes = set()
		for node in graph.root_nodes():
			# Drop non-package roots, already-installed nomerge nodes,
			# and tasks that have already completed.
			if not isinstance(node, Package) or \
				(node.installed and node.operation == "nomerge") or \
				node in completed_tasks:
				removed_nodes.add(node)
			graph.difference_update(removed_nodes)
		if not removed_nodes:
		removed_nodes.clear()
	def _prevent_builddir_collisions(self):
		"""
		When building stages, sometimes the same exact cpv needs to be merged
		to both $ROOTs. Add edges to the digraph in order to avoid collisions
		in the builddir. Currently, normal file locks would be inappropriate
		for this purpose since emerge holds all of it's build dir locks from
		"""
		# NOTE(review): the docstring tail, the cpv_map initializer, and
		# a couple of `continue` lines appear to be missing from this
		# source dump -- TODO restore from upstream.
		for pkg in self._mergelist:
			if not isinstance(pkg, Package):
				# a satisfied blocker
			if pkg.cpv not in cpv_map:
				cpv_map[pkg.cpv] = [pkg]
			# Serialize same-cpv merges via buildtime dependency edges.
			for earlier_pkg in cpv_map[pkg.cpv]:
				self._digraph.add(earlier_pkg, pkg,
					priority=DepPriority(buildtime=True))
			cpv_map[pkg.cpv].append(pkg)
	class _pkg_failure(portage.exception.PortageException):
		"""
		An instance of this class is raised by unmerge() when
		an uninstallation fails.
		"""
		def __init__(self, *pargs):
			portage.exception.PortageException.__init__(self, pargs)
			# NOTE(review): an `if pargs:` guard appears to be missing
			# from this source dump before the next line -- TODO confirm.
			self.status = pargs[0]
10269 def _schedule_fetch(self, fetcher):
10271 Schedule a fetcher on the fetch queue, in order to
10272 serialize access to the fetch log.
10274 self._task_queues.fetch.addFront(fetcher)
10276 def _schedule_setup(self, setup_phase):
10278 Schedule a setup phase on the merge queue, in order to
10279 serialize unsandboxed access to the live filesystem.
10281 self._task_queues.merge.addFront(setup_phase)
10284 def _schedule_unpack(self, unpack_phase):
10286 Schedule an unpack phase on the unpack queue, in order
10287 to serialize $DISTDIR access for live ebuilds.
10289 self._task_queues.unpack.add(unpack_phase)
10291 def _find_blockers(self, new_pkg):
10293 Returns a callable which should be called only when
10294 the vdb lock has been acquired.
10296 def get_blockers():
10297 return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10298 return get_blockers
def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
	"""
	Return a list of dblink instances for installed packages that
	block new_pkg. NOTE(review): several guard lines (an early return
	and two "continue" statements after the slot/cpv comparisons) are
	elided from this excerpt — confirm against the full source.
	"""
	if self._opts_ignore_blockers.intersection(self.myopts):
	# Call gc.collect() here to avoid heap overflow that
	# triggers 'Cannot allocate memory' errors (reported
	# with python-2.5).
	blocker_db = self._blocker_db[new_pkg.root]
	blocker_dblinks = []
	for blocking_pkg in blocker_db.findInstalledBlockers(
		new_pkg, acquire_lock=acquire_lock):
		# Packages in the same slot or with the same cpv are being
		# replaced rather than blocked (handling elided here).
		if new_pkg.slot_atom == blocking_pkg.slot_atom:
		if new_pkg.cpv == blocking_pkg.cpv:
		blocker_dblinks.append(portage.dblink(
			blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
			self.pkgsettings[blocking_pkg.root], treetype="vartree",
			vartree=self.trees[blocking_pkg.root]["vartree"]))
	return blocker_dblinks
def _dblink_pkg(self, pkg_dblink):
	"""
	Map a dblink instance back to the corresponding Package object,
	using the package cache via self._pkg().
	"""
	type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
	root_config = self.trees[pkg_dblink.myroot]["root_config"]
	return self._pkg(pkg_dblink.mycpv, type_name, root_config,
		installed=(type_name == "installed"))
def _append_to_log_path(self, log_path, msg):
	"""
	Append msg to the log file at log_path.
	NOTE(review): the remainder of this method (writing msg and
	closing f, presumably in a try/finally) is elided from this
	excerpt.
	"""
	f = open(log_path, 'a')
def _dblink_elog(self, pkg_dblink, phase, func, msgs):
	"""
	Elog callback for dblink: when running in the background, route
	elog output into the package's build log instead of the terminal.
	NOTE(review): setup of "out"/"log_file", the message loop, and the
	file close are partially elided from this excerpt.
	"""
	log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
	background = self._background
	if background and log_path is not None:
		log_file = open(log_path, 'a')
	# Dispatch each message through the given elog function.
	func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
	if log_file is not None:
def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
	"""
	Display callback for dblink merge output: write to the terminal
	and/or append to the build log depending on background mode.
	NOTE(review): the branch separating the two writemsg_level calls
	(an else/"if not background" pair, judging by the duplicate call)
	is elided from this excerpt.
	"""
	log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
	background = self._background
	if log_path is None:
		# No log file: suppress low-severity output in the background.
		if not (background and level < logging.WARN):
			portage.util.writemsg_level(msg,
				level=level, noiselevel=noiselevel)
	portage.util.writemsg_level(msg,
		level=level, noiselevel=noiselevel)
	self._append_to_log_path(log_path, msg)
def _dblink_ebuild_phase(self,
	pkg_dblink, pkg_dbapi, ebuild_path, phase):
	"""
	Using this callback for merge phases allows the scheduler
	to run while these phases execute asynchronously, and allows
	the scheduler control output handling.

	@returns: the phase's exit status (os.EX_OK on success)
	"""
	scheduler = self._sched_iface
	settings = pkg_dblink.settings
	pkg = self._dblink_pkg(pkg_dblink)
	background = self._background
	log_path = settings.get("PORTAGE_LOG_FILE")

	ebuild_phase = EbuildPhase(background=background,
		pkg=pkg, phase=phase, scheduler=scheduler,
		settings=settings, tree=pkg_dblink.treetype)
	ebuild_phase.start()
	# Block until the phase completes; the scheduler's event loop
	# keeps servicing other tasks meanwhile.
	ebuild_phase.wait()

	return ebuild_phase.returncode
def _check_manifests(self):
	# Verify all the manifests now so that the user is notified of failure
	# as soon as possible.
	# NOTE(review): the early "return os.EX_OK", a "continue" in the
	# loop, and the final return statements are elided from this
	# excerpt — only ebuilds are checked, and checking is skipped for
	# fetch-only runs or when FEATURES=strict is unset.
	if "strict" not in self.settings.features or \
		"--fetchonly" in self.myopts or \
		"--fetch-all-uri" in self.myopts:
	shown_verifying_msg = False
	quiet_settings = {}
	# Clone each root's config with PORTAGE_QUIET=1 so digestcheck
	# output stays quiet.
	for myroot, pkgsettings in self.pkgsettings.iteritems():
		quiet_config = portage.config(clone=pkgsettings)
		quiet_config["PORTAGE_QUIET"] = "1"
		quiet_config.backup_changes("PORTAGE_QUIET")
		quiet_settings[myroot] = quiet_config
	for x in self._mergelist:
		if not isinstance(x, Package) or \
			x.type_name != "ebuild":
		if not shown_verifying_msg:
			shown_verifying_msg = True
			self._status_msg("Verifying ebuild manifests")
		root_config = x.root_config
		portdb = root_config.trees["porttree"].dbapi
		quiet_config = quiet_settings[root_config.root]
		quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
		if not portage.digestcheck([], quiet_config, strict=True):
def _add_prefetchers(self):
	"""
	Create and queue fetch tasks for upcoming merges so downloads can
	proceed in parallel with builds.
	NOTE(review): an early "return" after the first guard appears to
	be elided from this excerpt (otherwise the second check would be
	redundant).
	"""
	if not self._parallel_fetch:

	if self._parallel_fetch:
		self._status_msg("Starting parallel fetch")

		prefetchers = self._prefetchers
		getbinpkg = "--getbinpkg" in self.myopts

		# In order to avoid "waiting for lock" messages
		# at the beginning, which annoy users, never
		# spawn a prefetcher for the first package.
		for pkg in self._mergelist[1:]:
			prefetcher = self._create_prefetcher(pkg)
			if prefetcher is not None:
				self._task_queues.fetch.add(prefetcher)
				prefetchers[pkg] = prefetcher
def _create_prefetcher(self, pkg):
	"""
	@return: a prefetcher, or None if not applicable

	NOTE(review): the "prefetcher = None" initialization and the final
	"return prefetcher" appear to be elided from this excerpt.
	"""
	if not isinstance(pkg, Package):
	elif pkg.type_name == "ebuild":
		# Source package: fetch distfiles ahead of time.
		prefetcher = EbuildFetcher(background=True,
			config_pool=self._ConfigPool(pkg.root,
			self._allocate_config, self._deallocate_config),
			fetchonly=1, logfile=self._fetch_log,
			pkg=pkg, prefetch=True, scheduler=self._sched_iface)

	elif pkg.type_name == "binary" and \
		"--getbinpkg" in self.myopts and \
		pkg.root_config.trees["bintree"].isremote(pkg.cpv):
		# Remote binary package: download it ahead of time.
		prefetcher = BinpkgPrefetcher(background=True,
			pkg=pkg, scheduler=self._sched_iface)
def _is_restart_scheduled(self):
	"""
	Check if the merge list contains a replacement
	for the current running instance, that will result
	in restart after merge.
	@rtype: bool
	@returns: True if a restart is scheduled, False otherwise.

	NOTE(review): the return statements (after the opts guard, inside
	the loop, and at the end) are elided from this excerpt.
	"""
	if self._opts_no_restart.intersection(self.myopts):

	mergelist = self._mergelist

	for i, pkg in enumerate(mergelist):
		# A restart only matters if packages remain after the
		# portage replacement itself.
		if self._is_restart_necessary(pkg) and \
			i != len(mergelist) - 1:
def _is_restart_necessary(self, pkg):
	"""
	@return: True if merging the given package
		requires restart, False otherwise.

	NOTE(review): the fall-through return statements are elided from
	this excerpt.
	"""
	# Figure out if we need a restart.
	if pkg.root == self._running_root.root and \
		portage.match_from_list(
		portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
		if self._running_portage:
			# Restart only when the version actually changes.
			return pkg.cpv != self._running_portage.cpv
def _restart_if_necessary(self, pkg):
	"""
	Use execv() to restart emerge. This happens
	if portage upgrades itself and there are
	remaining packages in the list.

	NOTE(review): the early "return" statements after the three guard
	conditions, and an "else:" in the opt/arg loop, are elided from
	this excerpt.
	"""
	if self._opts_no_restart.intersection(self.myopts):

	if not self._is_restart_necessary(pkg):

	if pkg == self._mergelist[-1]:

	self._main_loop_cleanup()

	logger = self._logger
	pkg_count = self._pkg_count
	mtimedb = self._mtimedb
	bad_resume_opts = self._bad_resume_opts

	logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
		(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))

	logger.log(" *** RESTARTING " + \
		"emerge via exec() after change of " + \
		"portage version.")

	# Drop the just-merged portage from the resume list before exec.
	mtimedb["resume"]["mergelist"].remove(list(pkg))
	portage.run_exitfuncs()
	mynewargv = [sys.argv[0], "--resume"]
	resume_opts = self.myopts.copy()
	# For automatic resume, we need to prevent
	# any of bad_resume_opts from leaking in
	# via EMERGE_DEFAULT_OPTS.
	resume_opts["--ignore-default-opts"] = True
	for myopt, myarg in resume_opts.iteritems():
		if myopt not in bad_resume_opts:
			# Flag options are passed bare; valued options as opt=arg.
			mynewargv.append(myopt)
			mynewargv.append(myopt +"="+ str(myarg))
	# priority only needs to be adjusted on the first run
	os.environ["PORTAGE_NICENESS"] = "0"
	os.execv(mynewargv[0], mynewargv)
# NOTE(review): this excerpt begins inside a method body whose "def"
# line is elided — the main merge driver with --keep-going retry
# handling, judging by the resume/merge logic below. Many control-flow
# lines (try/while headers, returns, breaks, continues, else branches)
# are elided; the structure shown is best-effort and must be confirmed
# against the full source.
if "--resume" in self.myopts:
	portage.writemsg_stdout(
		colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
	self._logger.log(" *** Resuming merge...")

self._save_resume_list()

# NOTE(review): the "try:" matching the except below is elided.
self._background = self._background_mode()
except self._unknown_internal_error:

for root in self.trees:
	root_config = self.trees[root]["root_config"]

	# Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
	# since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
	# for ensuring sane $PWD (bug #239560) and storing elog messages.
	tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
	if not tmpdir or not os.path.isdir(tmpdir):
		msg = "The directory specified in your " + \
			"PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
			"does not exist. Please create this " + \
			"directory or correct your PORTAGE_TMPDIR setting."
		msg = textwrap.wrap(msg, 70)
		out = portage.output.EOutput()

	# Propagate background mode into each root's (locked) config.
	if self._background:
		root_config.settings.unlock()
		root_config.settings["PORTAGE_BACKGROUND"] = "1"
		root_config.settings.backup_changes("PORTAGE_BACKGROUND")
		root_config.settings.lock()

	self.pkgsettings[root] = portage.config(
		clone=root_config.settings)

rval = self._check_manifests()
if rval != os.EX_OK:

keep_going = "--keep-going" in self.myopts
fetchonly = self._build_opts.fetchonly
mtimedb = self._mtimedb
failed_pkgs = self._failed_pkgs

# Retry loop: on failure with --keep-going, drop failed packages
# from the resume list, recompute it, and merge again.
rval = self._merge()
if rval == os.EX_OK or fetchonly or not keep_going:
if "resume" not in mtimedb:
mergelist = self._mtimedb["resume"].get("mergelist")
if not failed_pkgs:
for failed_pkg in failed_pkgs:
	mergelist.remove(list(failed_pkg.pkg))
self._failed_pkgs_all.extend(failed_pkgs)
if not self._calc_resume_list():
clear_caches(self.trees)
if not self._mergelist:
self._save_resume_list()
self._pkg_count.curval = 0
self._pkg_count.maxval = len([x for x in self._mergelist \
	if isinstance(x, Package) and x.operation == "merge"])
self._status_display.maxval = self._pkg_count.maxval

self._logger.log(" *** Finished. Cleaning up...")

self._failed_pkgs_all.extend(failed_pkgs)

background = self._background
failure_log_shown = False
if background and len(self._failed_pkgs_all) == 1:
	# If only one package failed then just show it's
	# whole log for easy viewing.
	failed_pkg = self._failed_pkgs_all[-1]
	build_dir = failed_pkg.build_dir
	log_paths = [failed_pkg.build_log]

	log_path = self._locate_failure_log(failed_pkg)
	if log_path is not None:
		log_file = open(log_path)

	if log_file is not None:
		for line in log_file:
			writemsg_level(line, noiselevel=-1)
		failure_log_shown = True

# Dump mod_echo output now since it tends to flood the terminal.
# This allows us to avoid having more important output, generated
# later, from being swept away by the mod_echo output.
mod_echo_output = _flush_elog_mod_echo()

if background and not failure_log_shown and \
	self._failed_pkgs_all and \
	self._failed_pkgs_die_msgs and \
	not mod_echo_output:

	printer = portage.output.EOutput()
	for mysettings, key, logentries in self._failed_pkgs_die_msgs:
		# NOTE(review): root_msg initialization elided in excerpt.
		if mysettings["ROOT"] != "/":
			root_msg = " merged to %s" % mysettings["ROOT"]
		printer.einfo("Error messages for package %s%s:" % \
			(colorize("INFORM", key), root_msg))
		for phase in portage.const.EBUILD_PHASES:
			if phase not in logentries:
			for msgtype, msgcontent in logentries[phase]:
				if isinstance(msgcontent, basestring):
					msgcontent = [msgcontent]
				for line in msgcontent:
					printer.eerror(line.strip("\n"))

if self._post_mod_echo_msgs:
	for msg in self._post_mod_echo_msgs:

if len(self._failed_pkgs_all) > 1 or \
	(self._failed_pkgs_all and "--keep-going" in self.myopts):
	if len(self._failed_pkgs_all) > 1:
		msg = "The following %d packages have " % \
			len(self._failed_pkgs_all) + \
			"failed to build or install:"
	# NOTE(review): an "else:" appears to be elided before this branch.
	msg = "The following package has " + \
		"failed to build or install:"
	prefix = bad(" * ")
	writemsg(prefix + "\n", noiselevel=-1)
	from textwrap import wrap
	for line in wrap(msg, 72):
		writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
	writemsg(prefix + "\n", noiselevel=-1)
	for failed_pkg in self._failed_pkgs_all:
		writemsg("%s\t%s\n" % (prefix,
			colorize("INFORM", str(failed_pkg.pkg))),
	writemsg(prefix + "\n", noiselevel=-1)
def _elog_listener(self, mysettings, key, logentries, fulltext):
	"""
	Collect ERROR-level elog entries for display after the merge run.
	NOTE(review): a guard line ("if errors:", presumably) is elided
	from this excerpt before the append.
	"""
	errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
	self._failed_pkgs_die_msgs.append(
		(mysettings, key, errors))
def _locate_failure_log(self, failed_pkg):
	"""
	Return the path of a non-empty build log for the given failed
	package, if one exists.
	NOTE(review): most of this method (guards, try/except around
	os.stat, and the return statements) is elided from this excerpt.
	"""
	build_dir = failed_pkg.build_dir
	log_paths = [failed_pkg.build_log]
	for log_path in log_paths:
		log_size = os.stat(log_path).st_size
def _add_packages(self):
	"""
	Seed the pending-package queue from the merge list.
	NOTE(review): the body of the Blocker branch is elided from this
	excerpt (satisfied blockers are presumably recorded as completed).
	"""
	pkg_queue = self._pkg_queue
	for pkg in self._mergelist:
		if isinstance(pkg, Package):
			pkg_queue.append(pkg)
		elif isinstance(pkg, Blocker):
10771 def _merge_wait_exit_handler(self, task):
10772 self._merge_wait_scheduled.remove(task)
10773 self._merge_exit(task)
def _merge_exit(self, merge):
	"""
	Exit listener for PackageMerge tasks: finalize bookkeeping,
	return the config to the pool, and update the status display.
	NOTE(review): one or two trailing lines (e.g. a call to resume
	scheduling) appear to be elided from this excerpt.
	"""
	self._do_merge_exit(merge)
	self._deallocate_config(merge.merge.settings)
	# Only count real merges toward progress, not re-installs of
	# already-installed packages.
	if merge.returncode == os.EX_OK and \
		not merge.merge.pkg.installed:
		self._status_display.curval += 1
	self._status_display.merges = len(self._task_queues.merge)
def _do_merge_exit(self, merge):
	"""
	Handle the result of a completed merge: record failures, or mark
	the task (and any replaced package's uninstall task) complete and
	update the resume list.
	NOTE(review): a "return" after the failure branch and a
	mtimedb.commit() call appear to be elided from this excerpt.
	"""
	pkg = merge.merge.pkg
	if merge.returncode != os.EX_OK:
		settings = merge.merge.settings
		build_dir = settings.get("PORTAGE_BUILDDIR")
		build_log = settings.get("PORTAGE_LOG_FILE")

		self._failed_pkgs.append(self._failed_pkg(
			build_dir=build_dir, build_log=build_log,
			returncode=merge.returncode))
		self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")

		self._status_display.failed = len(self._failed_pkgs)

	self._task_complete(pkg)
	pkg_to_replace = merge.merge.pkg_to_replace
	if pkg_to_replace is not None:
		# When a package is replaced, mark it's uninstall
		# task complete (if any).
		uninst_hash_key = \
			("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
		self._task_complete(uninst_hash_key)

	self._restart_if_necessary(pkg)

	# Call mtimedb.commit() after each merge so that
	# --resume still works after being interrupted
	# by reboot, sigkill or similar.
	mtimedb = self._mtimedb
	mtimedb["resume"]["mergelist"].remove(list(pkg))
	if not mtimedb["resume"]["mergelist"]:
		del mtimedb["resume"]
def _build_exit(self, build):
	"""
	Exit listener for build tasks: on success, queue the merge step
	(deferring deep system deps to the merge-wait queue); on failure,
	record the failed package.
	NOTE(review): an "else:" separating the success and failure
	branches, and the job-count decrement, appear to be elided from
	this excerpt.
	"""
	if build.returncode == os.EX_OK:
		merge = PackageMerge(merge=build)
		if not build.build_opts.buildpkgonly and \
			build.pkg in self._deep_system_deps:
			# Since dependencies on system packages are frequently
			# unspecified, merge them only when no builds are executing.
			self._merge_wait_queue.append(merge)
		merge.addExitListener(self._merge_exit)
		self._task_queues.merge.add(merge)
		self._status_display.merges = len(self._task_queues.merge)
	settings = build.settings
	build_dir = settings.get("PORTAGE_BUILDDIR")
	build_log = settings.get("PORTAGE_LOG_FILE")

	self._failed_pkgs.append(self._failed_pkg(
		build_dir=build_dir, build_log=build_log,
		returncode=build.returncode))
	self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")

	self._status_display.failed = len(self._failed_pkgs)
	self._deallocate_config(build.settings)
	self._status_display.running = self._jobs
10853 def _extract_exit(self, build):
10854 self._build_exit(build)
10856 def _task_complete(self, pkg):
10857 self._completed_tasks.add(pkg)
10858 self._choose_pkg_return_early = False
# NOTE(review): this excerpt begins inside a method body whose "def"
# line is elided — the inner merge loop setup/teardown, judging by the
# prefetcher/queue handling. The main loop call and try/finally
# structure are elided; layout here is best-effort.
self._add_prefetchers()
self._add_packages()
pkg_queue = self._pkg_queue
failed_pkgs = self._failed_pkgs
# Silence lock messages and capture elog errors while running.
portage.locks._quiet = self._background
portage.elog._emerge_elog_listener = self._elog_listener
# (main loop elided here)
self._main_loop_cleanup()
portage.locks._quiet = False
portage.elog._emerge_elog_listener = None
# Propagate the last failure's exit status.
rval = failed_pkgs[-1].returncode
10881 def _main_loop_cleanup(self):
10882 del self._pkg_queue[:]
10883 self._completed_tasks.clear()
10884 self._deep_system_deps.clear()
10885 self._choose_pkg_return_early = False
10886 self._status_display.reset()
10887 self._digraph = None
10888 self._task_queues.fetch.clear()
def _choose_pkg(self):
	"""
	Choose a task that has all it's dependencies satisfied.
	NOTE(review): several return statements and the chosen_pkg
	initialization/break are elided from this excerpt.
	"""
	if self._choose_pkg_return_early:

	if self._digraph is None:
		# No graph: only safe to pop the next package in list order
		# once running jobs have drained (unless --nodeps allows it).
		if (self._jobs or self._task_queues.merge) and \
			not ("--nodeps" in self.myopts and \
			(self._max_jobs is True or self._max_jobs > 1)):
			self._choose_pkg_return_early = True
		return self._pkg_queue.pop(0)

	if not (self._jobs or self._task_queues.merge):
		return self._pkg_queue.pop(0)

	self._prune_digraph()

	# "later" lets dependence on packages that would merge after this
	# one anyway be ignored.
	later = set(self._pkg_queue)
	for pkg in self._pkg_queue:
		if not self._dependent_on_scheduled_merges(pkg, later):

	if chosen_pkg is not None:
		self._pkg_queue.remove(chosen_pkg)

	if chosen_pkg is None:
		# There's no point in searching for a package to
		# choose until at least one of the existing jobs
		# completes.
		self._choose_pkg_return_early = True
def _dependent_on_scheduled_merges(self, pkg, later):
	"""
	Traverse the subgraph of the given packages deep dependencies
	to see if it contains any scheduled merges.
	@param pkg: a package to check dependencies for
	@param later: packages for which dependence should be ignored
		since they will be merged later than pkg anyway and therefore
		delaying the merge of pkg will not result in a more optimal
		merge order
	@rtype: bool
	@returns: True if the package is dependent, False otherwise.

	NOTE(review): the "while node_stack:" header, a "continue", the
	"later" test inside the condition, and the return statements are
	elided from this excerpt.
	"""
	graph = self._digraph
	completed_tasks = self._completed_tasks

	traversed_nodes = set([pkg])
	direct_deps = graph.child_nodes(pkg)
	node_stack = direct_deps
	direct_deps = frozenset(direct_deps)
	# Depth-first traversal over not-yet-visited children.
	node = node_stack.pop()
	if node in traversed_nodes:
	traversed_nodes.add(node)
	if not ((node.installed and node.operation == "nomerge") or \
		(node.operation == "uninstall" and \
		node not in direct_deps) or \
		node in completed_tasks or \
		node_stack.extend(graph.child_nodes(node))
def _allocate_config(self, root):
	"""
	Allocate a unique config instance for a task in order
	to prevent interference between parallel tasks.

	NOTE(review): an "else:" line appears to be elided between the
	pool pop and the fresh clone below.
	"""
	if self._config_pool[root]:
		# Reuse a pooled instance when one is available.
		temp_settings = self._config_pool[root].pop()
	temp_settings = portage.config(clone=self.pkgsettings[root])
	# Since config.setcpv() isn't guaranteed to call config.reset() due to
	# performance reasons, call it here to make sure all settings from the
	# previous package get flushed out (such as PORTAGE_LOG_FILE).
	temp_settings.reload()
	temp_settings.reset()
	return temp_settings
10985 def _deallocate_config(self, settings):
10986 self._config_pool[settings["ROOT"]].append(settings)
def _main_loop(self):
	"""
	Drive scheduling until no work remains, polling event handlers
	while jobs are outstanding.
	NOTE(review): the poll calls inside the two loops and the final
	drain loop header are partially elided from this excerpt.
	"""
	# Only allow 1 job max if a restart is scheduled
	# due to portage update.
	if self._is_restart_scheduled() or \
		self._opts_no_background.intersection(self.myopts):
		self._set_max_jobs(1)

	merge_queue = self._task_queues.merge

	while self._schedule():
		if self._poll_event_handlers:

	# Drain any remaining jobs/merges after scheduling stops.
	if not (self._jobs or merge_queue):
	if self._poll_event_handlers:
11009 def _keep_scheduling(self):
11010 return bool(self._pkg_queue and \
11011 not (self._failed_pkgs and not self._build_opts.fetchonly))
def _schedule_tasks(self):
	"""
	One scheduling pass: release waiting merges when all jobs have
	drained, schedule new tasks, and cancel prefetchers that are the
	only thing keeping the loop alive.
	NOTE(review): loop headers and early-return lines are elided from
	this excerpt; structure shown is best-effort.
	"""
	# When the number of jobs drops to zero, process all waiting merges.
	if not self._jobs and self._merge_wait_queue:
		for task in self._merge_wait_queue:
			task.addExitListener(self._merge_wait_exit_handler)
			self._task_queues.merge.add(task)
		self._status_display.merges = len(self._task_queues.merge)
		self._merge_wait_scheduled.extend(self._merge_wait_queue)
		del self._merge_wait_queue[:]

	self._schedule_tasks_imp()
	self._status_display.display()

	for q in self._task_queues.values():

	# Cancel prefetchers if they're the only reason
	# the main poll loop is still running.
	if self._failed_pkgs and not self._build_opts.fetchonly and \
		not (self._jobs or self._task_queues.merge) and \
		self._task_queues.fetch:
		self._task_queues.fetch.clear()
		# Re-run scheduling after clearing the fetch queue.
		self._schedule_tasks_imp()
		self._status_display.display()

	return self._keep_scheduling()
def _job_delay(self):
	"""
	@rtype: bool
	@returns: True if job scheduling should be delayed, False otherwise.

	NOTE(review): the return statements (True inside the branch, False
	as fall-through) are elided from this excerpt.
	"""
	if self._jobs and self._max_load is not None:

		current_time = time.time()

		# Exponential backoff in the number of running jobs, capped.
		delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
		if delay > self._job_delay_max:
			delay = self._job_delay_max
		if (current_time - self._previous_job_start_time) < delay:
def _schedule_tasks_imp(self):
	"""
	@rtype: bool
	@returns: True if state changed, False otherwise.

	NOTE(review): the enclosing "while True:" loop, state_change
	initialization/increments, job-count updates, and branch headers
	(e.g. for installed vs. built packages) are elided from this
	excerpt; structure shown is best-effort.
	"""
	if not self._keep_scheduling():
		return bool(state_change)

	if self._choose_pkg_return_early or \
		self._merge_wait_scheduled or \
		not self._can_add_job() or \
		return bool(state_change)

	pkg = self._choose_pkg()
	return bool(state_change)

	if not pkg.installed:
		self._pkg_count.curval += 1

	task = self._task(pkg)

	# Already-installed packages go straight to the merge queue.
	merge = PackageMerge(merge=task)
	merge.addExitListener(self._merge_exit)
	self._task_queues.merge.add(merge)

	self._previous_job_start_time = time.time()
	self._status_display.running = self._jobs
	task.addExitListener(self._extract_exit)
	self._task_queues.jobs.add(task)

	# Source builds are scheduled as jobs with the build exit handler.
	self._previous_job_start_time = time.time()
	self._status_display.running = self._jobs
	task.addExitListener(self._build_exit)
	self._task_queues.jobs.add(task)

	return bool(state_change)
def _task(self, pkg):
	"""
	Build a MergeListItem task for the given package, resolving the
	installed package (if any) that it will replace.
	NOTE(review): a guard around previous_cpv.pop() and the final
	"return task" appear to be elided from this excerpt.
	"""
	pkg_to_replace = None
	if pkg.operation != "uninstall":
		vardb = pkg.root_config.trees["vartree"].dbapi
		previous_cpv = vardb.match(pkg.slot_atom)
		previous_cpv = previous_cpv.pop()
		pkg_to_replace = self._pkg(previous_cpv,
			"installed", pkg.root_config, installed=True)

	task = MergeListItem(args_set=self._args_set,
		background=self._background, binpkg_opts=self._binpkg_opts,
		build_opts=self._build_opts,
		config_pool=self._ConfigPool(pkg.root,
		self._allocate_config, self._deallocate_config),
		emerge_opts=self.myopts,
		find_blockers=self._find_blockers(pkg), logger=self._logger,
		mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
		pkg_to_replace=pkg_to_replace,
		prefetcher=self._prefetchers.get(pkg),
		scheduler=self._sched_iface,
		settings=self._allocate_config(pkg.root),
		statusMessage=self._status_msg,
		world_atom=self._world_atom)
def _failed_pkg_msg(self, failed_pkg, action, preposition):
	"""
	Report a package failure through the status display, e.g.
	'Failed to emerge cat/pkg-1 for /some/root', followed by the
	location of the build log when one can be found.
	"""
	pkg = failed_pkg.pkg
	msg = "%s to %s %s" % (
		bad("Failed"), action, colorize("INFORM", pkg.cpv))
	if pkg.root != "/":
		msg = "%s %s %s" % (msg, preposition, pkg.root)

	log_path = self._locate_failure_log(failed_pkg)
	if log_path is None:
		self._status_msg(msg)
	else:
		self._status_msg(msg + ", Log file:")
		self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
def _status_msg(self, msg):
	"""
	Display a brief status message (no newlines) in the status display.
	This is called by tasks to provide feedback to the user. This
	delegates the responsibility of generating \r and \n control characters,
	to guarantee that lines are created or erased when necessary and
	appropriate.

	@type msg: str
	@param msg: a brief status message (no newlines allowed)
	"""
	if not self._background:
		# Start a fresh line so the message isn't appended to a
		# previous status line.
		writemsg_level("\n")
	self._status_display.displayMessage(msg)
def _save_resume_list(self):
	"""
	Do this before verifying the ebuild Manifests since it might
	be possible for the user to use --resume --skipfirst get past
	a non-essential package with a broken digest.
	"""
	mtimedb = self._mtimedb
	# Persist only real merges; blockers and nomerge entries are
	# recomputed on resume.
	mtimedb["resume"]["mergelist"] = [list(x) \
		for x in self._mergelist \
		if isinstance(x, Package) and x.operation == "merge"]
	# NOTE(review): a mtimedb.commit() call appears to be elided from
	# this excerpt.
def _calc_resume_list(self):
	"""
	Use the current resume list to calculate a new one,
	dropping any packages with unsatisfied deps.
	@rtype: bool
	@returns: True if successful, False otherwise.

	NOTE(review): many control-flow lines (try headers, else branches,
	returns, and some variable initializations) are elided from this
	excerpt; structure shown is best-effort.
	"""
	print colorize("GOOD", "*** Resuming merge...")

	if self._show_list():
		if "--tree" in self.myopts:
			portage.writemsg_stdout("\n" + \
				darkgreen("These are the packages that " + \
				"would be merged, in reverse order:\n\n"))
		# NOTE(review): an "else:" appears to be elided here.
		portage.writemsg_stdout("\n" + \
			darkgreen("These are the packages that " + \
			"would be merged, in order:\n\n"))

	show_spinner = "--quiet" not in self.myopts and \
		"--nodeps" not in self.myopts

	print "Calculating dependencies ",

	myparams = create_depgraph_params(self.myopts, None)
	# NOTE(review): the "try:" matching the except below is elided.
	success, mydepgraph, dropped_tasks = resume_depgraph(
		self.settings, self.trees, self._mtimedb, self.myopts,
		myparams, self._spinner)
	except depgraph.UnsatisfiedResumeDep, exc:
		# rename variable to avoid python-3.0 error:
		# SyntaxError: can not delete variable 'e' referenced in nested
		# scope
		mydepgraph = e.depgraph
		dropped_tasks = set()

	print "\b\b... done!"

	# Deferred message shown after mod_echo output is flushed.
	def unsatisfied_resume_dep_msg():
		mydepgraph.display_problems()
		out = portage.output.EOutput()
		out.eerror("One or more packages are either masked or " + \
			"have missing dependencies:")

		show_parents = set()
		for dep in e.value:
			if dep.parent in show_parents:
			show_parents.add(dep.parent)
			if dep.atom is None:
				out.eerror(indent + "Masked package:")
				out.eerror(2 * indent + str(dep.parent))
			# NOTE(review): an "else:" appears to be elided here.
			out.eerror(indent + str(dep.atom) + " pulled in by:")
			out.eerror(2 * indent + str(dep.parent))
		msg = "The resume list contains packages " + \
			"that are either masked or have " + \
			"unsatisfied dependencies. " + \
			"Please restart/continue " + \
			"the operation manually, or use --skipfirst " + \
			"to skip the first package in the list and " + \
			"any other packages that may be " + \
			"masked or have missing dependencies."
		for line in textwrap.wrap(msg, 72):
	self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)

	if success and self._show_list():
		mylist = mydepgraph.altlist()
		if "--tree" in self.myopts:
			mydepgraph.display(mylist, favorites=self._favorites)

	# Problems are deferred so mod_echo output doesn't bury them.
	self._post_mod_echo_msgs.append(mydepgraph.display_problems)
	mydepgraph.display_problems()

	mylist = mydepgraph.altlist()
	# break_refs() allows the depgraph to be garbage collected.
	mydepgraph.break_refs(mylist)
	mydepgraph.break_refs(dropped_tasks)
	self._mergelist = mylist
	self._set_digraph(mydepgraph.schedulerGraph())

	# Report each dropped merge through elog so the user sees why it
	# was removed from the list.
	for task in dropped_tasks:
		if not (isinstance(task, Package) and task.operation == "merge"):
		msg = "emerge --keep-going:" + \
		if pkg.root != "/":
			msg += " for %s" % (pkg.root,)
		msg += " dropped due to unsatisfied dependency."
		for line in textwrap.wrap(msg, msg_width):
			eerror(line, phase="other", key=pkg.cpv)
		settings = self.pkgsettings[pkg.root]
		# Ensure that log collection from $T is disabled inside
		# elog_process(), since any logs that might exist are
		# irrelevant here.
		settings.pop("T", None)
		portage.elog.elog_process(pkg.cpv, settings)
		self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
11304 def _show_list(self):
11305 myopts = self.myopts
11306 if "--quiet" not in myopts and \
11307 ("--ask" in myopts or "--tree" in myopts or \
11308 "--verbose" in myopts):
def _world_atom(self, pkg):
	"""
	Add the package to the world file, but only if
	it's supposed to be added. Otherwise, do nothing.

	NOTE(review): several early "return" statements, try/finally
	around the locked section, and the world_set.unlock() call are
	elided from this excerpt.
	"""
	# Options that imply the world file must not be modified.
	if set(("--buildpkgonly", "--fetchonly",
		"--oneshot", "--onlydeps",
		"--pretend")).intersection(self.myopts):

	if pkg.root != self.target_root:

	args_set = self._args_set
	if not args_set.findAtomForPackage(pkg):

	logger = self._logger
	pkg_count = self._pkg_count
	root_config = pkg.root_config
	world_set = root_config.sets["world"]
	world_locked = False
	if hasattr(world_set, "lock"):
		world_locked = True

	if hasattr(world_set, "load"):
		world_set.load() # maybe it's changed on disk

	atom = create_world_atom(pkg, args_set, root_config)
	if hasattr(world_set, "add"):
		self._status_msg(('Recording %s in "world" ' + \
			'favorites file...') % atom)
		logger.log(" === (%s of %s) Updating world file (%s)" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv))
		world_set.add(atom)
	# NOTE(review): an "else:" appears to be elided before this branch.
	writemsg_level('\n!!! Unable to record %s in "world"\n' % \
		(atom,), level=logging.WARN, noiselevel=-1)
def _pkg(self, cpv, type_name, root_config, installed=False):
	"""
	Get a package instance from the cache, or create a new
	one if necessary. Raises KeyError from aux_get if it
	failures for some reason (package does not exist or is
	corrupt).

	NOTE(review): the operation branch header for installed packages
	and the return statements are elided from this excerpt.
	"""
	operation = "merge"
	# Installed packages use the "nomerge" operation.
	operation = "nomerge"

	if self._digraph is not None:
		# Reuse existing instance when available.
		pkg = self._digraph.get(
			(type_name, root_config.root, cpv, operation))
		if pkg is not None:

	tree_type = depgraph.pkg_tree_map[type_name]
	db = root_config.trees[tree_type].dbapi
	db_keys = list(self.trees[root_config.root][
		tree_type].dbapi._aux_cache_keys)
	metadata = izip(db_keys, db.aux_get(cpv, db_keys))
	pkg = Package(cpv=cpv, metadata=metadata,
		root_config=root_config, installed=installed)
	if type_name == "ebuild":
		# USE must reflect this root's current settings for the cpv.
		settings = self.pkgsettings[root_config.root]
		settings.setcpv(pkg)
		pkg.metadata["USE"] = settings["PORTAGE_USE"]
class MetadataRegen(PollScheduler):
	# Scheduler that regenerates the ebuild metadata cache by running
	# metadata processes in parallel through the poll loop.

	def __init__(self, portdb, max_jobs=None, max_load=None):
		"""
		@param portdb: the portage tree dbapi whose cache to regenerate
		@param max_jobs: maximum concurrent metadata processes
		@param max_load: load-average throttle (may be None)

		NOTE(review): the default assignment inside the "if max_jobs is
		None:" branch (presumably max_jobs = 1) is elided from this
		excerpt.
		"""
		PollScheduler.__init__(self)
		self._portdb = portdb
		if max_jobs is None:
		self._max_jobs = max_jobs
		self._max_load = max_load
		self._sched_iface = self._sched_iface_class(
			register=self._register,
			schedule=self._schedule_wait,
			unregister=self._unregister)

		# cpvs whose metadata was (or is being) generated successfully.
		self._valid_pkgs = set()
		self._process_iter = self._iter_metadata_processes()
		self.returncode = os.EX_OK
		self._error_count = 0
def _iter_metadata_processes(self):
	"""
	Generator yielding one metadata process per cpv that needs cache
	regeneration, walking categories in sorted order.
	NOTE(review): the "while every_cp:" loop header and a "continue"
	after the None check are elided from this excerpt.
	"""
	portdb = self._portdb
	valid_pkgs = self._valid_pkgs
	every_cp = portdb.cp_all()
	# Reverse sort so pop() walks cps in ascending order.
	every_cp.sort(reverse=True)

	cp = every_cp.pop()
	portage.writemsg_stdout("Processing %s\n" % cp)
	cpv_list = portdb.cp_list(cp)
	for cpv in cpv_list:
		valid_pkgs.add(cpv)
		ebuild_path, repo_path = portdb.findname2(cpv)
		metadata_process = portdb._metadata_process(
			cpv, ebuild_path, repo_path)
		# None means the cache entry is already valid.
		if metadata_process is None:
		yield metadata_process
11433 portdb = self._portdb
11434 from portage.cache.cache_errors import CacheError
11437 for mytree in portdb.porttrees:
11439 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11440 except CacheError, e:
11441 portage.writemsg("Error listing cache entries for " + \
11442 "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
11447 while self._schedule():
11454 for y in self._valid_pkgs:
11455 for mytree in portdb.porttrees:
11456 if portdb.findname2(y, mytree=mytree)[0]:
11457 dead_nodes[mytree].discard(y)
11459 for mytree, nodes in dead_nodes.iteritems():
11460 auxdb = portdb.auxdb[mytree]
11464 except (KeyError, CacheError):
11467 def _schedule_tasks(self):
11470 @returns: True if there may be remaining tasks to schedule,
11473 while self._can_add_job():
11475 metadata_process = self._process_iter.next()
11476 except StopIteration:
11480 metadata_process.scheduler = self._sched_iface
11481 metadata_process.addExitListener(self._metadata_exit)
11482 metadata_process.start()
11485 def _metadata_exit(self, metadata_process):
11487 if metadata_process.returncode != os.EX_OK:
11488 self.returncode = 1
11489 self._error_count += 1
11490 self._valid_pkgs.discard(metadata_process.cpv)
11491 portage.writemsg("Error processing %s, continuing...\n" % \
11492 (metadata_process.cpv,))
class UninstallFailure(portage.exception.PortageException):
	"""
	An instance of this class is raised by unmerge() when
	an uninstallation fails.
	"""
	# Default exit status when no explicit status is given.
	status = 1
	def __init__(self, *pargs):
		portage.exception.PortageException.__init__(self, pargs)
		# Restore the guard that was lost from this copy of the file:
		# without it, constructing UninstallFailure() with no arguments
		# raises IndexError instead of keeping the default status.
		if pargs:
			self.status = pargs[0]
def unmerge(root_config, myopts, unmerge_action,
	unmerge_files, ldpath_mtimes, autoclean=0,
	clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
	scheduler=None, writemsg_level=portage.util.writemsg_level):
	"""Uninstall packages for the "unmerge", "prune" and "clean" actions.

	Builds pkgmap (a list of {"protected", "selected", "omitted"} cpv
	sets) from the given arguments, refuses to unmerge packages that are
	still referenced by package sets or that are portage itself, shows
	the plan, and finally calls portage.unmerge() for each selected cpv.

	NOTE(review): many lines of this copy of the function are missing
	(truncated blocks are flagged in comments below); only the visible
	code is described.

	@param unmerge_action: "unmerge", "prune" or "clean"
	@param clean_world: when true, drop unmerged entries from world
	@param raise_on_error: raise UninstallFailure instead of exiting when
		portage.unmerge() fails
	"""

	quiet = "--quiet" in myopts
	settings = root_config.settings
	sets = root_config.sets
	vartree = root_config.trees["vartree"]
	candidate_catpkgs=[]
	xterm_titles = "notitles" not in settings.features
	out = portage.output.EOutput()
	db_keys = list(vartree.dbapi._aux_cache_keys)

	# Cached factory for installed-Package instances.  NOTE(review): the
	# "def _pkg(cpv):" header, the pkg_cache initialization and the
	# "if pkg is None:" / "return pkg" lines are missing from this copy.
		pkg = pkg_cache.get(cpv)
			pkg = Package(cpv=cpv, installed=True,
				metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
				root_config=root_config,
				type_name="installed")
			pkg_cache[cpv] = pkg

	vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
	# (the "try:" framing for this except is missing from this copy)
		# At least the parent needs to exist for the lock file.
		portage.util.ensure_dirs(vdb_path)
	except portage.exception.PortageException:

	# Hold the vdb lock while the unmerge plan is computed; it is
	# released by the flush_cache()/unlockdir() lines further below.
	if os.access(vdb_path, os.W_OK):
		vdb_lock = portage.locks.lockdir(vdb_path)
	realsyslist = sets["system"].getAtoms()
	# Resolve system atoms to concrete package names; a virtual with
	# exactly one installed provider protects that provider.
	for x in realsyslist:
		mycp = portage.dep_getkey(x)
		if mycp in settings.getvirtuals():
			# (the providers list initialization is missing here)
			for provider in settings.getvirtuals()[mycp]:
				if vartree.dbapi.match(provider):
					providers.append(provider)
			if len(providers) == 1:
				syslist.extend(providers)
			# (the "else:" for the non-virtual case is missing here)
			syslist.append(mycp)

	mysettings = portage.config(clone=settings)

	if not unmerge_files:
		if unmerge_action == "unmerge":
			print bold("emerge unmerge") + " can only be used with specific package names"

	localtree = vartree
	# process all arguments and add all
	# valid db entries to candidate_catpkgs
	if not unmerge_files:
		candidate_catpkgs.extend(vartree.dbapi.cp_all())
		#we've got command-line arguments
		if not unmerge_files:
			print "\nNo packages to unmerge have been provided.\n"
		for x in unmerge_files:
			arg_parts = x.split('/')
			if x[0] not in [".","/"] and \
				arg_parts[-1][-7:] != ".ebuild":
				#possible cat/pkg or dep; treat as such
				candidate_catpkgs.append(x)
			elif unmerge_action in ["prune","clean"]:
				print "\n!!! Prune and clean do not accept individual" + \
					" ebuilds as arguments;\n skipping.\n"
				# it appears that the user is specifying an installed
				# ebuild and we're in "unmerge" mode, so it's ok.
				if not os.path.exists(x):
					print "\n!!! The path '"+x+"' doesn't exist.\n"

				absx = os.path.abspath(x)
				sp_absx = absx.split("/")
				if sp_absx[-1][-7:] == ".ebuild":
					# (the line dropping the .ebuild component is missing)
					absx = "/".join(sp_absx)

				sp_absx_len = len(sp_absx)

				vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
				vdb_len = len(vdb_path)

				sp_vdb = vdb_path.split("/")
				sp_vdb_len = len(sp_vdb)

				if not os.path.exists(absx+"/CONTENTS"):
					print "!!! Not a valid db dir: "+str(absx)

				if sp_absx_len <= sp_vdb_len:
					# The Path is shorter... so it can't be inside the vdb.
					print "\n!!!",x,"cannot be inside "+ \
						vdb_path+"; aborting.\n"

				# Verify that the given path really lives under the vdb.
				for idx in range(0,sp_vdb_len):
					if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
						print "\n!!!", x, "is not inside "+\
							vdb_path+"; aborting.\n"

				print "="+"/".join(sp_absx[sp_vdb_len:])
				candidate_catpkgs.append(
					"="+"/".join(sp_absx[sp_vdb_len:]))

	# (the newline-variable assignment lines are missing around here)
	if (not "--quiet" in myopts):
	if settings["ROOT"] != "/":
		writemsg_level(darkgreen(newline+ \
			">>> Using system located in ROOT tree %s\n" % \

	if (("--pretend" in myopts) or ("--ask" in myopts)) and \
		not ("--quiet" in myopts):
		writemsg_level(darkgreen(newline+\
			">>> These are the packages that would be unmerged:\n"))

	# Preservation of order is required for --depclean and --prune so
	# that dependencies are respected. Use all_selected to eliminate
	# duplicate packages since the same package may be selected by
	all_selected = set()
	for x in candidate_catpkgs:
		# cycle through all our candidate deps and determine
		# what will and will not get unmerged
		# (the "try:" framing for this except is missing here)
			mymatch = vartree.dbapi.match(x)
		except portage.exception.AmbiguousPackageName, errpkgs:
			print "\n\n!!! The short ebuild name \"" + \
				x + "\" is ambiguous. Please specify"
			print "!!! one of the following fully-qualified " + \
				"ebuild names instead:\n"
			for i in errpkgs[0]:
				print " " + green(i)

		if not mymatch and x[0] not in "<>=~":
			mymatch = localtree.dep_match(x)
			# (the "if not mymatch:" guard for this message is missing)
			portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
				(x, unmerge_action), noiselevel=-1)

		# One pkgmap entry per argument: cpvs to remove ("selected"),
		# keep ("protected") or merely display ("omitted").  The
		# pkgmap.append( call line is missing from this copy.
			{"protected": set(), "selected": set(), "omitted": set()})
		mykey = len(pkgmap) - 1
		if unmerge_action=="unmerge":
			# (the "for y in mymatch:" loop header is missing here)
				if y not in all_selected:
					pkgmap[mykey]["selected"].add(y)
					all_selected.add(y)
		elif unmerge_action == "prune":
			if len(mymatch) == 1:
			# Keep only the best installed version: highest counter
			# within the best slot wins; everything else is selected.
			best_version = mymatch[0]
			best_slot = vartree.getslot(best_version)
			best_counter = vartree.dbapi.cpv_counter(best_version)
			for mypkg in mymatch[1:]:
				myslot = vartree.getslot(mypkg)
				mycounter = vartree.dbapi.cpv_counter(mypkg)
				if (myslot == best_slot and mycounter > best_counter) or \
					mypkg == portage.best([mypkg, best_version]):
					if myslot == best_slot:
						if mycounter < best_counter:
							# On slot collision, keep the one with the
							# highest counter since it is the most
							# recently installed.
					best_version = mypkg
					best_counter = mycounter
			pkgmap[mykey]["protected"].add(best_version)
			pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
				if mypkg != best_version and mypkg not in all_selected)
			all_selected.update(pkgmap[mykey]["selected"])
			# unmerge_action == "clean"
			# Group installed versions by slot; the newest (highest
			# counter) in each slot is protected further below.  The
			# slotmap initialization is missing from this copy.
			for mypkg in mymatch:
				if unmerge_action == "clean":
					myslot = localtree.getslot(mypkg)
					# since we're pruning, we don't care about slots
					# and put all the pkgs in together
				if myslot not in slotmap:
					slotmap[myslot] = {}
				slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg

			for mypkg in vartree.dbapi.cp_list(
				portage.dep_getkey(mymatch[0])):
				myslot = vartree.getslot(mypkg)
				if myslot not in slotmap:
					slotmap[myslot] = {}
				slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg

			for myslot in slotmap:
				counterkeys = slotmap[myslot].keys()
				if not counterkeys:
				# Protect the most recently installed (highest counter);
				# the counterkeys.sort() line is missing from this copy.
				pkgmap[mykey]["protected"].add(
					slotmap[myslot][counterkeys[-1]])
				del counterkeys[-1]

				for counter in counterkeys[:]:
					mypkg = slotmap[myslot][counter]
					if mypkg not in mymatch:
						counterkeys.remove(counter)
						pkgmap[mykey]["protected"].add(
							slotmap[myslot][counter])

				#be pretty and get them in order of merge:
				for ckey in counterkeys:
					mypkg = slotmap[myslot][ckey]
					if mypkg not in all_selected:
						pkgmap[mykey]["selected"].add(mypkg)
						all_selected.add(mypkg)
				# ok, now the last-merged package
				# is protected, and the rest are selected
	numselected = len(all_selected)
	if global_unmerge and not numselected:
		portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")

	if not numselected:
		portage.writemsg_stdout(
			"\n>>> No packages selected for removal by " + \
			unmerge_action + "\n")
	# Release the vdb lock taken above.  NOTE(review): the enclosing
	# "finally:" / "if vdb_lock:" lines are missing from this copy.
	vartree.dbapi.flush_cache()
	portage.locks.unlockdir(vdb_lock)

	from portage.sets.base import EditablePackageSet

	# generate a list of package sets that are directly or indirectly listed in "world",
	# as there is no persistent list of "installed" sets
	installed_sets = ["world"]
	# Fixed-point loop (its "while" header is missing from this copy):
	# keep appending sets referenced by already-collected sets until no
	# new ones appear.
		pos = len(installed_sets)
		for s in installed_sets[pos - 1:]:
			candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
				installed_sets += candidates
	installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]

	# we don't want to unmerge packages that are still listed in user-editable package sets
	# listed in "world" as they would be remerged on the next update of "world" or the
	# relevant package sets.
	unknown_sets = set()
	for cp in xrange(len(pkgmap)):
		for cpv in pkgmap[cp]["selected"].copy():
			# (the try/except KeyError around _pkg(cpv) is missing here)
				# It could have been uninstalled
				# by a concurrent process.

			# Never let portage unmerge itself on the live root.
			if unmerge_action != "clean" and \
				root_config.root == "/" and \
				portage.match_from_list(
				portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
				msg = ("Not unmerging package %s since there is no valid " + \
				"reason for portage to unmerge itself.") % (pkg.cpv,)
				# (the out.eerror(line) call in this loop is missing here)
				for line in textwrap.wrap(msg, 75):
				# adjust pkgmap so the display output is correct
				pkgmap[cp]["selected"].remove(cpv)
				all_selected.remove(cpv)
				pkgmap[cp]["protected"].add(cpv)

			for s in installed_sets:
				# skip sets that the user requested to unmerge, and skip world
				# unless we're unmerging a package set (as the package would be
				# removed from "world" later on)
				if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):

				# Report each unknown set only once.
				if s in unknown_sets:
				unknown_sets.add(s)
				out = portage.output.EOutput()
				out.eerror(("Unknown set '@%s' in " + \
					"%svar/lib/portage/world_sets") % \
					(s, root_config.root))

				# only check instances of EditablePackageSet as other classes are generally used for
				# special purposes and can be ignored here (and are usually generated dynamically, so the
				# user can't do much about them anyway)
				if isinstance(sets[s], EditablePackageSet):

					# This is derived from a snippet of code in the
					# depgraph._iter_atoms_for_pkg() method.
					for atom in sets[s].iterAtomsForPackage(pkg):
						inst_matches = vartree.dbapi.match(atom)
						inst_matches.reverse() # descending order
						for inst_cpv in inst_matches:
							# (try/except KeyError framing is missing here)
								inst_pkg = _pkg(inst_cpv)
								# It could have been uninstalled
								# by a concurrent process.

							if inst_pkg.cp != atom.cp:
							if pkg >= inst_pkg:
								# This is descending order, and we're not
								# interested in any versions <= pkg given.
							if pkg.slot_atom != inst_pkg.slot_atom:
								higher_slot = inst_pkg
						if higher_slot is None:

			# (the "if parents:" guard for this block is missing here)
			#print colorize("WARN", "Package %s is going to be unmerged," % cpv)
			#print colorize("WARN", "but still listed in the following package sets:")
			#print " %s\n" % ", ".join(parents)
			print colorize("WARN", "Not unmerging package %s as it is" % cpv)
			print colorize("WARN", "still referenced by the following package sets:")
			print " %s\n" % ", ".join(parents)
			# adjust pkgmap so the display output is correct
			pkgmap[cp]["selected"].remove(cpv)
			all_selected.remove(cpv)
			pkgmap[cp]["protected"].add(cpv)

	# Recompute after the set-membership filtering above.
	numselected = len(all_selected)
	if not numselected:
		# (the writemsg_stdout( call line is missing from this copy)
			"\n>>> No packages selected for removal by " + \
			unmerge_action + "\n")

	# Unmerge order only matters in some cases
	# Merge pkgmap entries per cp when order does not matter; the
	# "if not ordered:" / "for d in pkgmap:" framing is missing here.
		selected = d["selected"]
		cp = portage.cpv_getkey(iter(selected).next())
		cp_dict = unordered.get(cp)
		if cp_dict is None:
			unordered[cp] = cp_dict
		for k, v in d.iteritems():
			cp_dict[k].update(v)
		pkgmap = [unordered[cp] for cp in sorted(unordered)]

	# Display the plan, one pkgmap entry at a time.
	for x in xrange(len(pkgmap)):
		selected = pkgmap[x]["selected"]
		for mytype, mylist in pkgmap[x].iteritems():
			if mytype == "selected":
			mylist.difference_update(all_selected)
		cp = portage.cpv_getkey(iter(selected).next())
		# Everything else matching this cp is merely "omitted".
		for y in localtree.dep_match(cp):
			if y not in pkgmap[x]["omitted"] and \
				y not in pkgmap[x]["selected"] and \
				y not in pkgmap[x]["protected"] and \
				y not in all_selected:
				pkgmap[x]["omitted"].add(y)
		if global_unmerge and not pkgmap[x]["selected"]:
			#avoid cluttering the preview printout with stuff that isn't getting unmerged
		if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
			writemsg_level(colorize("BAD","\a\n\n!!! " + \
				"'%s' is part of your system profile.\n" % cp),
				level=logging.WARNING, noiselevel=-1)
			writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
				"be damaging to your system.\n\n"),
				level=logging.WARNING, noiselevel=-1)
			if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
				countdown(int(settings["EMERGE_WARNING_DELAY"]),
					colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
		# (the quiet/verbose "if not quiet:"/"else:" framing is missing)
			writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
			writemsg_level(bold(cp) + ": ", noiselevel=-1)
		for mytype in ["selected","protected","omitted"]:
			writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
			if pkgmap[x][mytype]:
				sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
				sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
				for pn, ver, rev in sorted_pkgs:
					# (the branch suppressing a "-r0" suffix is missing)
					myversion = ver + "-" + rev
					if mytype == "selected":
						# (the writemsg_level( call lines are missing here)
						colorize("UNMERGE_WARN", myversion + " "),
						colorize("GOOD", myversion + " "), noiselevel=-1)
				writemsg_level("none ", noiselevel=-1)
			writemsg_level("\n", noiselevel=-1)
		writemsg_level("\n", noiselevel=-1)

	writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
		" packages are slated for removal.\n")
	writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
		" and " + colorize("GOOD", "'omitted'") + \
		" packages will not be removed.\n\n")

	if "--pretend" in myopts:
		#we're done... return
	if "--ask" in myopts:
		if userquery("Would you like to unmerge these packages?")=="No":
			# enter pretend mode for correct formatting of results
			myopts["--pretend"] = True

	#the real unmerging begins, after a short delay....
	if clean_delay and not autoclean:
		countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")

	for x in xrange(len(pkgmap)):
		for y in pkgmap[x]["selected"]:
			writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
			emergelog(xterm_titles, "=== Unmerging... ("+y+")")
			mysplit = y.split("/")
			retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
				mysettings, unmerge_action not in ["clean","prune"],
				vartree=vartree, ldpath_mtimes=ldpath_mtimes,
				scheduler=scheduler)
			if retval != os.EX_OK:
				emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
				# (the "if raise_on_error:" guard is missing from this copy)
				raise UninstallFailure(retval)
			# (the "else:" framing for the success path is missing here)
			if clean_world and hasattr(sets["world"], "cleanPackage"):
				sets["world"].cleanPackage(vartree.dbapi, y)
			emergelog(xterm_titles, " >>> unmerge success: "+y)
	if clean_world and hasattr(sets["world"], "remove"):
		for s in root_config.setconfig.active:
			sets["world"].remove(SETPREFIX+s)
def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
	"""Regenerate the GNU info directory index ("dir" files) for any info
	directory whose mtime changed since the previous emerge invocation
	(tracked in prev_mtimes).  Only runs when install-info is installed.

	NOTE(review): several lines are missing from this copy of the
	function; truncated blocks are flagged in comments below.
	"""
	if os.path.exists("/usr/bin/install-info"):
		out = portage.output.EOutput()
		# Collect info dirs whose mtime changed since the previous run.
		# (the "for z in infodirs:" loop header and the empty-string skip
		# are missing from this copy)
			inforoot=normpath(root+z)
			if os.path.isdir(inforoot):
				infomtime = long(os.stat(inforoot).st_mtime)
				if inforoot not in prev_mtimes or \
					prev_mtimes[inforoot] != infomtime:
					regen_infodirs.append(inforoot)

		if not regen_infodirs:
			portage.writemsg_stdout("\n")
			out.einfo("GNU info directory index is up-to-date.")
			# (the "else:" framing for the regeneration path is missing)
			portage.writemsg_stdout("\n")
			out.einfo("Regenerating GNU info directory index...")

			# Index files may be plain, gzipped or bzipped.
			dir_extensions = ("", ".gz", ".bz2")
			for inforoot in regen_infodirs:
				if not os.path.isdir(inforoot) or \
					not os.access(inforoot, os.W_OK):

				file_list = os.listdir(inforoot)
				dir_file = os.path.join(inforoot, "dir")
				moved_old_dir = False
				processed_count = 0
				for x in file_list:
					# Skip hidden entries and subdirectories.
					if x.startswith(".") or \
						os.path.isdir(os.path.join(inforoot, x)):
					# Skip the index files themselves ("dir", "dir.gz", ...
					# and their ".old" backups); the skip flag lines are
					# missing from this copy.
					if x.startswith("dir"):
						for ext in dir_extensions:
							if x == "dir" + ext or \
								x == "dir" + ext + ".old":
					if processed_count == 0:
						# Move any existing index aside before the first
						# install-info call so a fresh one is generated.
						# (the "try:" framing for this except is missing)
						for ext in dir_extensions:
								os.rename(dir_file + ext, dir_file + ext + ".old")
								moved_old_dir = True
							except EnvironmentError, e:
								if e.errno != errno.ENOENT:
					processed_count += 1
					# Run install-info with a C locale so its (parsed)
					# warning text is predictable.
					myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
					existsstr="already exists, for file `"
					# (the 'if myso!="":' guard is missing from this copy)
						if re.search(existsstr,myso):
							# Already exists... Don't increment the count for this.
						elif myso[:44]=="install-info: warning: no info dir entry in ":
							# This info file doesn't contain a DIR-header: install-info produces this
							# (harmless) warning (the --quiet switch doesn't seem to work).
							# Don't increment the count for this.
							badcount=badcount+1
							errmsg += myso + "\n"

				if moved_old_dir and not os.path.exists(dir_file):
					# We didn't generate a new dir file, so put the old file
					# back where it was originally found.
					for ext in dir_extensions:
							os.rename(dir_file + ext + ".old", dir_file + ext)
						except EnvironmentError, e:
							if e.errno != errno.ENOENT:

				# Clean dir.old cruft so that they don't prevent
				# unmerge of otherwise empty directories.
				for ext in dir_extensions:
						os.unlink(dir_file + ext + ".old")
					except EnvironmentError, e:
						if e.errno != errno.ENOENT:

				#update mtime so we can potentially avoid regenerating.
				prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)

			# Summary (the "if badcount:"/"else:" framing is missing here).
			out.eerror("Processed %d info files; %d errors." % \
				(icount, badcount))
			writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
			out.einfo("Processed %d info files." % (icount,))
def display_news_notification(root_config, myopts):
	"""Print a notice for each repository that has unread news items
	(counted via checkUpdatedNewsItems), plus a final hint to run
	"eselect news".  Items are marked read unless --pretend is active."""
	target_root = root_config.root
	trees = root_config.trees
	settings = trees["vartree"].settings
	portdb = trees["porttree"].dbapi
	vardb = trees["vartree"].dbapi
	NEWS_PATH = os.path.join("metadata", "news")
	UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
	newsReaderDisplay = False
	# Only update the on-disk unread list when actually merging.
	update = "--pretend" not in myopts

	for repo in portdb.getRepositories():
		unreadItems = checkUpdatedNewsItems(
			portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
		# (the "if unreadItems:" guard is missing from this copy)
		if not newsReaderDisplay:
			newsReaderDisplay = True
		print colorize("WARN", " * IMPORTANT:"),
		print "%s news items need reading for repository '%s'." % (unreadItems, repo)

	if newsReaderDisplay:
		print colorize("WARN", " *"),
		print "Use " + colorize("GOOD", "eselect news") + " to read news items."
def display_preserved_libs(vardbapi):
	"""Print a report of preserved libraries, listing for each library up
	to MAX_DISPLAY of the consumers that still link against it and the
	packages owning those consumers.

	NOTE(review): the MAX_DISPLAY constant and several framing lines are
	missing from this copy; truncated blocks are flagged below.
	"""

	# Ensure the registry is consistent with existing files.
	vardbapi.plib_registry.pruneNonExisting()

	if vardbapi.plib_registry.hasEntries():
		print colorize("WARN", "!!!") + " existing preserved libs:"
		plibdata = vardbapi.plib_registry.getPreservedLibs()
		linkmap = vardbapi.linkmap
		linkmap_broken = False

		# (the "try: linkmap.rebuild()" lines are missing from this copy)
		except portage.exception.CommandNotFound, e:
			writemsg_level("!!! Command Not Found: %s\n" % (e,),
				level=logging.ERROR, noiselevel=-1)
			linkmap_broken = True
			# Map each preserved lib to its external consumers (the
			# "else:" framing for this success path is missing here).
			search_for_owners = set()
			for cpv in plibdata:
				internal_plib_keys = set(linkmap._obj_key(f) \
					for f in plibdata[cpv])
				for f in plibdata[cpv]:
					if f in consumer_map:
					for c in linkmap.findConsumers(f):
						# Filter out any consumers that are also preserved libs
						# belonging to the same package as the provider.
						if linkmap._obj_key(c) not in internal_plib_keys:
							consumers.append(c)
					consumer_map[f] = consumers
					# +1 so "N other files" can be distinguished from an
					# exact listing below.
					search_for_owners.update(consumers[:MAX_DISPLAY+1])

			owners = vardbapi._owners.getFileOwnerMap(search_for_owners)

		for cpv in plibdata:
			print colorize("WARN", ">>>") + " package: %s" % cpv
			# Group alternative paths (links) of the same on-disk object.
			for f in plibdata[cpv]:
				obj_key = linkmap._obj_key(f)
				alt_paths = samefile_map.get(obj_key)
				if alt_paths is None:
					samefile_map[obj_key] = alt_paths

			for alt_paths in samefile_map.itervalues():
				alt_paths = sorted(alt_paths)
				for p in alt_paths:
					print colorize("WARN", " * ") + " - %s" % (p,)
				consumers = consumer_map.get(f, [])
				for c in consumers[:MAX_DISPLAY]:
					print colorize("WARN", " * ") + " used by %s (%s)" % \
						(c, ", ".join(x.mycpv for x in owners.get(c, [])))
				if len(consumers) == MAX_DISPLAY + 1:
					print colorize("WARN", " * ") + " used by %s (%s)" % \
						(consumers[MAX_DISPLAY], ", ".join(x.mycpv \
						for x in owners.get(consumers[MAX_DISPLAY], [])))
				elif len(consumers) > MAX_DISPLAY:
					print colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY)
		print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12213 def _flush_elog_mod_echo():
12215 Dump the mod_echo output now so that our other
12216 notifications are shown last.
12218 @returns: True if messages were shown, False otherwise.
12220 messages_shown = False
12222 from portage.elog import mod_echo
12223 except ImportError:
12224 pass # happens during downgrade to a version without the module
12226 messages_shown = bool(mod_echo._items)
12227 mod_echo.finalize()
12228 return messages_shown
def post_emerge(root_config, myopts, mtimedb, retval):
	"""
	Misc. things to run at the end of a merge session.

	Update Config Files
	Display preserved libs warnings

	@param trees: A dictionary mapping each ROOT to it's package databases
	@param mtimedb: The mtimeDB to store data needed across merge invocations
	@type mtimedb: MtimeDB class instance
	@param retval: Emerge's return value

	1. Calls sys.exit(retval)

	NOTE(review): several lines are missing from this copy of the
	function; truncated blocks are flagged in comments below.
	"""
	target_root = root_config.root
	trees = { target_root : root_config.trees }
	vardbapi = trees[target_root]["vartree"].dbapi
	settings = vardbapi.settings
	info_mtimes = mtimedb["info"]

	# Load the most current variables from ${ROOT}/etc/profile.env
	settings.regenerate()

	config_protect = settings.get("CONFIG_PROTECT","").split()
	infodirs = settings.get("INFOPATH","").split(":") + \
		settings.get("INFODIR","").split(":")

	if retval == os.EX_OK:
		exit_msg = " *** exiting successfully."
		# (the "else:" framing for the failure message is missing here)
		exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
	emergelog("notitles" not in settings.features, exit_msg)

	_flush_elog_mod_echo()

	# Skip the vdb-dependent work when in --pretend mode or when the vdb
	# counter hash shows that nothing was merged.
	counter_hash = settings.get("PORTAGE_COUNTER_HASH")
	if "--pretend" in myopts or (counter_hash is not None and \
		counter_hash == vardbapi._counter_hash()):
		display_news_notification(root_config, myopts)
		# If vdb state has not changed then there's nothing else to do.

	vdb_path = os.path.join(target_root, portage.VDB_PATH)
	portage.util.ensure_dirs(vdb_path)
	if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
		vdb_lock = portage.locks.lockdir(vdb_path)

	if "noinfo" not in settings.features:
		chk_updated_info_files(target_root,
			infodirs, info_mtimes, retval)
	# Release the vdb lock; the enclosing try/finally lines are missing
	# from this copy.
	portage.locks.unlockdir(vdb_lock)

	chk_updated_cfg_files(target_root, config_protect)

	display_news_notification(root_config, myopts)
	if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
		display_preserved_libs(vardbapi)
def chk_updated_cfg_files(target_root, config_protect):
	"""Scan each CONFIG_PROTECT path for pending ._cfg????_* update files
	(via find) and tell the user how many config files need updating.

	NOTE(review): several lines are missing from this copy of the
	function; truncated blocks are flagged in comments below.
	"""
	#number of directories with some protect files in them
	for x in config_protect:
		x = os.path.join(target_root, x.lstrip(os.path.sep))
		if not os.access(x, os.W_OK):
			# Avoid Permission denied errors generated
			# later by `find`.
			continue
		# (the try/except OSError around this lstat is missing here)
			mymode = os.lstat(x).st_mode

		if stat.S_ISLNK(mymode):
			# We want to treat it like a directory if it
			# is a symlink to an existing directory.
				real_mode = os.stat(x).st_mode
				if stat.S_ISDIR(real_mode):

		if stat.S_ISDIR(mymode):
			mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
			# (the "else:" for the single-file case is missing here)
			mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
				os.path.split(x.rstrip(os.path.sep))
		# NUL-separated output; backup files are excluded.
		mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
		a = commands.getstatusoutput(mycommand)
		# (the "if a[0] != 0:" error branch header is missing here)
			sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
			# Show the error message alone, sending stdout to /dev/null.
			os.system(mycommand + " 1>/dev/null")
			files = a[1].split('\0')
			# split always produces an empty string as the last element
			if files and not files[-1]:
				print "\n"+colorize("WARN", " * IMPORTANT:"),
				if stat.S_ISDIR(mymode):
					print "%d config files in '%s' need updating." % \
					print "config file '%s' needs updating." % x

	# Final hint (the "if procount:" guard is missing from this copy).
	print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
		" section of the " + bold("emerge")
	print " "+yellow("*")+" man page to learn how to update config files."
def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
	update=False):
	"""
	Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
	Returns the number of unread (yet relevent) items.

	@param portdb: a portage tree database
	@type portdb: pordbapi
	@param vardb: an installed package database
	@type vardb: vardbapi
	@param NEWS_PATH: path, relative to the repository root, of the news
		directory (e.g. "metadata/news")
	@param UNREAD_PATH: path to the directory tracking unread news items
	@param repo_id: identifier of the repository to check
	@param update: when True, also update the on-disk unread list
	@rtype: Integer
	@returns:
	1. The number of unread but relevant news items.
	"""
	# The signature continuation line ("update=False):") was lost from
	# this copy, leaving the def unparsable; it is restored here to match
	# the keyword call in display_news_notification().
	from portage.news import NewsManager
	manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
	return manager.getUnreadItems( repo_id, update=update )
def insert_category_into_atom(atom, category):
	"""Insert ``category`` into an atom that lacks one.

	The category is inserted immediately before the first alphanumeric
	character of ``atom``, which preserves any leading version operator
	(e.g. ">=", "=", "~").

	@param atom: a package atom, possibly without a category
	@param category: the category name to insert
	@rtype: str or None
	@returns: the atom with "category/" inserted, or None when ``atom``
		contains no alphanumeric character to anchor the insertion.
	"""
	alphanum = re.search(r'\w', atom)
	# Restore the guard and return that were lost from this copy of the
	# file: callers get None for a degenerate atom instead of the
	# function falling off the end (implicit None after an unguarded
	# alphanum.start() crash).
	if alphanum:
		ret = atom[:alphanum.start()] + "%s/" % category + \
			atom[alphanum.start():]
	else:
		ret = None
	return ret
def is_valid_package_atom(x):
	"""Return True if ``x`` parses as a valid package atom.

	An atom that lacks a category gets a fake "cat/" prefix inserted
	(before the first alphanumeric character, preserving operator
	prefixes like ">=") purely for validation purposes.

	@rtype: bool
	"""
	# Restore the guards lost from this copy of the file: without the
	# "/" check an already-categorized atom would be mangled, and
	# without the match check alphanum.start() would raise
	# AttributeError for atoms with no alphanumeric character.
	if "/" not in x:
		alphanum = re.search(r'\w', x)
		if alphanum:
			x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
	return portage.isvalidatom(x)
def show_blocker_docs_link():
	"""Point the user at the Gentoo Handbook section on blocked packages."""
	print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
	print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
	print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12413 def show_mask_docs():
12414 print "For more information, see the MASKED PACKAGES section in the emerge"
12415 print "man page or refer to the Gentoo Handbook."
12417 def action_sync(settings, trees, mtimedb, myopts, myaction):
12418 xterm_titles = "notitles" not in settings.features
12419 emergelog(xterm_titles, " === sync")
12420 myportdir = settings.get("PORTDIR", None)
12421 out = portage.output.EOutput()
12423 sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
12425 if myportdir[-1]=="/":
12426 myportdir=myportdir[:-1]
12428 st = os.stat(myportdir)
12432 print ">>>",myportdir,"not found, creating it."
12433 os.makedirs(myportdir,0755)
12434 st = os.stat(myportdir)
12437 spawn_kwargs["env"] = settings.environ()
12438 if 'usersync' in settings.features and \
12439 portage.data.secpass >= 2 and \
12440 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12441 st.st_gid != os.getgid() and st.st_mode & 0070):
12443 homedir = pwd.getpwuid(st.st_uid).pw_dir
12447 # Drop privileges when syncing, in order to match
12448 # existing uid/gid settings.
12449 spawn_kwargs["uid"] = st.st_uid
12450 spawn_kwargs["gid"] = st.st_gid
12451 spawn_kwargs["groups"] = [st.st_gid]
12452 spawn_kwargs["env"]["HOME"] = homedir
12454 if not st.st_mode & 0020:
12455 umask = umask | 0020
12456 spawn_kwargs["umask"] = umask
12458 syncuri = settings.get("SYNC", "").strip()
12460 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12461 noiselevel=-1, level=logging.ERROR)
12464 vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12465 vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12468 dosyncuri = syncuri
12469 updatecache_flg = False
12470 if myaction == "metadata":
12471 print "skipping sync"
12472 updatecache_flg = True
12473 elif ".git" in vcs_dirs:
12474 # Update existing git repository, and ignore the syncuri. We are
12475 # going to trust the user and assume that the user is in the branch
12476 # that he/she wants updated. We'll let the user manage branches with
12478 if portage.process.find_binary("git") is None:
12479 msg = ["Command not found: git",
12480 "Type \"emerge dev-util/git\" to enable git support."]
12482 writemsg_level("!!! %s\n" % l,
12483 level=logging.ERROR, noiselevel=-1)
12485 msg = ">>> Starting git pull in %s..." % myportdir
12486 emergelog(xterm_titles, msg )
12487 writemsg_level(msg + "\n")
12488 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12489 (portage._shell_quote(myportdir),), **spawn_kwargs)
12490 if exitcode != os.EX_OK:
12491 msg = "!!! git pull error in %s." % myportdir
12492 emergelog(xterm_titles, msg)
12493 writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12495 msg = ">>> Git pull in %s successful" % myportdir
12496 emergelog(xterm_titles, msg)
12497 writemsg_level(msg + "\n")
12498 exitcode = git_sync_timestamps(settings, myportdir)
12499 if exitcode == os.EX_OK:
12500 updatecache_flg = True
12501 elif syncuri[:8]=="rsync://":
12502 for vcs_dir in vcs_dirs:
12503 writemsg_level(("!!! %s appears to be under revision " + \
12504 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12505 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12507 if not os.path.exists("/usr/bin/rsync"):
12508 print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12509 print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
12514 if settings["PORTAGE_RSYNC_OPTS"] == "":
12515 portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12516 rsync_opts.extend([
12517 "--recursive", # Recurse directories
12518 "--links", # Consider symlinks
12519 "--safe-links", # Ignore links outside of tree
12520 "--perms", # Preserve permissions
12521 "--times", # Preserive mod times
12522 "--compress", # Compress the data transmitted
12523 "--force", # Force deletion on non-empty dirs
12524 "--whole-file", # Don't do block transfers, only entire files
12525 "--delete", # Delete files that aren't in the master tree
12526 "--stats", # Show final statistics about what was transfered
12527 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12528 "--exclude=/distfiles", # Exclude distfiles from consideration
12529 "--exclude=/local", # Exclude local from consideration
12530 "--exclude=/packages", # Exclude packages from consideration
12534 # The below validation is not needed when using the above hardcoded
12537 portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12539 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12540 for opt in ("--recursive", "--times"):
12541 if opt not in rsync_opts:
12542 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12543 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12544 rsync_opts.append(opt)
12546 for exclude in ("distfiles", "local", "packages"):
12547 opt = "--exclude=/%s" % exclude
12548 if opt not in rsync_opts:
12549 portage.writemsg(yellow("WARNING:") + \
12550 " adding required option %s not included in " % opt + \
12551 "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12552 rsync_opts.append(opt)
12554 if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12555 def rsync_opt_startswith(opt_prefix):
12556 for x in rsync_opts:
12557 if x.startswith(opt_prefix):
12561 if not rsync_opt_startswith("--timeout="):
12562 rsync_opts.append("--timeout=%d" % mytimeout)
12564 for opt in ("--compress", "--whole-file"):
12565 if opt not in rsync_opts:
12566 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12567 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12568 rsync_opts.append(opt)
12570 if "--quiet" in myopts:
12571 rsync_opts.append("--quiet") # Shut up a lot
12573 rsync_opts.append("--verbose") # Print filelist
12575 if "--verbose" in myopts:
12576 rsync_opts.append("--progress") # Progress meter for each file
12578 if "--debug" in myopts:
12579 rsync_opts.append("--checksum") # Force checksum on all files
12581 # Real local timestamp file.
12582 servertimestampfile = os.path.join(
12583 myportdir, "metadata", "timestamp.chk")
12585 content = portage.util.grabfile(servertimestampfile)
12589 mytimestamp = time.mktime(time.strptime(content[0],
12590 "%a, %d %b %Y %H:%M:%S +0000"))
12591 except (OverflowError, ValueError):
12596 rsync_initial_timeout = \
12597 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12599 rsync_initial_timeout = 15
12602 maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12603 except SystemExit, e:
12604 raise # Needed else can't exit
12606 maxretries=3 #default number of retries
12609 user_name, hostname, port = re.split(
12610 "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12613 if user_name is None:
12615 updatecache_flg=True
12616 all_rsync_opts = set(rsync_opts)
12617 extra_rsync_opts = shlex.split(
12618 settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12619 all_rsync_opts.update(extra_rsync_opts)
12620 family = socket.AF_INET
12621 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12622 family = socket.AF_INET
12623 elif socket.has_ipv6 and \
12624 ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12625 family = socket.AF_INET6
12627 SERVER_OUT_OF_DATE = -1
12628 EXCEEDED_MAX_RETRIES = -2
12634 for addrinfo in socket.getaddrinfo(
12635 hostname, None, family, socket.SOCK_STREAM):
12636 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12637 # IPv6 addresses need to be enclosed in square brackets
12638 ips.append("[%s]" % addrinfo[4][0])
12640 ips.append(addrinfo[4][0])
12641 from random import shuffle
12643 except SystemExit, e:
12644 raise # Needed else can't exit
12645 except Exception, e:
12646 print "Notice:",str(e)
12651 dosyncuri = syncuri.replace(
12652 "//" + user_name + hostname + port + "/",
12653 "//" + user_name + ips[0] + port + "/", 1)
12654 except SystemExit, e:
12655 raise # Needed else can't exit
12656 except Exception, e:
12657 print "Notice:",str(e)
12661 if "--ask" in myopts:
12662 if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12667 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12668 if "--quiet" not in myopts:
12669 print ">>> Starting rsync with "+dosyncuri+"..."
12671 emergelog(xterm_titles,
12672 ">>> Starting retry %d of %d with %s" % \
12673 (retries,maxretries,dosyncuri))
12674 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12676 if mytimestamp != 0 and "--quiet" not in myopts:
12677 print ">>> Checking server timestamp ..."
12679 rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12681 if "--debug" in myopts:
12684 exitcode = os.EX_OK
12685 servertimestamp = 0
12686 # Even if there's no timestamp available locally, fetch the
12687 # timestamp anyway as an initial probe to verify that the server is
12688 # responsive. This protects us from hanging indefinitely on a
12689 # connection attempt to an unresponsive server which rsync's
12690 # --timeout option does not prevent.
12692 # Temporary file for remote server timestamp comparison.
12693 from tempfile import mkstemp
12694 fd, tmpservertimestampfile = mkstemp()
12696 mycommand = rsynccommand[:]
12697 mycommand.append(dosyncuri.rstrip("/") + \
12698 "/metadata/timestamp.chk")
12699 mycommand.append(tmpservertimestampfile)
12703 def timeout_handler(signum, frame):
12704 raise portage.exception.PortageException("timed out")
12705 signal.signal(signal.SIGALRM, timeout_handler)
12706 # Timeout here in case the server is unresponsive. The
12707 # --timeout rsync option doesn't apply to the initial
12708 # connection attempt.
12709 if rsync_initial_timeout:
12710 signal.alarm(rsync_initial_timeout)
12712 mypids.extend(portage.process.spawn(
12713 mycommand, env=settings.environ(), returnpid=True))
12714 exitcode = os.waitpid(mypids[0], 0)[1]
12715 content = portage.grabfile(tmpservertimestampfile)
12717 if rsync_initial_timeout:
12720 os.unlink(tmpservertimestampfile)
12723 except portage.exception.PortageException, e:
12727 if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
12728 os.kill(mypids[0], signal.SIGTERM)
12729 os.waitpid(mypids[0], 0)
12730 # This is the same code rsync uses for timeout.
12733 if exitcode != os.EX_OK:
12734 if exitcode & 0xff:
12735 exitcode = (exitcode & 0xff) << 8
12737 exitcode = exitcode >> 8
12739 portage.process.spawned_pids.remove(mypids[0])
12742 servertimestamp = time.mktime(time.strptime(
12743 content[0], "%a, %d %b %Y %H:%M:%S +0000"))
12744 except (OverflowError, ValueError):
12746 del mycommand, mypids, content
12747 if exitcode == os.EX_OK:
12748 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
12749 emergelog(xterm_titles,
12750 ">>> Cancelling sync -- Already current.")
12753 print ">>> Timestamps on the server and in the local repository are the same."
12754 print ">>> Cancelling all further sync action. You are already up to date."
12756 print ">>> In order to force sync, remove '%s'." % servertimestampfile
12760 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
12761 emergelog(xterm_titles,
12762 ">>> Server out of date: %s" % dosyncuri)
12765 print ">>> SERVER OUT OF DATE: %s" % dosyncuri
12767 print ">>> In order to force sync, remove '%s'." % servertimestampfile
12770 exitcode = SERVER_OUT_OF_DATE
12771 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
12773 mycommand = rsynccommand + [dosyncuri+"/", myportdir]
12774 exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
12775 if exitcode in [0,1,3,4,11,14,20,21]:
12777 elif exitcode in [1,3,4,11,14,20,21]:
12780 # Code 2 indicates protocol incompatibility, which is expected
12781 # for servers with protocol < 29 that don't support
12782 # --prune-empty-directories. Retry for a server that supports
12783 # at least rsync protocol version 29 (>=rsync-2.6.4).
12788 if retries<=maxretries:
12789 print ">>> Retrying..."
12794 updatecache_flg=False
12795 exitcode = EXCEEDED_MAX_RETRIES
12799 emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
12800 elif exitcode == SERVER_OUT_OF_DATE:
12802 elif exitcode == EXCEEDED_MAX_RETRIES:
12804 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
12809 msg.append("Rsync has reported that there is a syntax error. Please ensure")
12810 msg.append("that your SYNC statement is proper.")
12811 msg.append("SYNC=" + settings["SYNC"])
12813 msg.append("Rsync has reported that there is a File IO error. Normally")
12814 msg.append("this means your disk is full, but can be caused by corruption")
12815 msg.append("on the filesystem that contains PORTDIR. Please investigate")
12816 msg.append("and try again after the problem has been fixed.")
12817 msg.append("PORTDIR=" + settings["PORTDIR"])
12819 msg.append("Rsync was killed before it finished.")
12821 msg.append("Rsync has not successfully finished. It is recommended that you keep")
12822 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
12823 msg.append("to use rsync due to firewall or other restrictions. This should be a")
12824 msg.append("temporary problem unless complications exist with your network")
12825 msg.append("(and possibly your system's filesystem) configuration.")
12829 elif syncuri[:6]=="cvs://":
12830 if not os.path.exists("/usr/bin/cvs"):
12831 print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
12832 print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
12834 cvsroot=syncuri[6:]
12835 cvsdir=os.path.dirname(myportdir)
12836 if not os.path.exists(myportdir+"/CVS"):
12838 print ">>> Starting initial cvs checkout with "+syncuri+"..."
12839 if os.path.exists(cvsdir+"/gentoo-x86"):
12840 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
12843 os.rmdir(myportdir)
12845 if e.errno != errno.ENOENT:
12847 "!!! existing '%s' directory; exiting.\n" % myportdir)
12850 if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
12851 print "!!! cvs checkout error; exiting."
12853 os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
12856 print ">>> Starting cvs update with "+syncuri+"..."
12857 retval = portage.process.spawn_bash(
12858 "cd %s; cvs -z0 -q update -dP" % \
12859 (portage._shell_quote(myportdir),), **spawn_kwargs)
12860 if retval != os.EX_OK:
12862 dosyncuri = syncuri
12864 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
12865 noiselevel=-1, level=logging.ERROR)
12868 if updatecache_flg and \
12869 myaction != "metadata" and \
12870 "metadata-transfer" not in settings.features:
12871 updatecache_flg = False
12873 # Reload the whole config from scratch.
12874 settings, trees, mtimedb = load_emerge_config(trees=trees)
12875 root_config = trees[settings["ROOT"]]["root_config"]
12876 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12878 if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
12879 action_metadata(settings, portdb, myopts)
12881 if portage._global_updates(trees, mtimedb["updates"]):
12883 # Reload the whole config from scratch.
12884 settings, trees, mtimedb = load_emerge_config(trees=trees)
12885 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12886 root_config = trees[settings["ROOT"]]["root_config"]
12888 mybestpv = portdb.xmatch("bestmatch-visible",
12889 portage.const.PORTAGE_PACKAGE_ATOM)
12890 mypvs = portage.best(
12891 trees[settings["ROOT"]]["vartree"].dbapi.match(
12892 portage.const.PORTAGE_PACKAGE_ATOM))
12894 chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
12896 if myaction != "metadata":
12897 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
12898 retval = portage.process.spawn(
12899 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
12900 dosyncuri], env=settings.environ())
12901 if retval != os.EX_OK:
12902 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
12904 if(mybestpv != mypvs) and not "--quiet" in myopts:
12906 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
12907 print red(" * ")+"that you update portage now, before any other packages are updated."
12909 print red(" * ")+"To update portage, run 'emerge portage' now."
12912 display_news_notification(root_config, myopts)
12915 def git_sync_timestamps(settings, portdir):
# Purpose: after a git-based tree sync, re-apply the mtimes recorded in the
# metadata cache to ebuilds and eclasses, so cache validation does not treat
# the freshly-checked-out tree as entirely stale.
# NOTE(review): this listing is line-numbered and elided (numbers skip), so
# several statements (returns, try/except headers, continues) are not shown;
# comments below describe only what the visible lines establish.
12917 Since git doesn't preserve timestamps, synchronize timestamps between
12918 entries and ebuilds/eclasses. Assume the cache has the correct timestamp
12919 for a given file as long as the file in the working tree is not modified
12920 (relative to HEAD).
12922 cache_dir = os.path.join(portdir, "metadata", "cache")
# Nothing to do without an on-disk metadata cache (branch body elided here).
12923 if not os.path.isdir(cache_dir):
12925 writemsg_level(">>> Synchronizing timestamps...\n")
12927 from portage.cache.cache_errors import CacheError
# Open the tree's metadata cache read-only via the configured cache module.
12929 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
12930 portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
12931 except CacheError, e:
12932 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
12933 level=logging.ERROR, noiselevel=-1)
# Collect known eclass names (file name with the 7-char ".eclass" suffix cut).
12936 ec_dir = os.path.join(portdir, "eclass")
12938 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
12939 if f.endswith(".eclass"))
12941 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
12942 level=logging.ERROR, noiselevel=-1)
# Ask git which tracked files are modified relative to HEAD; cached mtimes
# for those files can no longer be trusted.
12945 args = [portage.const.BASH_BINARY, "-c",
12946 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
12947 portage._shell_quote(portdir)]
12949 proc = subprocess.Popen(args, stdout=subprocess.PIPE)
12950 modified_files = set(l.rstrip("\n") for l in proc.stdout)
# NOTE(review): rval is assigned on an elided line (presumably proc.wait());
# confirm against the full source.
12952 if rval != os.EX_OK:
12955 modified_eclasses = set(ec for ec in ec_names \
12956 if os.path.join("eclass", ec + ".eclass") in modified_files)
# Records the mtime already applied per eclass, so each eclass file is
# touched at most once and conflicting cache entries can be detected.
12958 updated_ec_mtimes = {}
12960 for cpv in cache_db:
12961 cpv_split = portage.catpkgsplit(cpv)
12962 if cpv_split is None:
12963 writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
12964 level=logging.ERROR, noiselevel=-1)
12967 cat, pn, ver, rev = cpv_split
12968 cat, pf = portage.catsplit(cpv)
12969 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
# Locally-modified ebuilds are skipped: their cached mtime is not valid.
12970 if relative_eb_path in modified_files:
12974 cache_entry = cache_db[cpv]
12975 eb_mtime = cache_entry.get("_mtime_")
12976 ec_mtimes = cache_entry.get("_eclasses_")
12978 writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
12979 level=logging.ERROR, noiselevel=-1)
12981 except CacheError, e:
12982 writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
12983 (cpv, e), level=logging.ERROR, noiselevel=-1)
12986 if eb_mtime is None:
12987 writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
12988 level=logging.ERROR, noiselevel=-1)
12992 eb_mtime = long(eb_mtime)
12994 writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
12995 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
12998 if ec_mtimes is None:
12999 writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13000 level=logging.ERROR, noiselevel=-1)
# If any eclass this entry inherits is locally modified, skip the entry.
13003 if modified_eclasses.intersection(ec_mtimes):
13006 missing_eclasses = set(ec_mtimes).difference(ec_names)
13007 if missing_eclasses:
13008 writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13009 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13013 eb_path = os.path.join(portdir, relative_eb_path)
# NOTE(review): as shown, current_eb_mtime holds a raw os.stat() result
# while eb_mtime is a long, which would make the comparison at 13033 always
# true; a ".st_mtime" access may live on an elided line -- confirm against
# the full source.
13015 current_eb_mtime = os.stat(eb_path)
13017 writemsg_level("!!! Missing ebuild: %s\n" % \
13018 (cpv,), level=logging.ERROR, noiselevel=-1)
# Reject entries whose eclass mtimes disagree with an mtime already applied
# for the same eclass by an earlier cache entry.
13021 inconsistent = False
13022 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13023 updated_mtime = updated_ec_mtimes.get(ec)
13024 if updated_mtime is not None and updated_mtime != ec_mtime:
13025 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13026 (cpv, ec), level=logging.ERROR, noiselevel=-1)
13027 inconsistent = True
# Apply the cached mtime to the ebuild file when it differs.
13033 if current_eb_mtime != eb_mtime:
13034 os.utime(eb_path, (eb_mtime, eb_mtime))
# Apply cached mtimes to each referenced eclass, once per eclass.
13036 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13037 if ec in updated_ec_mtimes:
13039 ec_path = os.path.join(ec_dir, ec + ".eclass")
13040 current_mtime = long(os.stat(ec_path).st_mtime)
13041 if current_mtime != ec_mtime:
13042 os.utime(ec_path, (ec_mtime, ec_mtime))
13043 updated_ec_mtimes[ec] = ec_mtime
13047 def action_metadata(settings, portdb, myopts):
# Purpose: the metadata-transfer step -- mirror the tree's metadata/cache
# into the local auxdb cache, with an in-place percentage display unless
# --quiet is given.
# NOTE(review): this listing is line-numbered and elided (numbers skip);
# comments describe only what the visible lines establish.
13048 portage.writemsg_stdout("\n>>> Updating Portage cache: ")
# Relax umask (0002 octal) so created cache files are group-writable;
# restored at the end of the function.
13049 old_umask = os.umask(0002)
13050 cachedir = os.path.normpath(settings.depcachedir)
# Safety check: refuse to use a primary system directory as the dep cache
# (a misconfigured PORTAGE_DEPCACHEDIR could be destructive).
13051 if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
13052 "/lib", "/opt", "/proc", "/root", "/sbin",
13053 "/sys", "/tmp", "/usr", "/var"]:
13054 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13055 "ROOT DIRECTORY ON YOUR SYSTEM."
13056 print >> sys.stderr, \
13057 "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13059 if not os.path.exists(cachedir):
13062 ec = portage.eclass_cache.cache(portdb.porttree_root)
13063 myportdir = os.path.realpath(settings["PORTDIR"])
# Source cache: the tree's metadata/cache, opened read-only via the
# configured metadata cache module.
13064 cm = settings.load_best_module("portdbapi.metadbmodule")(
13065 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13067 from portage.cache import util
# Progress reporter that doubles as the cpv source iterable handed to
# mirror_cache(); prints a percentage that overwrites itself with
# backspaces.
13069 class percentage_noise_maker(util.quiet_mirroring):
13070 def __init__(self, dbapi):
13072 self.cp_all = dbapi.cp_all()
13073 l = len(self.cp_all)
13074 self.call_update_min = 100000000
# One percent of the total cp count, used as the update threshold.
13075 self.min_cp_all = l/100.0
13079 def __iter__(self):
13080 for x in self.cp_all:
13082 if self.count > self.min_cp_all:
13083 self.call_update_min = 0
13085 for y in self.dbapi.cp_list(x):
# NOTE(review): "call_update_mine" looks like a typo for
# "call_update_min" (assigned nowhere else under that name) --
# confirm against the full source before relying on it.
13087 self.call_update_mine = 0
13089 def update(self, *arg):
13090 try: self.pstr = int(self.pstr) + 1
13091 except ValueError: self.pstr = 1
# Backspace over the previous "NN%" before printing the new one.
13092 sys.stdout.write("%s%i%%" % \
13093 ("\b" * (len(str(self.pstr))+1), self.pstr))
13095 self.call_update_min = 10000000
13097 def finish(self, *arg):
13098 sys.stdout.write("\b\b\b\b100%\n")
# Quiet mode: plain nested generator over every cpv, no progress output.
13101 if "--quiet" in myopts:
13102 def quicky_cpv_generator(cp_all_list):
13103 for x in cp_all_list:
13104 for y in portdb.cp_list(x):
13106 source = quicky_cpv_generator(portdb.cp_all())
13107 noise_maker = portage.cache.util.quiet_mirroring()
13109 noise_maker = source = percentage_noise_maker(portdb)
# Copy entries from the tree cache (cm) into the local auxdb for myportdir.
13110 portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
13111 eclass_cache=ec, verbose_instance=noise_maker)
13114 os.umask(old_umask)
13116 def action_regen(settings, portdb, max_jobs, max_load):
# Purpose: the "--regen" action -- rebuild metadata cache entries for the
# whole tree via the MetadataRegen scheduler, honoring --jobs/--load-average.
# Returns the scheduler's exit status.
# NOTE(review): this listing is line-numbered and elided (numbers skip);
# the try/except structure around the stdin close is only partially visible.
13117 xterm_titles = "notitles" not in settings.features
13118 emergelog(xterm_titles, " === regen")
13119 #regenerate cache entries
13120 portage.writemsg_stdout("Regenerating cache entries...\n")
# Regeneration is non-interactive; close stdin up front.
13122 os.close(sys.stdin.fileno())
13123 except SystemExit, e:
13124 raise # Needed else can't exit
13129 regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13132 portage.writemsg_stdout("done!\n")
13133 return regen.returncode
13135 def action_config(settings, trees, myopts, myfiles):
# Purpose: the "--config" action -- run the pkg_config() phase of exactly
# one installed package, selected by the single atom in myfiles (with an
# interactive chooser under --ask when the atom matches several versions).
# NOTE(review): this listing is line-numbered and elided (numbers skip);
# several returns/loop headers are not visible.
13136 if len(myfiles) != 1:
13137 print red("!!! config can only take a single package atom at this time\n")
13138 if not is_valid_package_atom(myfiles[0]):
13140 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13142 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13143 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
# Match the atom against installed packages (vartree).
13147 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13148 except portage.exception.AmbiguousPackageName, e:
13149 # Multiple matches thrown from cpv_expand
13152 print "No packages found.\n"
13154 elif len(pkgs) > 1:
# More than one installed version matched: under --ask, present a numbered
# menu (plus "X" to exit); otherwise list the matches and bail out.
13155 if "--ask" in myopts:
13157 print "Please select a package to configure:"
13161 options.append(str(idx))
13162 print options[-1]+") "+pkg
13164 options.append("X")
13165 idx = userquery("Selection?", options)
13168 pkg = pkgs[int(idx)-1]
13170 print "The following packages available:"
13173 print "\nPlease use a specific atom or the --ask option."
13179 if "--ask" in myopts:
13180 if userquery("Ready to configure "+pkg+"?") == "No":
13183 print "Configuring pkg..."
13185 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13186 mysettings = portage.config(clone=settings)
13187 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13188 debug = mysettings.get("PORTAGE_DEBUG") == "1"
# Run the "config" phase, then "clean" on success.
# NOTE(review): the debug kwarg below compares a string setting to the int 1
# (always False), while the local `debug` above correctly compares to "1".
# Looks like a latent bug -- confirm against the full source before fixing.
13189 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13191 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
13192 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13193 if retval == os.EX_OK:
13194 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13195 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
13198 def action_info(settings, trees, myopts, myfiles):
# Purpose: the "--info" action -- print system settings (versions, uname,
# tree timestamp, toolchain package versions, key make.conf variables, USE
# flags) and, for any package atoms in myfiles, per-package build settings
# that differ from the current global configuration.
# NOTE(review): this listing is line-numbered and elided (numbers skip);
# many loop headers, try/except lines and returns are not visible.
13199 print getportageversion(settings["PORTDIR"], settings["ROOT"],
13200 settings.profile_path, settings["CHOST"],
13201 trees[settings["ROOT"]]["vartree"].dbapi)
13203 header_title = "System Settings"
# Centered banner between two "=" rules (header_width set on an elided line).
13205 print header_width * "="
13206 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13207 print header_width * "="
13208 print "System uname: "+platform.platform(aliased=1)
# Show when the tree was last synced, from metadata/timestamp.chk.
13210 lastSync = portage.grabfile(os.path.join(
13211 settings["PORTDIR"], "metadata", "timestamp.chk"))
13212 print "Timestamp of tree:",
# Probe optional build helpers and report whether the feature is enabled.
13218 output=commands.getstatusoutput("distcc --version")
13220 print str(output[1].split("\n",1)[0]),
13221 if "distcc" in settings.features:
13226 output=commands.getstatusoutput("ccache -V")
13228 print str(output[1].split("\n",1)[0]),
13229 if "ccache" in settings.features:
# Report installed versions of core toolchain packages, plus any extras
# listed in profiles/info_pkgs.
13234 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13235 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
13236 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13237 myvars = portage.util.unique_array(myvars)
13241 if portage.isvalidatom(x):
13242 pkg_matches = trees["/"]["vartree"].dbapi.match(x)
# Keep (pn, ver, rev) triples and sort by version.
13243 pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13244 pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13246 for pn, ver, rev in pkg_matches:
13248 pkgs.append(ver + "-" + rev)
13252 pkgs = ", ".join(pkgs)
13253 print "%-20s %s" % (x+":", pkgs)
13255 print "%-20s %s" % (x+":", "[NOT VALID]")
13257 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
# Variable dump: everything under --verbose, else a curated list plus any
# extras from profiles/info_vars.
13259 if "--verbose" in myopts:
13260 myvars=settings.keys()
13262 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13263 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13264 'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13265 'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13267 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13269 myvars = portage.util.unique_array(myvars)
13275 print '%s="%s"' % (x, settings[x])
# USE is printed specially: flags belonging to USE_EXPAND variables are
# pulled out of the flat USE set and shown under their own variable.
13277 use = set(settings["USE"].split())
13278 use_expand = settings["USE_EXPAND"].split()
13280 for varname in use_expand:
13281 flag_prefix = varname.lower() + "_"
13282 for f in list(use):
13283 if f.startswith(flag_prefix):
13287 print 'USE="%s"' % " ".join(use),
13288 for varname in use_expand:
13289 myval = settings.get(varname)
13291 print '%s="%s"' % (varname, myval),
13294 unset_vars.append(x)
13296 print "Unset: "+", ".join(unset_vars)
# --debug: dump CVS id strings of loaded portage submodules.
13299 if "--debug" in myopts:
13300 for x in dir(portage):
13301 module = getattr(portage, x)
13302 if "cvs_id_string" in dir(module):
13303 print "%s: %s" % (str(x), str(module.cvs_id_string))
13305 # See if we can find any packages installed matching the strings
13306 # passed on the command line
13308 vardb = trees[settings["ROOT"]]["vartree"].dbapi
13309 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13311 mypkgs.extend(vardb.match(x))
13313 # If some packages were found...
13315 # Get our global settings (we only print stuff if it varies from
13316 # the current config)
13317 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13318 auxkeys = mydesiredvars + [ "USE", "IUSE"]
13320 pkgsettings = portage.config(clone=settings)
13322 for myvar in mydesiredvars:
13323 global_vals[myvar] = set(settings.get(myvar, "").split())
13325 # Loop through each package
13326 # Only print settings if they differ from global settings
13327 header_title = "Package Settings"
13328 print header_width * "="
13329 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13330 print header_width * "="
13331 from portage.output import EOutput
13334 # Get all package specific variables
13335 auxvalues = vardb.aux_get(pkg, auxkeys)
13337 for i in xrange(len(auxkeys)):
13338 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
13340 for myvar in mydesiredvars:
13341 # If the package variable doesn't match the
13342 # current global variable, something has changed
13343 # so set diff_found so we know to print
13344 if valuesmap[myvar] != global_vals[myvar]:
13345 diff_values[myvar] = valuesmap[myvar]
# USE is compared only over the package's IUSE (defaults stripped).
13346 valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
13347 valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
13348 pkgsettings.reset()
13349 # If a matching ebuild is no longer available in the tree, maybe it
13350 # would make sense to compare against the flags for the best
13351 # available version with the same slot?
13353 if portdb.cpv_exists(pkg):
13355 pkgsettings.setcpv(pkg, mydb=mydb)
13356 if valuesmap["IUSE"].intersection(
13357 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
13358 diff_values["USE"] = valuesmap["USE"]
13359 # If a difference was found, print the info for
13362 # Print package info
13363 print "%s was built with the following:" % pkg
13364 for myvar in mydesiredvars + ["USE"]:
13365 if myvar in diff_values:
13366 mylist = list(diff_values[myvar])
13368 print "%s=\"%s\"" % (myvar, " ".join(mylist))
# Finally run the ebuild's pkg_info() phase, if the ebuild still exists.
13370 print ">>> Attempting to run pkg_info() for '%s'" % pkg
13371 ebuildpath = vardb.findname(pkg)
13372 if not ebuildpath or not os.path.exists(ebuildpath):
13373 out.ewarn("No ebuild found for '%s'" % pkg)
# NOTE(review): the debug kwarg compares a string setting to the int 1
# (always False) -- same suspect pattern as in action_config; confirm
# against the full source before fixing.
13375 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13376 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
13377 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
13380 def action_search(root_config, myopts, myfiles, spinner):
# Purpose: the "--search" action -- run each search term in myfiles through
# a `search` instance configured from the relevant command-line options,
# then print the accumulated results.
# NOTE(review): this listing is line-numbered and elided (numbers skip);
# the empty-terms guard and the try header around execute() are not visible.
13382 print "emerge: no search terms provided."
13384 searchinstance = search(root_config,
13385 spinner, "--searchdesc" in myopts,
13386 "--quiet" not in myopts, "--usepkg" in myopts,
13387 "--usepkgonly" in myopts)
13388 for mysearch in myfiles:
13390 searchinstance.execute(mysearch)
# Search terms are treated as regular expressions; report bad patterns.
13391 except re.error, comment:
13392 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13394 searchinstance.output()
13396 def action_depclean(settings, trees, ldpath_mtimes,
13397 myopts, action, myfiles, spinner):
13398 # Kill packages that aren't explicitly merged or are required as a
13399 # dependency of another package. World file is explicit.
13401 # Global depclean or prune operations are not very safe when there are
13402 # missing dependencies since it's unknown how badly incomplete
13403 # the dependency graph is, and we might accidentally remove packages
13404 # that should have been pulled into the graph. On the other hand, it's
13405 # relatively safe to ignore missing deps when only asked to remove
13406 # specific packages.
13407 allow_missing_deps = len(myfiles) > 0
13410 msg.append("Always study the list of packages to be cleaned for any obvious\n")
13411 msg.append("mistakes. Packages that are part of the world set will always\n")
13412 msg.append("be kept. They can be manually added to this set with\n")
13413 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
13414 msg.append("package.provided (see portage(5)) will be removed by\n")
13415 msg.append("depclean, even if they are part of the world set.\n")
13417 msg.append("As a safety measure, depclean will not remove any packages\n")
13418 msg.append("unless *all* required dependencies have been resolved. As a\n")
13419 msg.append("consequence, it is often necessary to run %s\n" % \
13420 good("`emerge --update"))
13421 msg.append(good("--newuse --deep @system @world`") + \
13422 " prior to depclean.\n")
13424 if action == "depclean" and "--quiet" not in myopts and not myfiles:
13425 portage.writemsg_stdout("\n")
13427 portage.writemsg_stdout(colorize("WARN", " * ") + x)
13429 xterm_titles = "notitles" not in settings.features
13430 myroot = settings["ROOT"]
13431 root_config = trees[myroot]["root_config"]
13432 getSetAtoms = root_config.setconfig.getSetAtoms
13433 vardb = trees[myroot]["vartree"].dbapi
13435 required_set_names = ("system", "world")
13439 for s in required_set_names:
13440 required_sets[s] = InternalPackageSet(
13441 initial_atoms=getSetAtoms(s))
13444 # When removing packages, use a temporary version of world
13445 # which excludes packages that are intended to be eligible for
13447 world_temp_set = required_sets["world"]
13448 system_set = required_sets["system"]
13450 if not system_set or not world_temp_set:
13453 writemsg_level("!!! You have no system list.\n",
13454 level=logging.ERROR, noiselevel=-1)
13456 if not world_temp_set:
13457 writemsg_level("!!! You have no world file.\n",
13458 level=logging.WARNING, noiselevel=-1)
13460 writemsg_level("!!! Proceeding is likely to " + \
13461 "break your installation.\n",
13462 level=logging.WARNING, noiselevel=-1)
13463 if "--pretend" not in myopts:
13464 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13466 if action == "depclean":
13467 emergelog(xterm_titles, " >>> depclean")
13470 args_set = InternalPackageSet()
13473 if not is_valid_package_atom(x):
13474 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13475 level=logging.ERROR, noiselevel=-1)
13476 writemsg_level("!!! Please check ebuild(5) for full details.\n")
13479 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13480 except portage.exception.AmbiguousPackageName, e:
13481 msg = "The short ebuild name \"" + x + \
13482 "\" is ambiguous. Please specify " + \
13483 "one of the following " + \
13484 "fully-qualified ebuild names instead:"
13485 for line in textwrap.wrap(msg, 70):
13486 writemsg_level("!!! %s\n" % (line,),
13487 level=logging.ERROR, noiselevel=-1)
13489 writemsg_level(" %s\n" % colorize("INFORM", i),
13490 level=logging.ERROR, noiselevel=-1)
13491 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13494 matched_packages = False
13497 matched_packages = True
13499 if not matched_packages:
13500 writemsg_level(">>> No packages selected for removal by %s\n" % \
13504 writemsg_level("\nCalculating dependencies ")
13505 resolver_params = create_depgraph_params(myopts, "remove")
13506 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13507 vardb = resolver.trees[myroot]["vartree"].dbapi
13509 if action == "depclean":
13512 # Pull in everything that's installed but not matched
13513 # by an argument atom since we don't want to clean any
13514 # package if something depends on it.
13516 world_temp_set.clear()
13521 if args_set.findAtomForPackage(pkg) is None:
13522 world_temp_set.add("=" + pkg.cpv)
13524 except portage.exception.InvalidDependString, e:
13525 show_invalid_depstring_notice(pkg,
13526 pkg.metadata["PROVIDE"], str(e))
13528 world_temp_set.add("=" + pkg.cpv)
13531 elif action == "prune":
13533 # Pull in everything that's installed since we don't
13534 # to prune a package if something depends on it.
13535 world_temp_set.clear()
13536 world_temp_set.update(vardb.cp_all())
13540 # Try to prune everything that's slotted.
13541 for cp in vardb.cp_all():
13542 if len(vardb.cp_list(cp)) > 1:
13545 # Remove atoms from world that match installed packages
13546 # that are also matched by argument atoms, but do not remove
13547 # them if they match the highest installed version.
13550 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13551 if not pkgs_for_cp or pkg not in pkgs_for_cp:
13552 raise AssertionError("package expected in matches: " + \
13553 "cp = %s, cpv = %s matches = %s" % \
13554 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13556 highest_version = pkgs_for_cp[-1]
13557 if pkg == highest_version:
13558 # pkg is the highest version
13559 world_temp_set.add("=" + pkg.cpv)
13562 if len(pkgs_for_cp) <= 1:
13563 raise AssertionError("more packages expected: " + \
13564 "cp = %s, cpv = %s matches = %s" % \
13565 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13568 if args_set.findAtomForPackage(pkg) is None:
13569 world_temp_set.add("=" + pkg.cpv)
13571 except portage.exception.InvalidDependString, e:
13572 show_invalid_depstring_notice(pkg,
13573 pkg.metadata["PROVIDE"], str(e))
13575 world_temp_set.add("=" + pkg.cpv)
13579 for s, package_set in required_sets.iteritems():
13580 set_atom = SETPREFIX + s
13581 set_arg = SetArg(arg=set_atom, set=package_set,
13582 root_config=resolver.roots[myroot])
13583 set_args[s] = set_arg
13584 for atom in set_arg.set:
13585 resolver._dep_stack.append(
13586 Dependency(atom=atom, root=myroot, parent=set_arg))
13587 resolver.digraph.add(set_arg, None)
13589 success = resolver._complete_graph()
13590 writemsg_level("\b\b... done!\n")
13592 resolver.display_problems()
def unresolved_deps():
	"""Report dependencies that the resolver could not satisfy.

	Reads the enclosing scope's ``resolver``, ``allow_missing_deps``
	and ``action`` variables.  Collects (atom, parent cpv) pairs for
	every initially-unsatisfied dependency whose parent is a Package
	and whose priority is stronger than UnmergeDepPriority.SOFT, then
	writes an error report unless missing deps are explicitly allowed.

	NOTE(review): several lines of this function (its return
	statements and the initialization of ``msg``) are elided in this
	view; only comments/docstring were added here.
	"""
	unresolvable = set()
	for dep in resolver._initially_unsatisfied_deps:
		# Priorities at or below SOFT are ignorable for this purpose;
		# only stronger deps of real Package parents are reported.
		if isinstance(dep.parent, Package) and \
			(dep.priority > UnmergeDepPriority.SOFT):
			unresolvable.add((dep.atom, dep.parent.cpv))
	if not unresolvable:
	if unresolvable and not allow_missing_deps:
		prefix = bad(" * ")
		msg.append("Dependencies could not be completely resolved due to")
		msg.append("the following required packages not being installed:")
		for atom, parent in unresolvable:
			msg.append(" %s pulled in by:" % (atom,))
			msg.append(" %s" % (parent,))
		# Suggest the standard remedy before the more drastic options.
		msg.append("Have you forgotten to run " + \
			good("`emerge --update --newuse --deep @system @world`") + " prior")
		msg.append(("to %s? It may be necessary to manually " + \
			"uninstall packages that no longer") % action)
		msg.append("exist in the portage tree since " + \
			"it may not be possible to satisfy their")
		msg.append("dependencies. Also, be aware of " + \
			"the --with-bdeps option that is documented")
		msg.append("in " + good("`man emerge`") + ".")
		if action == "prune":
			msg.append("If you would like to ignore " + \
				"dependencies then use %s." % good("--nodeps"))
		writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
			level=logging.ERROR, noiselevel=-1)
13636 if unresolved_deps():
13639 graph = resolver.digraph.copy()
13640 required_pkgs_total = 0
13642 if isinstance(node, Package):
13643 required_pkgs_total += 1
def show_parents(child_node):
	"""Print the reverse dependencies that pull *child_node* into the graph.

	Uses the enclosing scope's ``graph``.  NOTE(review): the lines
	initializing ``parent_strs``/``msg`` and the early return are
	elided in this view; only comments were added here.
	"""
	parent_nodes = graph.parent_nodes(child_node)
	if not parent_nodes:
		# With --prune, the highest version can be pulled in without any
		# real parent since all installed packages are pulled in. In that
		# case there's nothing to show here.
	for node in parent_nodes:
		# Fall back to str(node) for non-Package nodes (e.g. set args).
		parent_strs.append(str(getattr(node, "cpv", node)))
	msg.append(" %s pulled in by:\n" % (child_node.cpv,))
	for parent_str in parent_strs:
		msg.append(" %s\n" % (parent_str,))
	portage.writemsg_stdout("".join(msg), noiselevel=-1)
def cmp_pkg_cpv(pkg1, pkg2):
	"""Sort Package instances by cpv.

	Three-way comparator for use with cmp_sort_key(): returns 1 if
	pkg1.cpv sorts after pkg2.cpv, 0 if equal, and -1 otherwise.
	(The return statements were missing from the visible block,
	leaving the branches empty; restored here.)
	"""
	if pkg1.cpv > pkg2.cpv:
		return 1
	elif pkg1.cpv == pkg2.cpv:
		return 0
	else:
		return -1
def create_cleanlist():
	"""Build the list of installed packages selected for unmerge.

	depclean: every installed package that is not reachable in
	``graph``.  prune: every installed package matched by an argument
	atom that is not reachable in ``graph``.  Reads ``action``,
	``vardb``, ``graph``, ``args_set``, ``set_args`` and ``myopts``
	from the enclosing scope.

	NOTE(review): several lines (the ``try:`` header, continue/else
	bodies, show_parents() calls and writemsg_level call heads) are
	elided in this view; only comments were added here.
	"""
	pkgs_to_remove = []
	if action == "depclean":
		for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
			arg_atom = args_set.findAtomForPackage(pkg)
			except portage.exception.InvalidDependString:
				# this error has already been displayed by now
			if pkg not in graph:
				pkgs_to_remove.append(pkg)
			elif "--verbose" in myopts:
		for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
			if pkg not in graph:
				pkgs_to_remove.append(pkg)
			elif "--verbose" in myopts:
	elif action == "prune":
		# Prune really uses all installed instead of world. It's not
		# a real reverse dependency so don't display it as such.
		graph.remove(set_args["world"])
		for atom in args_set:
			for pkg in vardb.match_pkgs(atom):
				if pkg not in graph:
					pkgs_to_remove.append(pkg)
				elif "--verbose" in myopts:
	if not pkgs_to_remove:
		">>> No packages selected for removal by %s\n" % action)
		if "--verbose" not in myopts:
			">>> To see reverse dependencies, use %s\n" % \
		if action == "prune":
			">>> To ignore dependencies, use %s\n" % \
	return pkgs_to_remove
13725 cleanlist = create_cleanlist()
13728 clean_set = set(cleanlist)
13730 # Check if any of these package are the sole providers of libraries
13731 # with consumers that have not been selected for removal. If so, these
13732 # packages and any dependencies need to be added to the graph.
13733 real_vardb = trees[myroot]["vartree"].dbapi
13734 linkmap = real_vardb.linkmap
13735 liblist = linkmap.listLibraryObjects()
13736 consumer_cache = {}
13737 provider_cache = {}
13741 writemsg_level(">>> Checking for lib consumers...\n")
13743 for pkg in cleanlist:
13744 pkg_dblink = real_vardb._dblink(pkg.cpv)
13745 provided_libs = set()
13747 for lib in liblist:
13748 if pkg_dblink.isowner(lib, myroot):
13749 provided_libs.add(lib)
13751 if not provided_libs:
13755 for lib in provided_libs:
13756 lib_consumers = consumer_cache.get(lib)
13757 if lib_consumers is None:
13758 lib_consumers = linkmap.findConsumers(lib)
13759 consumer_cache[lib] = lib_consumers
13761 consumers[lib] = lib_consumers
13766 for lib, lib_consumers in consumers.items():
13767 for consumer_file in list(lib_consumers):
13768 if pkg_dblink.isowner(consumer_file, myroot):
13769 lib_consumers.remove(consumer_file)
13770 if not lib_consumers:
13776 for lib, lib_consumers in consumers.iteritems():
13778 soname = soname_cache.get(lib)
13780 soname = linkmap.getSoname(lib)
13781 soname_cache[lib] = soname
13783 consumer_providers = []
13784 for lib_consumer in lib_consumers:
13785 providers = provider_cache.get(lib)
13786 if providers is None:
13787 providers = linkmap.findProviders(lib_consumer)
13788 provider_cache[lib_consumer] = providers
13789 if soname not in providers:
13790 # Why does this happen?
13792 consumer_providers.append(
13793 (lib_consumer, providers[soname]))
13795 consumers[lib] = consumer_providers
13797 consumer_map[pkg] = consumers
13801 search_files = set()
13802 for consumers in consumer_map.itervalues():
13803 for lib, consumer_providers in consumers.iteritems():
13804 for lib_consumer, providers in consumer_providers:
13805 search_files.add(lib_consumer)
13806 search_files.update(providers)
13808 writemsg_level(">>> Assigning files to packages...\n")
13809 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
13811 for pkg, consumers in consumer_map.items():
13812 for lib, consumer_providers in consumers.items():
13813 lib_consumers = set()
13815 for lib_consumer, providers in consumer_providers:
13816 owner_set = file_owners.get(lib_consumer)
13817 provider_dblinks = set()
13818 provider_pkgs = set()
13820 if len(providers) > 1:
13821 for provider in providers:
13822 provider_set = file_owners.get(provider)
13823 if provider_set is not None:
13824 provider_dblinks.update(provider_set)
13826 if len(provider_dblinks) > 1:
13827 for provider_dblink in provider_dblinks:
13828 pkg_key = ("installed", myroot,
13829 provider_dblink.mycpv, "nomerge")
13830 if pkg_key not in clean_set:
13831 provider_pkgs.add(vardb.get(pkg_key))
13836 if owner_set is not None:
13837 lib_consumers.update(owner_set)
13839 for consumer_dblink in list(lib_consumers):
13840 if ("installed", myroot, consumer_dblink.mycpv,
13841 "nomerge") in clean_set:
13842 lib_consumers.remove(consumer_dblink)
13846 consumers[lib] = lib_consumers
13850 del consumer_map[pkg]
13853 # TODO: Implement a package set for rebuilding consumer packages.
13855 msg = "In order to avoid breakage of link level " + \
13856 "dependencies, one or more packages will not be removed. " + \
13857 "This can be solved by rebuilding " + \
13858 "the packages that pulled them in."
13860 prefix = bad(" * ")
13861 from textwrap import wrap
13862 writemsg_level("".join(prefix + "%s\n" % line for \
13863 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
13866 for pkg, consumers in consumer_map.iteritems():
13867 unique_consumers = set(chain(*consumers.values()))
13868 unique_consumers = sorted(consumer.mycpv \
13869 for consumer in unique_consumers)
13871 msg.append(" %s pulled in by:" % (pkg.cpv,))
13872 for consumer in unique_consumers:
13873 msg.append(" %s" % (consumer,))
13875 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
13876 level=logging.WARNING, noiselevel=-1)
13878 # Add lib providers to the graph as children of lib consumers,
13879 # and also add any dependencies pulled in by the provider.
13880 writemsg_level(">>> Adding lib providers to graph...\n")
13882 for pkg, consumers in consumer_map.iteritems():
13883 for consumer_dblink in set(chain(*consumers.values())):
13884 consumer_pkg = vardb.get(("installed", myroot,
13885 consumer_dblink.mycpv, "nomerge"))
13886 if not resolver._add_pkg(pkg,
13887 Dependency(parent=consumer_pkg,
13888 priority=UnmergeDepPriority(runtime=True),
13890 resolver.display_problems()
13893 writemsg_level("\nCalculating dependencies ")
13894 success = resolver._complete_graph()
13895 writemsg_level("\b\b... done!\n")
13896 resolver.display_problems()
13899 if unresolved_deps():
13902 graph = resolver.digraph.copy()
13903 required_pkgs_total = 0
13905 if isinstance(node, Package):
13906 required_pkgs_total += 1
13907 cleanlist = create_cleanlist()
13910 clean_set = set(cleanlist)
13912 # Use a topological sort to create an unmerge order such that
# each package is unmerged before its dependencies. This is
13914 # necessary to avoid breaking things that may need to run
13915 # during pkg_prerm or pkg_postrm phases.
13917 # Create a new graph to account for dependencies between the
13918 # packages being unmerged.
13922 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
13923 runtime = UnmergeDepPriority(runtime=True)
13924 runtime_post = UnmergeDepPriority(runtime_post=True)
13925 buildtime = UnmergeDepPriority(buildtime=True)
13927 "RDEPEND": runtime,
13928 "PDEPEND": runtime_post,
13929 "DEPEND": buildtime,
13932 for node in clean_set:
13933 graph.add(node, None)
13935 node_use = node.metadata["USE"].split()
13936 for dep_type in dep_keys:
13937 depstr = node.metadata[dep_type]
13941 portage.dep._dep_check_strict = False
13942 success, atoms = portage.dep_check(depstr, None, settings,
13943 myuse=node_use, trees=resolver._graph_trees,
13946 portage.dep._dep_check_strict = True
13948 # Ignore invalid deps of packages that will
13949 # be uninstalled anyway.
13952 priority = priority_map[dep_type]
13954 if not isinstance(atom, portage.dep.Atom):
13955 # Ignore invalid atoms returned from dep_check().
13959 matches = vardb.match_pkgs(atom)
13962 for child_node in matches:
13963 if child_node in clean_set:
13964 graph.add(child_node, node, priority=priority)
13967 if len(graph.order) == len(graph.root_nodes()):
13968 # If there are no dependencies between packages
13969 # let unmerge() group them by cat/pn.
13971 cleanlist = [pkg.cpv for pkg in graph.order]
13973 # Order nodes from lowest to highest overall reference count for
13974 # optimal root node selection.
13975 node_refcounts = {}
13976 for node in graph.order:
13977 node_refcounts[node] = len(graph.parent_nodes(node))
13978 def cmp_reference_count(node1, node2):
13979 return node_refcounts[node1] - node_refcounts[node2]
13980 graph.order.sort(key=cmp_sort_key(cmp_reference_count))
13982 ignore_priority_range = [None]
13983 ignore_priority_range.extend(
13984 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
13985 while not graph.empty():
13986 for ignore_priority in ignore_priority_range:
13987 nodes = graph.root_nodes(ignore_priority=ignore_priority)
13991 raise AssertionError("no root nodes")
13992 if ignore_priority is not None:
13993 # Some deps have been dropped due to circular dependencies,
# so only pop one node in order to minimize the number that
13999 cleanlist.append(node.cpv)
14001 unmerge(root_config, myopts, "unmerge", cleanlist,
14002 ldpath_mtimes, ordered=ordered)
14004 if action == "prune":
14007 if not cleanlist and "--quiet" in myopts:
14010 print "Packages installed: "+str(len(vardb.cpv_all()))
14011 print "Packages in world: " + \
14012 str(len(root_config.sets["world"].getAtoms()))
14013 print "Packages in system: " + \
14014 str(len(root_config.sets["system"].getAtoms()))
14015 print "Required packages: "+str(required_pkgs_total)
14016 if "--pretend" in myopts:
14017 print "Number to remove: "+str(len(cleanlist))
14019 print "Number removed: "+str(len(cleanlist))
def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
	"""
	Construct a depgraph for the given resume list. This will raise
	PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
	@returns: (success, depgraph, dropped_tasks)
	"""
	skip_unsatisfied = True
	mergelist = mtimedb["resume"]["mergelist"]
	dropped_tasks = set()
	# NOTE(review): the surrounding retry-loop header, the try:
	# statement, several continue/raise bodies and the definition of
	# skip_masked/unsatisfied are elided in this view; only
	# comments/docstring were touched here.
	mydepgraph = depgraph(settings, trees,
		myopts, myparams, spinner)
	success = mydepgraph.loadResumeCommand(mtimedb["resume"],
		skip_masked=skip_masked)
	except depgraph.UnsatisfiedResumeDep, e:
		if not skip_unsatisfied:
		graph = mydepgraph.digraph
		# Seed the worklist with every parent whose dep is unsatisfied.
		unsatisfied_parents = dict((dep.parent, dep.parent) \
			for dep in e.value)
		traversed_nodes = set()
		unsatisfied_stack = list(unsatisfied_parents)
		while unsatisfied_stack:
			pkg = unsatisfied_stack.pop()
			if pkg in traversed_nodes:
			traversed_nodes.add(pkg)
			# If this package was pulled in by a parent
			# package scheduled for merge, removing this
			# package may cause the parent package's
			# dependency to become unsatisfied.
			for parent_node in graph.parent_nodes(pkg):
				if not isinstance(parent_node, Package) \
					or parent_node.operation not in ("merge", "nomerge"):
				graph.child_nodes(parent_node,
					ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
				if pkg in unsatisfied:
					# Propagate: dropping pkg breaks this parent too.
					unsatisfied_parents[parent_node] = parent_node
					unsatisfied_stack.append(parent_node)
		pruned_mergelist = []
		for x in mergelist:
			if isinstance(x, list) and \
				tuple(x) not in unsatisfied_parents:
				pruned_mergelist.append(x)
		# If the mergelist doesn't shrink then this loop is infinite.
		if len(pruned_mergelist) == len(mergelist):
			# This happens if a package can't be dropped because
			# it's already installed, but it has unsatisfied PDEPEND.
		mergelist[:] = pruned_mergelist
		# Exclude installed packages that have been removed from the graph due
		# to failure to build/install runtime dependencies after the dependent
		# package has already been installed.
		dropped_tasks.update(pkg for pkg in \
			unsatisfied_parents if pkg.operation != "nomerge")
		mydepgraph.break_refs(unsatisfied_parents)
		del e, graph, traversed_nodes, \
			unsatisfied_parents, unsatisfied_stack
	return (success, mydepgraph, dropped_tasks)
def action_build(settings, trees, mtimedb,
	myopts, myaction, myfiles, spinner):
	"""Drive a merge/build run of emerge, including --resume handling.

	Validates stored resume data, builds (or resumes) a depgraph,
	optionally displays/prompts, then hands the merge list to the
	Scheduler and finally auto-cleans if AUTOCLEAN is enabled.

	NOTE(review): a large number of lines in this function are elided
	in this view (continue/return/else bodies, try: headers, some
	assignments); only comments/docstring were added here.
	"""
	# validate the state of the resume data
	# so that we can make assumptions later.
	for k in ("resume", "resume_backup"):
		if k not in mtimedb:
		resume_data = mtimedb[k]
		if not isinstance(resume_data, dict):
		mergelist = resume_data.get("mergelist")
		if not isinstance(mergelist, list):
		for x in mergelist:
			# Each entry must be a 4-element [type, root, key, action] list.
			if not (isinstance(x, list) and len(x) == 4):
			pkg_type, pkg_root, pkg_key, pkg_action = x
			if pkg_root not in trees:
				# Current $ROOT setting differs,
				# so the list must be stale.
		resume_opts = resume_data.get("myopts")
		if not isinstance(resume_opts, (dict, list)):
		favorites = resume_data.get("favorites")
		if not isinstance(favorites, list):
	if "--resume" in myopts and \
		("resume" in mtimedb or
		"resume_backup" in mtimedb):
		if "resume" not in mtimedb:
			# Fall back to the backup copy of the resume data.
			mtimedb["resume"] = mtimedb["resume_backup"]
			del mtimedb["resume_backup"]
		# "myopts" is a list for backward compatibility.
		resume_opts = mtimedb["resume"].get("myopts", [])
		if isinstance(resume_opts, list):
			resume_opts = dict((k,True) for k in resume_opts)
		# Strip options that should not carry over from the resumed command.
		for opt in ("--ask", "--color", "--skipfirst", "--tree"):
			resume_opts.pop(opt, None)
		myopts.update(resume_opts)
		if "--debug" in myopts:
			writemsg_level("myopts %s\n" % (myopts,))
		# Adjust config according to options of the command being resumed.
		for myroot in trees:
			mysettings = trees[myroot]["vartree"].settings
			mysettings.unlock()
			adjust_config(myopts, mysettings)
		del myroot, mysettings
	ldpath_mtimes = mtimedb["ldpath"]
	# Cache frequently-tested option flags as locals.
	buildpkgonly = "--buildpkgonly" in myopts
	pretend = "--pretend" in myopts
	fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
	ask = "--ask" in myopts
	nodeps = "--nodeps" in myopts
	oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
	tree = "--tree" in myopts
	if nodeps and tree:
		del myopts["--tree"]
		portage.writemsg(colorize("WARN", " * ") + \
			"--tree is broken with --nodeps. Disabling...\n")
	debug = "--debug" in myopts
	verbose = "--verbose" in myopts
	quiet = "--quiet" in myopts
	if pretend or fetchonly:
		# make the mtimedb readonly
		mtimedb.filename = None
	if "--digest" in myopts:
		# Warn that --digest can mask corruption.
		msg = "The --digest option can prevent corruption from being" + \
			" noticed. The `repoman manifest` command is the preferred" + \
			" way to generate manifests and it is capable of doing an" + \
			" entire repository or category at once."
		prefix = bad(" * ")
		writemsg(prefix + "\n")
		from textwrap import wrap
		for line in wrap(msg, 72):
			writemsg("%s%s\n" % (prefix, line))
		writemsg(prefix + "\n")
	if "--quiet" not in myopts and \
		("--pretend" in myopts or "--ask" in myopts or \
		"--tree" in myopts or "--verbose" in myopts):
		if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
		elif "--buildpkgonly" in myopts:
		if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
			print darkgreen("These are the packages that would be %s, in reverse order:") % action
			print darkgreen("These are the packages that would be %s, in order:") % action
	show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
	if not show_spinner:
		spinner.update = spinner.update_quiet
	favorites = mtimedb["resume"].get("favorites")
	if not isinstance(favorites, list):
	print "Calculating dependencies ",
	myparams = create_depgraph_params(myopts, myaction)
	resume_data = mtimedb["resume"]
	mergelist = resume_data["mergelist"]
	if mergelist and "--skipfirst" in myopts:
		# Drop the first pending "merge" task from the list.
		for i, task in enumerate(mergelist):
			if isinstance(task, list) and \
				task and task[-1] == "merge":
	success, mydepgraph, dropped_tasks = resume_depgraph(
		settings, trees, mtimedb, myopts, myparams, spinner)
	except (portage.exception.PackageNotFound,
		depgraph.UnsatisfiedResumeDep), e:
		if isinstance(e, depgraph.UnsatisfiedResumeDep):
			mydepgraph = e.depgraph
		from textwrap import wrap
		from portage.output import EOutput
		resume_data = mtimedb["resume"]
		mergelist = resume_data.get("mergelist")
		if not isinstance(mergelist, list):
		if mergelist and debug or (verbose and not quiet):
			out.eerror("Invalid resume list:")
			for task in mergelist:
				if isinstance(task, list):
					out.eerror(indent + str(tuple(task)))
		if isinstance(e, depgraph.UnsatisfiedResumeDep):
			out.eerror("One or more packages are either masked or " + \
				"have missing dependencies:")
			for dep in e.value:
				if dep.atom is None:
					out.eerror(indent + "Masked package:")
					out.eerror(2 * indent + str(dep.parent))
					out.eerror(indent + str(dep.atom) + " pulled in by:")
					out.eerror(2 * indent + str(dep.parent))
			msg = "The resume list contains packages " + \
				"that are either masked or have " + \
				"unsatisfied dependencies. " + \
				"Please restart/continue " + \
				"the operation manually, or use --skipfirst " + \
				"to skip the first package in the list and " + \
				"any other packages that may be " + \
				"masked or have missing dependencies."
			for line in wrap(msg, 72):
		elif isinstance(e, portage.exception.PackageNotFound):
			out.eerror("An expected package is " + \
				"not available: %s" % str(e))
			msg = "The resume list contains one or more " + \
				"packages that are no longer " + \
				"available. Please restart/continue " + \
				"the operation manually."
			for line in wrap(msg, 72):
	print "\b\b... done!"
	# Report tasks dropped while pruning the resume graph.
	portage.writemsg("!!! One or more packages have been " + \
		"dropped due to\n" + \
		"!!! masking or unsatisfied dependencies:\n\n",
	for task in dropped_tasks:
		portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
	portage.writemsg("\n", noiselevel=-1)
	if mydepgraph is not None:
		mydepgraph.display_problems()
	if not (ask or pretend):
		# delete the current list and also the backup
		# since it's probably stale too.
		for k in ("resume", "resume_backup"):
			mtimedb.pop(k, None)
	if ("--resume" in myopts):
		print darkgreen("emerge: It seems we have nothing to resume...")
	myparams = create_depgraph_params(myopts, myaction)
	if "--quiet" not in myopts and "--nodeps" not in myopts:
		print "Calculating dependencies ",
	mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
	retval, favorites = mydepgraph.select_files(myfiles)
	except portage.exception.PackageNotFound, e:
		portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
	except portage.exception.PackageSetNotFound, e:
		root_config = trees[settings["ROOT"]]["root_config"]
		display_missing_pkg_set(root_config, e.value)
	print "\b\b... done!"
	mydepgraph.display_problems()
	if "--pretend" not in myopts and \
		("--ask" in myopts or "--tree" in myopts or \
		"--verbose" in myopts) and \
		not ("--quiet" in myopts and "--ask" not in myopts):
		if "--resume" in myopts:
			mymergelist = mydepgraph.altlist()
			if len(mymergelist) == 0:
				print colorize("INFORM", "emerge: It seems we have nothing to resume...")
			favorites = mtimedb["resume"]["favorites"]
			retval = mydepgraph.display(
				mydepgraph.altlist(reversed=tree),
				favorites=favorites)
			mydepgraph.display_problems()
			if retval != os.EX_OK:
			prompt="Would you like to resume merging these packages?"
			retval = mydepgraph.display(
				mydepgraph.altlist(reversed=("--tree" in myopts)),
				favorites=favorites)
			mydepgraph.display_problems()
			if retval != os.EX_OK:
			for x in mydepgraph.altlist():
				if isinstance(x, Package) and x.operation == "merge":
			sets = trees[settings["ROOT"]]["root_config"].sets
			world_candidates = None
			if "--noreplace" in myopts and \
				not oneshot and favorites:
				# Sets that are not world candidates are filtered
				# out here since the favorites list needs to be
				# complete for depgraph.loadResumeCommand() to
				# operate correctly.
				world_candidates = [x for x in favorites \
					if not (x.startswith(SETPREFIX) and \
					not sets[x[1:]].world_candidate)]
			if "--noreplace" in myopts and \
				not oneshot and world_candidates:
				for x in world_candidates:
					print " %s %s" % (good("*"), x)
				prompt="Would you like to add these packages to your world favorites?"
			elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
				prompt="Nothing to merge; would you like to auto-clean packages?"
				print "Nothing to merge; quitting."
			elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
				prompt="Would you like to fetch the source files for these packages?"
				prompt="Would you like to merge these packages?"
		if "--ask" in myopts and userquery(prompt) == "No":
		# Don't ask again (e.g. when auto-cleaning packages after merge)
		myopts.pop("--ask", None)
	if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
		if ("--resume" in myopts):
			mymergelist = mydepgraph.altlist()
			if len(mymergelist) == 0:
				print colorize("INFORM", "emerge: It seems we have nothing to resume...")
			favorites = mtimedb["resume"]["favorites"]
			retval = mydepgraph.display(
				mydepgraph.altlist(reversed=tree),
				favorites=favorites)
			mydepgraph.display_problems()
			if retval != os.EX_OK:
			retval = mydepgraph.display(
				mydepgraph.altlist(reversed=("--tree" in myopts)),
				favorites=favorites)
			mydepgraph.display_problems()
			if retval != os.EX_OK:
		if "--buildpkgonly" in myopts:
			# Verify every remaining edge is satisfied before a
			# binary-only build is allowed.
			graph_copy = mydepgraph.digraph.clone()
			removed_nodes = set()
			for node in list(graph_copy.order):
				if not isinstance(node, Package) or \
					node.operation == "nomerge":
					removed_nodes.add(node)
			graph_copy.difference_update(removed_nodes)
			if not graph_copy.hasallzeros(ignore_priority = \
				DepPrioritySatisfiedRange.ignore_medium):
				print "\n!!! --buildpkgonly requires all dependencies to be merged."
				print "!!! You have to merge the dependencies before you can build this package.\n"
	if "--buildpkgonly" in myopts:
		graph_copy = mydepgraph.digraph.clone()
		removed_nodes = set()
		for node in list(graph_copy.order):
			if not isinstance(node, Package) or \
				node.operation == "nomerge":
				removed_nodes.add(node)
		graph_copy.difference_update(removed_nodes)
		if not graph_copy.hasallzeros(ignore_priority = \
			DepPrioritySatisfiedRange.ignore_medium):
			print "\n!!! --buildpkgonly requires all dependencies to be merged."
			print "!!! Cannot merge requested packages. Merge deps and try again.\n"
	if ("--resume" in myopts):
		favorites=mtimedb["resume"]["favorites"]
		mymergelist = mydepgraph.altlist()
		# break_refs + del free the depgraph before the merge starts.
		mydepgraph.break_refs(mymergelist)
		mergetask = Scheduler(settings, trees, mtimedb, myopts,
			spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
		del mydepgraph, mymergelist
		clear_caches(trees)
		retval = mergetask.merge()
		merge_count = mergetask.curval
	if "resume" in mtimedb and \
		"mergelist" in mtimedb["resume"] and \
		len(mtimedb["resume"]["mergelist"]) > 1:
		# Preserve the old resume list as a backup before replacing it.
		mtimedb["resume_backup"] = mtimedb["resume"]
		del mtimedb["resume"]
	mtimedb["resume"]={}
	# Stored as a dict starting with portage-2.1.6_rc1, and supported
	# by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
	# a list type for options.
	mtimedb["resume"]["myopts"] = myopts.copy()
	# Convert Atom instances to plain str.
	mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
	if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
		# Regenerate digests for every ebuild scheduled for merge.
		for pkgline in mydepgraph.altlist():
			if pkgline[0]=="ebuild" and pkgline[3]=="merge":
				y = trees[pkgline[1]]["porttree"].dbapi.findname(pkgline[2])
				tmpsettings = portage.config(clone=settings)
				if settings.get("PORTAGE_DEBUG", "") == "1":
				retval = portage.doebuild(
					y, "digest", settings["ROOT"], tmpsettings, edebug,
					("--pretend" in myopts),
					mydbapi=trees[pkgline[1]]["porttree"].dbapi,
	pkglist = mydepgraph.altlist()
	mydepgraph.saveNomergeFavorites()
	mydepgraph.break_refs(pkglist)
	mergetask = Scheduler(settings, trees, mtimedb, myopts,
		spinner, pkglist, favorites, mydepgraph.schedulerGraph())
	del mydepgraph, pkglist
	clear_caches(trees)
	retval = mergetask.merge()
	merge_count = mergetask.curval
	if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
		if "yes" == settings.get("AUTOCLEAN"):
			portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
			unmerge(trees[settings["ROOT"]]["root_config"],
				myopts, "clean", [],
				ldpath_mtimes, autoclean=1)
			portage.writemsg_stdout(colorize("WARN", "WARNING:")
				+ " AUTOCLEAN is disabled. This can cause serious"
				+ " problems due to overlapping packages.\n")
		trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
def multiple_actions(action1, action2):
	"""Complain on stderr when two mutually exclusive actions are requested."""
	err = sys.stderr
	for line in (
		"\n!!! Multiple actions requested... Please choose one only.\n",
		"!!! '%s' or '%s'\n\n" % (action1, action2),
	):
		err.write(line)
def insert_optional_args(args):
	"""
	Parse optional arguments and insert a value if one has
	not been provided. This is done before feeding the args
	to the optparse parser since that parser does not support
	this feature natively.

	Currently handles -j/--jobs, which may appear with or without an
	attached or following count (e.g. -j, -j4, --jobs 4).

	NOTE(review): several lines (the main loop header, the
	initialization of new_args/job_count/saved_opts, try/except
	around the int() parsing, continue statements and the final
	return) are elided in this view; only comments were added here.
	"""
	jobs_opts = ("-j", "--jobs")
	arg_stack = args[:]
	arg_stack.reverse()
	arg = arg_stack.pop()
	# A short option cluster containing "j", e.g. "-vj" or "-j4".
	short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
	if not (short_job_opt or arg in jobs_opts):
		new_args.append(arg)
	# Insert an empty placeholder in order to
	# satisfy the requirements of optparse.
	new_args.append("--jobs")
	if short_job_opt and len(arg) > 2:
		if arg[:2] == "-j":
			# "-jN": the rest of the token is the count.
			job_count = int(arg[2:])
			saved_opts = arg[2:]
			# Clustered short options: keep the non-"j" flags.
			saved_opts = arg[1:].replace("j", "")
	if job_count is None and arg_stack:
		# The next argument may be the job count.
		job_count = int(arg_stack[-1])
		# Discard the job count from the stack
		# since we're consuming it here.
	if job_count is None:
		# unlimited number of jobs
		new_args.append("True")
		new_args.append(str(job_count))
	if saved_opts is not None:
		# Re-emit any short options that were clustered with "j".
		new_args.append("-" + saved_opts)
def parse_opts(tmpcmdline, silent=False):
	"""Parse the emerge command line.

	Builds an optparse parser from the module-level ``actions``,
	``options`` and ``shortmapping`` tables plus the value-taking
	options declared locally, validates --jobs/--load-average, and
	returns (myaction, myopts, myfiles).

	NOTE(review): heavily elided in this view (several dict keys of
	argument_options, try/except blocks, myopts/myfiles handling);
	only comments were added here.
	"""
	global actions, options, shortmapping
	longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
	argument_options = {
		"help":"specify the location for portage configuration files",
		"help":"enable or disable color output",
		"choices":("y", "n")
		"help" : "Specifies the number of packages to build " + \
		"--load-average": {
		"help" :"Specifies that no new builds should be started " + \
			"if there are other builds running and the load average " + \
			"is at least LOAD (a floating-point number).",
		"help":"include unnecessary build time dependencies",
		"choices":("y", "n")
		"help":"specify conditions to trigger package reinstallation",
		"choices":["changed-use"]
	from optparse import OptionParser
	parser = OptionParser()
	if parser.has_option("--help"):
		parser.remove_option("--help")
	for action_opt in actions:
		parser.add_option("--" + action_opt, action="store_true",
			dest=action_opt.replace("-", "_"), default=False)
	for myopt in options:
		# NOTE: lstrip("--") strips the character '-', not the literal
		# prefix string; it works here because options start with "--".
		parser.add_option(myopt, action="store_true",
			dest=myopt.lstrip("--").replace("-", "_"), default=False)
	for shortopt, longopt in shortmapping.iteritems():
		parser.add_option("-" + shortopt, action="store_true",
			dest=longopt.lstrip("--").replace("-", "_"), default=False)
	for myalias, myopt in longopt_aliases.iteritems():
		parser.add_option(myalias, action="store_true",
			dest=myopt.lstrip("--").replace("-", "_"), default=False)
	for myopt, kwargs in argument_options.iteritems():
		parser.add_option(myopt,
			dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
	# Normalize optional-value options (e.g. bare -j) before optparse.
	tmpcmdline = insert_optional_args(tmpcmdline)
	myoptions, myargs = parser.parse_args(args=tmpcmdline)
	if myoptions.jobs == "True":
		jobs = int(myoptions.jobs)
	if jobs is not True and \
		writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
			(myoptions.jobs,), noiselevel=-1)
	myoptions.jobs = jobs
	if myoptions.load_average:
		load_average = float(myoptions.load_average)
		if load_average <= 0.0:
			load_average = None
			writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
				(myoptions.load_average,), noiselevel=-1)
		myoptions.load_average = load_average
	for myopt in options:
		v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
		myopts[myopt] = True
	for myopt in argument_options:
		v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
	for action_opt in actions:
		v = getattr(myoptions, action_opt.replace("-", "_"))
		# More than one action on the command line is an error.
		multiple_actions(myaction, action_opt)
		myaction = action_opt
	return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
	"""Run config validation on the vartree settings of every root.

	Any problems are reported by the settings object itself; nothing is
	returned to the caller.
	"""
	for root in trees:
		trees[root]["vartree"].settings.validate()
def clear_caches(trees):
	"""Flush the memoized/cached state held by each root's db objects,
	then drop portage's global directory listing cache."""
	for root_trees in trees.itervalues():
		porttree_db = root_trees["porttree"].dbapi
		porttree_db.melt()
		porttree_db._aux_cache.clear()
		bintree_db = root_trees["bintree"].dbapi
		bintree_db._aux_cache.clear()
		bintree_db._clear_cache()
		root_trees["vartree"].dbapi.linkmap._clear_cache()
	portage.dircache.clear()
# Build (settings, trees, mtimedb) for an emerge run.
# NOTE(review): some original lines are elided from this excerpt.
14733 def load_emerge_config(trees=None):
# Collect create_trees() kwargs from the environment (config root / ROOT).
14735 for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
14736 v = os.environ.get(envvar, None)
14737 if v and v.strip():
14739 trees = portage.create_trees(trees=trees, **kwargs)
# Attach a RootConfig (settings + set configuration) to every root.
14741 for root, root_trees in trees.iteritems():
14742 settings = root_trees["vartree"].settings
14743 setconfig = load_default_config(settings, root_trees)
14744 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
14746 settings = trees["/"]["vartree"].settings
14748 for myroot in trees:
14750 settings = trees[myroot]["vartree"].settings
# The mtimedb lives under the system cache path, always rooted at "/".
14753 mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
14754 mtimedb = portage.MtimeDB(mtimedbfile)
14756 return settings, trees, mtimedb
14758 def adjust_config(myopts, settings):
14759 """Make emerge specific adjustments to the config."""
14761 # To enhance usability, make some vars case insensitive by forcing them to
# (forced to lower case; backup_changes() keeps the override across resets)
14763 for myvar in ("AUTOCLEAN", "NOCOLOR"):
14764 if myvar in settings:
14765 settings[myvar] = settings[myvar].lower()
14766 settings.backup_changes(myvar)
14769 # Kill noauto as it will break merges otherwise.
14770 if "noauto" in settings.features:
14771 while "noauto" in settings.features:
14772 settings.features.remove("noauto")
14773 settings["FEATURES"] = " ".join(settings.features)
14774 settings.backup_changes("FEATURES")
# Validate CLEAN_DELAY as an integer; on error, report and fall back to the
# current default value.
14778 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
14779 except ValueError, e:
14780 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14781 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
14782 settings["CLEAN_DELAY"], noiselevel=-1)
14783 settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
14784 settings.backup_changes("CLEAN_DELAY")
# Same integer validation for EMERGE_WARNING_DELAY (default 10 seconds).
14786 EMERGE_WARNING_DELAY = 10
14788 EMERGE_WARNING_DELAY = int(settings.get(
14789 "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
14790 except ValueError, e:
14791 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14792 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
14793 settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
14794 settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
14795 settings.backup_changes("EMERGE_WARNING_DELAY")
# Propagate relevant command-line options into the config environment.
14797 if "--quiet" in myopts:
14798 settings["PORTAGE_QUIET"]="1"
14799 settings.backup_changes("PORTAGE_QUIET")
14801 if "--verbose" in myopts:
14802 settings["PORTAGE_VERBOSE"] = "1"
14803 settings.backup_changes("PORTAGE_VERBOSE")
14805 # Set so that configs will be merged regardless of remembered status
14806 if ("--noconfmem" in myopts):
14807 settings["NOCONFMEM"]="1"
14808 settings.backup_changes("NOCONFMEM")
14810 # Set various debug markers... They should be merged somehow.
# PORTAGE_DEBUG must be exactly 0 or 1; anything else is reported.
14813 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
14814 if PORTAGE_DEBUG not in (0, 1):
14815 portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
14816 PORTAGE_DEBUG, noiselevel=-1)
14817 portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
14820 except ValueError, e:
14821 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14822 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
14823 settings["PORTAGE_DEBUG"], noiselevel=-1)
14825 if "--debug" in myopts:
14827 settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
14828 settings.backup_changes("PORTAGE_DEBUG")
# Color handling: NOCOLOR in the config disables color unless the explicit
# --color option overrides it; a non-tty stdout also disables color.
14830 if settings.get("NOCOLOR") not in ("yes","true"):
14831 portage.output.havecolor = 1
14833 """The explicit --color < y | n > option overrides the NOCOLOR environment
14834 variable and stdout auto-detection."""
14835 if "--color" in myopts:
14836 if "y" == myopts["--color"]:
14837 portage.output.havecolor = 1
14838 settings["NOCOLOR"] = "false"
14840 portage.output.havecolor = 0
14841 settings["NOCOLOR"] = "true"
14842 settings.backup_changes("NOCOLOR")
14843 elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
14844 portage.output.havecolor = 0
14845 settings["NOCOLOR"] = "true"
14846 settings.backup_changes("NOCOLOR")
# Apply CPU/IO scheduling priorities from the config; body not visible in
# this excerpt (presumably delegates to nice()/ionice() below — TODO confirm).
14848 def apply_priorities(settings):
# Renice the current process to PORTAGE_NICENESS (default "0").
14852 def nice(settings):
14854 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
# OSError: os.nice() rejected the value; ValueError: non-integer setting.
# Either way the failure is reported but not fatal.
14855 except (OSError, ValueError), e:
14856 out = portage.output.EOutput()
14857 out.eerror("Failed to change nice value to '%s'" % \
14858 settings["PORTAGE_NICENESS"])
14859 out.eerror("%s\n" % str(e))
# Run the user-configured PORTAGE_IONICE_COMMAND to set IO priority.
14861 def ionice(settings):
14863 ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
14865 ionice_cmd = shlex.split(ionice_cmd)
# Substitute ${PID} (and any other referenced variables) into the command.
14869 from portage.util import varexpand
14870 variables = {"PID" : str(os.getpid())}
14871 cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
14874 rval = portage.process.spawn(cmd, env=os.environ)
14875 except portage.exception.CommandNotFound:
14876 # The OS kernel probably doesn't support ionice,
14877 # so return silently.
# Non-zero exit status from the command is reported but not fatal.
14880 if rval != os.EX_OK:
14881 out = portage.output.EOutput()
14882 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
14883 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
# Report an unknown set name and list the sets that do exist on this root.
14885 def display_missing_pkg_set(root_config, set_name):
14888 msg.append(("emerge: There are no sets to satisfy '%s'. " + \
14889 "The following sets exist:") % \
14890 colorize("INFORM", set_name))
14893 for s in sorted(root_config.sets):
14894 msg.append(" %s" % s)
# Emit the whole message at ERROR level, one line per entry.
14897 writemsg_level("".join("%s\n" % l for l in msg),
14898 level=logging.ERROR, noiselevel=-1)
# Expand @set arguments in myfiles into concrete atoms (or keep them as set
# references for actions that must expand them later). Returns (newargs,
# retval). NOTE(review): many original lines are elided from this excerpt.
14900 def expand_set_arguments(myfiles, myaction, root_config):
14902 setconfig = root_config.setconfig
14904 sets = setconfig.getSets()
14906 # In order to know exactly which atoms/sets should be added to the
14907 # world file, the depgraph performs set expansion later. It will get
14908 # confused about where the atoms came from if it's not allowed to
14909 # expand them itself.
14910 do_not_expand = (None, )
# Bare "system"/"world" arguments are treated as set references.
14913 if a in ("system", "world"):
14914 newargs.append(SETPREFIX+a)
14921 # separators for set arguments
14925 # WARNING: all operators must be of equal length
14927 DIFF_OPERATOR = "-@"
14928 UNION_OPERATOR = "+@"
# First pass: parse per-set option arguments of the form name(arg,...) and
# feed them to setconfig.update() before expansion.
14930 for i in range(0, len(myfiles)):
14931 if myfiles[i].startswith(SETPREFIX):
14934 x = myfiles[i][len(SETPREFIX):]
14937 start = x.find(ARG_START)
14938 end = x.find(ARG_END)
14939 if start > 0 and start < end:
14940 namepart = x[:start]
14941 argpart = x[start+1:end]
14943 # TODO: implement proper quoting
14944 args = argpart.split(",")
# "k=v" arguments carry explicit values; bare flags default to "True".
14948 k, v = a.split("=", 1)
14951 options[a] = "True"
14952 setconfig.update(namepart, options)
14953 newset += (x[:start-len(namepart)]+namepart)
14954 x = x[end+len(ARG_END):]
14958 myfiles[i] = SETPREFIX+newset
# Re-read the sets now that per-set options have been applied.
14960 sets = setconfig.getSets()
14962 # display errors that occured while loading the SetConfig instance
14963 for e in setconfig.errors:
14964 print colorize("BAD", "Error during set creation: %s" % e)
14966 # emerge relies on the existance of sets with names "world" and "system"
14967 required_sets = ("world", "system")
14970 for s in required_sets:
14972 missing_sets.append(s)
# Build a grammatically correct listing of the missing required sets.
14974 if len(missing_sets) > 2:
14975 missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
14976 missing_sets_str += ', and "%s"' % missing_sets[-1]
14977 elif len(missing_sets) == 2:
14978 missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
14980 missing_sets_str = '"%s"' % missing_sets[-1]
14981 msg = ["emerge: incomplete set configuration, " + \
14982 "missing set(s): %s" % missing_sets_str]
14984 msg.append(" sets defined: %s" % ", ".join(sets))
14985 msg.append(" This usually means that '%s'" % \
14986 (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
14987 msg.append(" is missing or corrupt.")
14989 writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
14991 unmerge_actions = ("unmerge", "prune", "clean", "depclean")
14994 if a.startswith(SETPREFIX):
14995 # support simple set operations (intersection, difference and union)
14996 # on the commandline. Expressions are evaluated strictly left-to-right
14997 if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
14998 expression = a[len(SETPREFIX):]
# Peel operators off the right end, collecting operands/operators so the
# expression can be evaluated left-to-right afterwards.
15001 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15002 is_pos = expression.rfind(IS_OPERATOR)
15003 diff_pos = expression.rfind(DIFF_OPERATOR)
15004 union_pos = expression.rfind(UNION_OPERATOR)
15005 op_pos = max(is_pos, diff_pos, union_pos)
15006 s1 = expression[:op_pos]
15007 s2 = expression[op_pos+len(IS_OPERATOR):]
15008 op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15010 display_missing_pkg_set(root_config, s2)
15012 expr_sets.insert(0, s2)
15013 expr_ops.insert(0, op)
15015 if not expression in sets:
15016 display_missing_pkg_set(root_config, expression)
15018 expr_sets.insert(0, expression)
# Evaluate the collected operator chain against the leftmost set's atoms.
15019 result = set(setconfig.getSetAtoms(expression))
15020 for i in range(0, len(expr_ops)):
15021 s2 = setconfig.getSetAtoms(expr_sets[i+1])
15022 if expr_ops[i] == IS_OPERATOR:
15023 result.intersection_update(s2)
15024 elif expr_ops[i] == DIFF_OPERATOR:
15025 result.difference_update(s2)
15026 elif expr_ops[i] == UNION_OPERATOR:
15029 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15030 newargs.extend(result)
# Plain @set reference (no operators): resolve its atoms directly.
15032 s = a[len(SETPREFIX):]
15034 display_missing_pkg_set(root_config, s)
15036 setconfig.active.append(s)
15038 set_atoms = setconfig.getSetAtoms(s)
15039 except portage.exception.PackageSetNotFound, e:
15040 writemsg_level(("emerge: the given set '%s' " + \
15041 "contains a non-existent set named '%s'.\n") % \
15042 (s, e), level=logging.ERROR, noiselevel=-1)
# Unmerge-style actions require the set to support the unmerge operation.
15044 if myaction in unmerge_actions and \
15045 not sets[s].supportsOperation("unmerge"):
15046 sys.stderr.write("emerge: the given set '%s' does " % s + \
15047 "not support unmerge operations\n")
15049 elif not set_atoms:
15050 print "emerge: '%s' is an empty set" % s
15051 elif myaction not in do_not_expand:
15052 newargs.extend(set_atoms)
# For actions in do_not_expand, keep the set reference for the depgraph.
15054 newargs.append(SETPREFIX+s)
15055 for e in sets[s].errors:
15059 return (newargs, retval)
# Warn about overlays that lack a profiles/repo_name entry.
# Returns True if any repository is missing a repo_name.
15061 def repo_name_check(trees):
15062 missing_repo_names = set()
15063 for root, root_trees in trees.iteritems():
15064 if "porttree" in root_trees:
15065 portdb = root_trees["porttree"].dbapi
# Start from all known tree paths, then discard those with a repo name.
15066 missing_repo_names.update(portdb.porttrees)
15067 repos = portdb.getRepositories()
15069 missing_repo_names.discard(portdb.getRepositoryPath(r))
15070 if portdb.porttree_root in missing_repo_names and \
15071 not os.path.exists(os.path.join(
15072 portdb.porttree_root, "profiles")):
15073 # This is normal if $PORTDIR happens to be empty,
15074 # so don't warn about it.
15075 missing_repo_names.remove(portdb.porttree_root)
15077 if missing_repo_names:
15079 msg.append("WARNING: One or more repositories " + \
15080 "have missing repo_name entries:")
15082 for p in missing_repo_names:
15083 msg.append("\t%s/profiles/repo_name" % (p,))
15085 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
15086 "should be a plain text file containing a unique " + \
15087 "name for the repository on the first line.", 70))
15088 writemsg_level("".join("%s\n" % l for l in msg),
15089 level=logging.WARNING, noiselevel=-1)
15091 return bool(missing_repo_names)
# Warn (at WARN level) for any root whose CONFIG_PROTECT is empty.
15093 def config_protect_check(trees):
15094 for root, root_trees in trees.iteritems():
15095 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
15096 msg = "!!! CONFIG_PROTECT is empty"
# The root path is appended to the message (conditionally — the guard line
# is not visible in this excerpt).
15098 msg += " for '%s'" % root
15099 writemsg_level(msg, level=logging.WARN, noiselevel=-1)
# Tell the user that a short package name matched several category/package
# pairs. In --quiet mode only the candidate list is printed; otherwise a
# search is run to show fuller information for each candidate.
15101 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
15103 if "--quiet" in myopts:
15104 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15105 print "!!! one of the following fully-qualified ebuild names instead:\n"
15106 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15107 print " " + colorize("INFORM", cp)
# Non-quiet mode: use the search machinery to display each candidate.
15110 s = search(root_config, spinner, "--searchdesc" in myopts,
15111 "--quiet" not in myopts, "--usepkg" in myopts,
15112 "--usepkgonly" in myopts)
15113 null_cp = portage.dep_getkey(insert_category_into_atom(
15115 cat, atom_pn = portage.catsplit(null_cp)
15116 s.searchkey = atom_pn
15117 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15120 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15121 print "!!! one of the above fully-qualified ebuild names instead.\n"
# Verify that every root has a valid profile before proceeding. info/sync,
# --version and --help are exempted so they work with a broken profile.
15123 def profile_check(trees, myaction, myopts):
15124 if myaction in ("info", "sync"):
15126 elif "--version" in myopts or "--help" in myopts:
15128 for root, root_trees in trees.iteritems():
15129 if root_trees["root_config"].settings.profiles:
15131 # generate some profile related warning messages
15132 validate_ebuild_environment(trees)
15133 msg = "If you have just changed your profile configuration, you " + \
15134 "should revert back to the previous configuration. Due to " + \
15135 "your current profile being invalid, allowed actions are " + \
15136 "limited to --help, --info, --sync, and --version."
15137 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
15138 level=logging.ERROR, noiselevel=-1)
15143 global portage # NFC why this is necessary now - genone
15144 portage._disable_legacy_globals()
15145 # Disable color until we're sure that it should be enabled (after
15146 # EMERGE_DEFAULT_OPTS has been parsed).
15147 portage.output.havecolor = 0
15148 # This first pass is just for options that need to be known as early as
15149 # possible, such as --config-root. They will be parsed again later,
15150 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15151 # the value of --config-root).
15152 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15153 if "--debug" in myopts:
15154 os.environ["PORTAGE_DEBUG"] = "1"
15155 if "--config-root" in myopts:
15156 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15158 # Portage needs to ensure a sane umask for the files it creates.
15160 settings, trees, mtimedb = load_emerge_config()
15161 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15162 rval = profile_check(trees, myaction, myopts)
15163 if rval != os.EX_OK:
15166 if portage._global_updates(trees, mtimedb["updates"]):
15168 # Reload the whole config from scratch.
15169 settings, trees, mtimedb = load_emerge_config(trees=trees)
15170 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15172 xterm_titles = "notitles" not in settings.features
15175 if "--ignore-default-opts" not in myopts:
15176 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15177 tmpcmdline.extend(sys.argv[1:])
15178 myaction, myopts, myfiles = parse_opts(tmpcmdline)
15180 if "--digest" in myopts:
15181 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15182 # Reload the whole config from scratch so that the portdbapi internal
15183 # config is updated with new FEATURES.
15184 settings, trees, mtimedb = load_emerge_config(trees=trees)
15185 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15187 for myroot in trees:
15188 mysettings = trees[myroot]["vartree"].settings
15189 mysettings.unlock()
15190 adjust_config(myopts, mysettings)
15191 if "--pretend" not in myopts:
15192 mysettings["PORTAGE_COUNTER_HASH"] = \
15193 trees[myroot]["vartree"].dbapi._counter_hash()
15194 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15196 del myroot, mysettings
15198 apply_priorities(settings)
15200 spinner = stdout_spinner()
15201 if "candy" in settings.features:
15202 spinner.update = spinner.update_scroll
15204 if "--quiet" not in myopts:
15205 portage.deprecated_profile_check(settings=settings)
15206 repo_name_check(trees)
15207 config_protect_check(trees)
15209 eclasses_overridden = {}
15210 for mytrees in trees.itervalues():
15211 mydb = mytrees["porttree"].dbapi
15212 # Freeze the portdbapi for performance (memoize all xmatch results).
15214 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
15217 if eclasses_overridden and \
15218 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
15219 prefix = bad(" * ")
15220 if len(eclasses_overridden) == 1:
15221 writemsg(prefix + "Overlay eclass overrides " + \
15222 "eclass from PORTDIR:\n", noiselevel=-1)
15224 writemsg(prefix + "Overlay eclasses override " + \
15225 "eclasses from PORTDIR:\n", noiselevel=-1)
15226 writemsg(prefix + "\n", noiselevel=-1)
15227 for eclass_name in sorted(eclasses_overridden):
15228 writemsg(prefix + " '%s/%s.eclass'\n" % \
15229 (eclasses_overridden[eclass_name], eclass_name),
15231 writemsg(prefix + "\n", noiselevel=-1)
15232 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
15233 "because it will trigger invalidation of cached ebuild metadata " + \
15234 "that is distributed with the portage tree. If you must " + \
15235 "override eclasses from PORTDIR then you are advised to add " + \
15236 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
15237 "`emerge --regen` after each time that you run `emerge --sync`. " + \
15238 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
15239 "you would like to disable this warning."
15240 from textwrap import wrap
15241 for line in wrap(msg, 72):
15242 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
15244 if "moo" in myfiles:
15247 Larry loves Gentoo (""" + platform.system() + """)
15249 _______________________
15250 < Have you mooed today? >
15251 -----------------------
15261 ext = os.path.splitext(x)[1]
15262 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15263 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15266 root_config = trees[settings["ROOT"]]["root_config"]
15267 if myaction == "list-sets":
15268 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15272 # only expand sets for actions taking package arguments
15273 oldargs = myfiles[:]
15274 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15275 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15276 if retval != os.EX_OK:
15279 # Need to handle empty sets specially, otherwise emerge will react
15280 # with the help message for empty argument lists
15281 if oldargs and not myfiles:
15282 print "emerge: no targets left after set expansion"
15285 if ("--tree" in myopts) and ("--columns" in myopts):
15286 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15289 if ("--quiet" in myopts):
15290 spinner.update = spinner.update_quiet
15291 portage.util.noiselimit = -1
15293 # Always create packages if FEATURES=buildpkg
15294 # Imply --buildpkg if --buildpkgonly
15295 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15296 if "--buildpkg" not in myopts:
15297 myopts["--buildpkg"] = True
15299 # Also allow -S to invoke search action (-sS)
15300 if ("--searchdesc" in myopts):
15301 if myaction and myaction != "search":
15302 myfiles.append(myaction)
15303 if "--search" not in myopts:
15304 myopts["--search"] = True
15305 myaction = "search"
15307 # Always try and fetch binary packages if FEATURES=getbinpkg
15308 if ("getbinpkg" in settings.features):
15309 myopts["--getbinpkg"] = True
15311 if "--buildpkgonly" in myopts:
15312 # --buildpkgonly will not merge anything, so
15313 # it cancels all binary package options.
15314 for opt in ("--getbinpkg", "--getbinpkgonly",
15315 "--usepkg", "--usepkgonly"):
15316 myopts.pop(opt, None)
15318 if "--fetch-all-uri" in myopts:
15319 myopts["--fetchonly"] = True
15321 if "--skipfirst" in myopts and "--resume" not in myopts:
15322 myopts["--resume"] = True
15324 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15325 myopts["--usepkgonly"] = True
15327 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15328 myopts["--getbinpkg"] = True
15330 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15331 myopts["--usepkg"] = True
15333 # Also allow -K to apply --usepkg/-k
15334 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15335 myopts["--usepkg"] = True
15337 # Allow -p to remove --ask
15338 if ("--pretend" in myopts) and ("--ask" in myopts):
15339 print ">>> --pretend disables --ask... removing --ask from options."
15340 del myopts["--ask"]
15342 # forbid --ask when not in a terminal
15343 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15344 if ("--ask" in myopts) and (not sys.stdin.isatty()):
15345 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15349 if settings.get("PORTAGE_DEBUG", "") == "1":
15350 spinner.update = spinner.update_quiet
15352 if "python-trace" in settings.features:
15353 import portage.debug
15354 portage.debug.set_trace(True)
15356 if not ("--quiet" in myopts):
15357 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15358 spinner.update = spinner.update_basic
15360 if "--version" in myopts:
15361 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15362 settings.profile_path, settings["CHOST"],
15363 trees[settings["ROOT"]]["vartree"].dbapi)
15365 elif "--help" in myopts:
15366 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15369 if "--debug" in myopts:
15370 print "myaction", myaction
15371 print "myopts", myopts
15373 if not myaction and not myfiles and "--resume" not in myopts:
15374 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15377 pretend = "--pretend" in myopts
15378 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15379 buildpkgonly = "--buildpkgonly" in myopts
15381 # check if root user is the current user for the actions where emerge needs this
15382 if portage.secpass < 2:
15383 # We've already allowed "--version" and "--help" above.
15384 if "--pretend" not in myopts and myaction not in ("search","info"):
15385 need_superuser = not \
15387 (buildpkgonly and secpass >= 1) or \
15388 myaction in ("metadata", "regen") or \
15389 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15390 if portage.secpass < 1 or \
15393 access_desc = "superuser"
15395 access_desc = "portage group"
15396 # Always show portage_group_warning() when only portage group
15397 # access is required but the user is not in the portage group.
15398 from portage.data import portage_group_warning
15399 if "--ask" in myopts:
15400 myopts["--pretend"] = True
15401 del myopts["--ask"]
15402 print ("%s access is required... " + \
15403 "adding --pretend to options.\n") % access_desc
15404 if portage.secpass < 1 and not need_superuser:
15405 portage_group_warning()
15407 sys.stderr.write(("emerge: %s access is " + \
15408 "required.\n\n") % access_desc)
15409 if portage.secpass < 1 and not need_superuser:
15410 portage_group_warning()
15413 disable_emergelog = False
15414 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15416 disable_emergelog = True
15418 if myaction in ("search", "info"):
15419 disable_emergelog = True
15420 if disable_emergelog:
15421 """ Disable emergelog for everything except build or unmerge
15422 operations. This helps minimize parallel emerge.log entries that can
15423 confuse log parsers. We especially want it disabled during
15424 parallel-fetch, which uses --resume --fetchonly."""
15426 def emergelog(*pargs, **kargs):
15429 if not "--pretend" in myopts:
15430 emergelog(xterm_titles, "Started emerge on: "+\
15431 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15434 myelogstr=" ".join(myopts)
15436 myelogstr+=" "+myaction
15438 myelogstr += " " + " ".join(oldargs)
15439 emergelog(xterm_titles, " *** emerge " + myelogstr)
15442 def emergeexitsig(signum, frame):
15443 signal.signal(signal.SIGINT, signal.SIG_IGN)
15444 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15445 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15446 sys.exit(100+signum)
15447 signal.signal(signal.SIGINT, emergeexitsig)
15448 signal.signal(signal.SIGTERM, emergeexitsig)
15451 """This gets out final log message in before we quit."""
15452 if "--pretend" not in myopts:
15453 emergelog(xterm_titles, " *** terminating.")
15454 if "notitles" not in settings.features:
15456 portage.atexit_register(emergeexit)
15458 if myaction in ("config", "metadata", "regen", "sync"):
15459 if "--pretend" in myopts:
15460 sys.stderr.write(("emerge: The '%s' action does " + \
15461 "not support '--pretend'.\n") % myaction)
15464 if "sync" == myaction:
15465 return action_sync(settings, trees, mtimedb, myopts, myaction)
15466 elif "metadata" == myaction:
15467 action_metadata(settings, portdb, myopts)
15468 elif myaction=="regen":
15469 validate_ebuild_environment(trees)
15470 return action_regen(settings, portdb, myopts.get("--jobs"),
15471 myopts.get("--load-average"))
15473 elif "config"==myaction:
15474 validate_ebuild_environment(trees)
15475 action_config(settings, trees, myopts, myfiles)
15478 elif "search"==myaction:
15479 validate_ebuild_environment(trees)
15480 action_search(trees[settings["ROOT"]]["root_config"],
15481 myopts, myfiles, spinner)
15482 elif myaction in ("clean", "unmerge") or \
15483 (myaction == "prune" and "--nodeps" in myopts):
15484 validate_ebuild_environment(trees)
15486 # Ensure atoms are valid before calling unmerge().
15487 # For backward compat, leading '=' is not required.
15489 if is_valid_package_atom(x) or \
15490 is_valid_package_atom("=" + x):
15493 msg.append("'%s' is not a valid package atom." % (x,))
15494 msg.append("Please check ebuild(5) for full details.")
15495 writemsg_level("".join("!!! %s\n" % line for line in msg),
15496 level=logging.ERROR, noiselevel=-1)
15499 # When given a list of atoms, unmerge
15500 # them in the order given.
15501 ordered = myaction == "unmerge"
15502 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15503 mtimedb["ldpath"], ordered=ordered):
15504 if not (buildpkgonly or fetchonly or pretend):
15505 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15507 elif myaction in ("depclean", "info", "prune"):
15509 # Ensure atoms are valid before calling unmerge().
15510 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15513 if is_valid_package_atom(x):
15515 valid_atoms.append(
15516 portage.dep_expand(x, mydb=vardb, settings=settings))
15517 except portage.exception.AmbiguousPackageName, e:
15518 msg = "The short ebuild name \"" + x + \
15519 "\" is ambiguous. Please specify " + \
15520 "one of the following " + \
15521 "fully-qualified ebuild names instead:"
15522 for line in textwrap.wrap(msg, 70):
15523 writemsg_level("!!! %s\n" % (line,),
15524 level=logging.ERROR, noiselevel=-1)
15526 writemsg_level(" %s\n" % colorize("INFORM", i),
15527 level=logging.ERROR, noiselevel=-1)
15528 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15532 msg.append("'%s' is not a valid package atom." % (x,))
15533 msg.append("Please check ebuild(5) for full details.")
15534 writemsg_level("".join("!!! %s\n" % line for line in msg),
15535 level=logging.ERROR, noiselevel=-1)
15538 if myaction == "info":
15539 return action_info(settings, trees, myopts, valid_atoms)
15541 validate_ebuild_environment(trees)
15542 action_depclean(settings, trees, mtimedb["ldpath"],
15543 myopts, myaction, valid_atoms, spinner)
15544 if not (buildpkgonly or fetchonly or pretend):
15545 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15546 # "update", "system", or just process files:
15548 validate_ebuild_environment(trees)
15549 if "--pretend" not in myopts:
15550 display_news_notification(root_config, myopts)
15551 retval = action_build(settings, trees, mtimedb,
15552 myopts, myaction, myfiles, spinner)
15553 root_config = trees[settings["ROOT"]]["root_config"]
15554 post_emerge(root_config, myopts, mtimedb, retval)