2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
7 from collections import deque
27 from os import path as osp
28 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
31 from portage import digraph
32 from portage.const import NEWS_LIB_PATH
35 import portage.xpak, commands, errno, re, socket, time
36 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
37 nc_len, red, teal, turquoise, xtermTitle, \
38 xtermTitleReset, yellow
39 from portage.output import create_color_func
40 good = create_color_func("GOOD")
41 bad = create_color_func("BAD")
42 # white looks bad on terminals with white background
43 from portage.output import bold as white
47 portage.dep._dep_check_strict = True
50 import portage.exception
51 from portage.data import secpass
52 from portage.elog.messages import eerror
53 from portage.util import normalize_path as normpath
54 from portage.util import cmp_sort_key, writemsg, writemsg_level
55 from portage.sets import load_default_config, SETPREFIX
56 from portage.sets.base import InternalPackageSet
58 from itertools import chain, izip
61 import cPickle as pickle
66 from cStringIO import StringIO
68 from StringIO import StringIO
# Terminal progress spinner with three display modes (basic dots, scrolling
# message, twirling bar) plus a quiet no-op mode.
# NOTE(review): this extract is missing interior lines (the embedded original
# line numbers jump), so several bodies below are fragments -- confirm
# against the full source before relying on exact control flow.
70 class stdout_spinner(object):
# Candidate messages for the scroll mode; one is picked per run based on
# the current time (see scroll_sequence selection below).
72 "Gentoo Rocks ("+platform.system()+")",
73 "Thank you for using Gentoo. :)",
74 "Are you actually trying to read this?",
75 "How many times have you stared at this?",
76 "We are generating the cache right now",
77 "You are paying too much attention.",
78 "A theory is better than its explanation.",
79 "Phasers locked on target, Captain.",
80 "Thrashing is just virtual crashing.",
81 "To be is to program.",
82 "Real Users hate Real Programmers.",
83 "When all else fails, read the instructions.",
84 "Functionality breeds Contempt.",
85 "The future lies ahead.",
86 "3.1415926535897932384626433832795028841971694",
87 "Sometimes insanity is the only alternative.",
88 "Inaccuracy saves a world of explanation.",
# Characters cycled through by update_twirl().
91 twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
# Default update mode is the twirl; scroll message chosen pseudo-randomly
# from the wall clock so repeated runs vary.
95 self.update = self.update_twirl
96 self.scroll_sequence = self.scroll_msgs[
97 int(time.time() * 100) % len(self.scroll_msgs)]
# Minimum seconds between terminal writes (rate limiting).
99 self.min_display_latency = 0.05
101 def _return_early(self):
# Docstring fragment; typo "ouput" corrected to "output".
103 Flushing output to the tty too frequently wastes cpu time. Therefore,
104 each update* method should return without doing any output when this
# Rate limiter: True (presumably -- missing line) when called again within
# min_display_latency seconds of the last accepted update.
107 cur_time = time.time()
108 if cur_time - self.last_update < self.min_display_latency:
110 self.last_update = cur_time
113 def update_basic(self):
# Emits a dot every 100th call; position wraps at 500.
114 self.spinpos = (self.spinpos + 1) % 500
115 if self._return_early():
117 if (self.spinpos % 100) == 0:
118 if self.spinpos == 0:
119 sys.stdout.write(". ")
121 sys.stdout.write(".")
124 def update_scroll(self):
125 if self._return_early():
# Second half of the cycle scrolls the message back out (backspaces),
# first half scrolls it in.
127 if(self.spinpos >= len(self.scroll_sequence)):
128 sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
129 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
131 sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
133 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
135 def update_twirl(self):
# Advance position before the early-return check so the twirl keeps
# cycling even when output is throttled.
136 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
137 if self._return_early():
139 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
# Quiet mode: intentionally does nothing (body not visible in extract).
142 def update_quiet(self):
# NOTE(review): gapped extract -- the try/while scaffolding and return
# statements are missing lines here; confirm against full source.
145 def userquery(prompt, responses=None, colours=None):
146 """Displays a prompt and a set of responses, then waits for a response
147 which is checked against the responses and the first to match is
148 returned. An empty response will match the first value in responses. The
149 input buffer is *not* cleared prior to the prompt!
152 responses: a List of Strings.
153 colours: a List of Functions taking and returning a String, used to
154 process the responses for display. Typically these will be functions
155 like red() but could be e.g. lambda x: "DisplayString".
156 If responses is omitted, defaults to ["Yes", "No"], [green, red].
157 If only colours is omitted, defaults to [bold, ...].
159 Returns a member of the List responses. (If called without optional
160 arguments, returns "Yes" or "No".)
161 KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
163 if responses is None:
164 responses = ["Yes", "No"]
# Default colours come from the user's color configuration.
166 create_color_func("PROMPT_CHOICE_DEFAULT"),
167 create_color_func("PROMPT_CHOICE_OTHER")
169 elif colours is None:
# Repeat colours cyclically so each response gets one.
171 colours=(colours*len(responses))[:len(responses)]
175 response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
176 for key in responses:
177 # An empty response will match the first value in responses.
178 if response.upper()==key[:len(response)].upper():
180 print "Sorry, response '%s' not understood." % response,
# EOF/Ctrl-C aborts rather than raising through (per the docstring above).
181 except (EOFError, KeyboardInterrupt):
# Command-line vocabulary for emerge: recognized action verbs, long options,
# and single-letter -> long-option aliases.
# NOTE(review): entries are missing from this extract (embedded numbering
# jumps); do not treat these tables as exhaustive.
185 actions = frozenset([
186 "clean", "config", "depclean",
187 "info", "list-sets", "metadata",
188 "prune", "regen", "search",
# Long options (the "options = [" opener is among the missing lines).
192 "--ask", "--alphabetical",
193 "--buildpkg", "--buildpkgonly",
194 "--changelog", "--columns",
199 "--fetchonly", "--fetch-all-uri",
200 "--getbinpkg", "--getbinpkgonly",
201 "--help", "--ignore-default-opts",
204 "--newuse", "--nocolor",
205 "--nodeps", "--noreplace",
206 "--nospinner", "--oneshot",
207 "--onlydeps", "--pretend",
208 "--quiet", "--resume",
209 "--searchdesc", "--selective",
213 "--usepkg", "--usepkgonly",
214 "--verbose", "--version"
# Short-flag aliases (the "shortmapping = {" opener is among the missing lines).
220 "b":"--buildpkg", "B":"--buildpkgonly",
221 "c":"--clean", "C":"--unmerge",
222 "d":"--debug", "D":"--deep",
224 "f":"--fetchonly", "F":"--fetch-all-uri",
225 "g":"--getbinpkg", "G":"--getbinpkgonly",
227 "k":"--usepkg", "K":"--usepkgonly",
229 "n":"--noreplace", "N":"--newuse",
230 "o":"--onlydeps", "O":"--nodeps",
231 "p":"--pretend", "P":"--prune",
233 "s":"--search", "S":"--searchdesc",
236 "v":"--verbose", "V":"--version"
# Append a timestamped entry to /var/log/emerge.log, optionally mirroring a
# short message to the xterm title bar.
# NOTE(review): gapped extract -- the try/finally scaffolding around the
# lock is partly missing; confirm against full source.
239 def emergelog(xterm_titles, mystr, short_msg=None):
240 if xterm_titles and short_msg:
# Prefix the hostname so titles are useful over remote sessions.
241 if "HOSTNAME" in os.environ:
242 short_msg = os.environ["HOSTNAME"]+": "+short_msg
243 xtermTitle(short_msg)
245 file_path = "/var/log/emerge.log"
246 mylogfile = open(file_path, "a")
# Keep the log owned by the portage user/group regardless of who runs emerge.
247 portage.util.apply_secpass_permissions(file_path,
248 uid=portage.portage_uid, gid=portage.portage_gid,
252 mylock = portage.locks.lockfile(mylogfile)
253 # seek because we may have gotten held up by the lock.
254 # if so, we may not be positioned at the end of the file.
# Timestamp truncated to whole seconds ([:10]).
256 mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
260 portage.locks.unlockfile(mylock)
# Logging is best-effort: report the failure to stderr but never raise.
262 except (IOError,OSError,portage.exception.PortageException), e:
264 print >> sys.stderr, "emergelog():",e
# Print a visible countdown before a potentially destructive action, giving
# the user a window to Ctrl-C out.
# NOTE(review): gapped extract -- the loop and sleep lines are missing here.
266 def countdown(secs=5, doing="Starting"):
268 print ">>> Waiting",secs,"seconds before starting..."
269 print ">>> (Control-C to abort)...\n"+doing+" in: ",
# Remaining seconds highlighted with the unmerge-warning color.
273 sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
278 # formats a size given in bytes nicely
# Returns a human-readable kB string with thousands separators.
# NOTE(review): gapped extract -- the comma-insertion loop and return
# statement are missing lines; confirm against full source.
279 def format_size(mysize):
# Non-numeric input is presumably passed through -- body not visible.
280 if isinstance(mysize, basestring):
282 if 0 != mysize % 1024:
283 # Always round up to the next kB so that it doesn't show 0 kB when
284 # some small file still needs to be fetched.
285 mysize += 1024 - mysize % 1024
286 mystr=str(mysize/1024)
# Insert a comma as the thousands separator (loop header not visible).
290 mystr=mystr[:mycount]+","+mystr[mycount:]
# Determine the active gcc version for the given CHOST, trying increasingly
# generic probes: gcc-config, then ${CHOST}-gcc, then plain gcc.
294 def getgccversion(chost):
297 return: the current in-use gcc version
300 gcc_ver_command = 'gcc -dumpversion'
301 gcc_ver_prefix = 'gcc-'
303 gcc_not_found_error = red(
304 "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
305 "!!! to update the environment of this terminal and possibly\n" +
306 "!!! other terminals also.\n"
# Preferred probe: gcc-config reports the selected profile, e.g.
# "<chost>-4.1.2" -> "gcc-4.1.2".
309 mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
310 if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
311 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
# Fallback: ask the CHOST-prefixed compiler directly.
313 mystatus, myoutput = commands.getstatusoutput(
314 chost + "-" + gcc_ver_command)
315 if mystatus == os.EX_OK:
316 return gcc_ver_prefix + myoutput
# Last resort: unprefixed gcc on PATH.
318 mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
319 if mystatus == os.EX_OK:
320 return gcc_ver_prefix + myoutput
# All probes failed: warn and return a placeholder rather than raising.
322 portage.writemsg(gcc_not_found_error, noiselevel=-1)
323 return "[unavailable]"
# Build the one-line banner string "Portage <ver> (<profile>, <gcc>, <libc>,
# <kernel> <arch>)" shown by emerge --info/--version.
# NOTE(review): gapped extract -- try/except scaffolding and the libc loop
# header are missing lines; confirm against full source.
325 def getportageversion(portdir, target_root, profile, chost, vardb):
326 profilever = "unavailable"
# Prefer a profile path relative to ${PORTDIR}/profiles; otherwise fall
# back to the raw symlink target prefixed with "!".
328 realpath = os.path.realpath(profile)
329 basepath = os.path.realpath(os.path.join(portdir, "profiles"))
330 if realpath.startswith(basepath):
331 profilever = realpath[1 + len(basepath):]
334 profilever = "!" + os.readlink(profile)
337 del realpath, basepath
# Collect installed libc providers (both virtual spellings) and join
# their versions.
340 libclist = vardb.match("virtual/libc")
341 libclist += vardb.match("virtual/glibc")
342 libclist = portage.util.unique_array(libclist)
344 xs=portage.catpkgsplit(x)
346 libcver+=","+"-".join(xs[1:])
348 libcver="-".join(xs[1:])
350 libcver="unavailable"
352 gccver = getgccversion(chost)
353 unameout=platform.release()+" "+platform.machine()
355 return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
# Translate emerge command-line options into the depgraph parameter set.
# NOTE(review): gapped extract -- the --deep branch body and the return
# statement are missing lines; confirm against full source.
357 def create_depgraph_params(myopts, myaction):
358 #configure emerge engine parameters
360 # self: include _this_ package regardless of if it is merged.
361 # selective: exclude the package if it is merged
362 # recurse: go into the dependencies
363 # deep: go into the dependencies of already merged packages
364 # empty: pretend nothing is merged
365 # complete: completely account for all known dependencies
366 # remove: build graph for use in removing packages
367 myparams = set(["recurse"])
# depclean/prune-style removal needs a complete graph to be safe.
369 if myaction == "remove":
370 myparams.add("remove")
371 myparams.add("complete")
374 if "--update" in myopts or \
375 "--newuse" in myopts or \
376 "--reinstall" in myopts or \
377 "--noreplace" in myopts:
378 myparams.add("selective")
# --emptytree overrides selective: everything is treated as unmerged.
379 if "--emptytree" in myopts:
380 myparams.add("empty")
381 myparams.discard("selective")
382 if "--nodeps" in myopts:
383 myparams.discard("recurse")
384 if "--deep" in myopts:
386 if "--complete-graph" in myopts:
387 myparams.add("complete")
390 # search functionality
# Package search across portage/binary/installed databases, unified behind a
# fake portdb facade so the classic search code paths work over all of them.
# NOTE(review): this extract is missing many interior lines (try/except
# headers, else branches, returns); bodies below are fragments -- confirm
# against the full source before relying on exact control flow.
391 class search(object):
402 def __init__(self, root_config, spinner, searchdesc,
403 verbose, usepkg, usepkgonly):
404 """Searches the available and installed packages for the supplied search key.
405 The list of available and installed packages is created at object instantiation.
406 This makes successive searches faster."""
407 self.settings = root_config.settings
408 self.vartree = root_config.trees["vartree"]
409 self.spinner = spinner
410 self.verbose = verbose
411 self.searchdesc = searchdesc
412 self.root_config = root_config
413 self.setconfig = root_config.setconfig
414 self.matches = {"pkg" : []}
# Facade object whose dbapi-like methods dispatch to our _-prefixed
# wrappers, which in turn consult every configured db in order.
419 self.portdb = fake_portdb
420 for attrib in ("aux_get", "cp_all",
421 "xmatch", "findname", "getFetchMap"):
422 setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
426 portdb = root_config.trees["porttree"].dbapi
427 bindb = root_config.trees["bintree"].dbapi
428 vardb = root_config.trees["vartree"].dbapi
# Build the db search order: ebuild tree (unless --usepkgonly), then
# binary packages (when requested and non-empty), then installed.
430 if not usepkgonly and portdb._have_root_eclass_dir:
431 self._dbs.append(portdb)
433 if (usepkg or usepkgonly) and bindb.cp_all():
434 self._dbs.append(bindb)
436 self._dbs.append(vardb)
437 self._portdb = portdb
# _cp_all: union of category/package names over all dbs, sorted.
442 cp_all.update(db.cp_all())
443 return list(sorted(cp_all))
445 def _aux_get(self, *args, **kwargs):
# Returns metadata from the first db that has the cpv (loop header
# not visible in this extract).
448 return db.aux_get(*args, **kwargs)
453 def _findname(self, *args, **kwargs):
455 if db is not self._portdb:
456 # We don't want findname to return anything
457 # unless it's an ebuild in a portage tree.
458 # Otherwise, it's already built and we don't
461 func = getattr(db, "findname", None)
463 value = func(*args, **kwargs)
468 def _getFetchMap(self, *args, **kwargs):
470 func = getattr(db, "getFetchMap", None)
472 value = func(*args, **kwargs)
477 def _visible(self, db, cpv, metadata):
# Installed packages are trivially "built"; so is anything not from
# the ebuild tree (i.e. binary packages).
478 installed = db is self.vartree.dbapi
479 built = installed or db is not self._portdb
482 pkg_type = "installed"
485 return visible(self.settings,
486 Package(type_name=pkg_type, root_config=self.root_config,
487 cpv=cpv, built=built, installed=installed, metadata=metadata))
489 def _xmatch(self, level, atom):
# Docstring fragment; typo "virual" corrected to "virtual".
491 This method does not expand old-style virtuals because it
492 is restricted to returning matches for a single ${CATEGORY}/${PN}
493 and old-style virtual matches unreliable for that when querying
494 multiple package databases. If necessary, old-style virtuals
495 can be performed on atoms prior to calling this method.
497 cp = portage.dep_getkey(atom)
# "match-all": union of matches from every db, filtered to this cp.
498 if level == "match-all":
501 if hasattr(db, "xmatch"):
502 matches.update(db.xmatch(level, atom))
504 matches.update(db.match(atom))
505 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
506 db._cpv_sort_ascending(result)
# "match-visible": like match-all, but dbs without xmatch get their
# matches filtered through _visible().
507 elif level == "match-visible":
510 if hasattr(db, "xmatch"):
511 matches.update(db.xmatch(level, atom))
513 db_keys = list(db._aux_cache_keys)
514 for cpv in db.match(atom):
515 metadata = izip(db_keys,
516 db.aux_get(cpv, db_keys))
517 if not self._visible(db, cpv, metadata):
520 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
521 db._cpv_sort_ascending(result)
# "bestmatch-visible": highest visible match across all dbs,
# compared via portage.best().
522 elif level == "bestmatch-visible":
525 if hasattr(db, "xmatch"):
526 cpv = db.xmatch("bestmatch-visible", atom)
527 if not cpv or portage.cpv_getkey(cpv) != cp:
529 if not result or cpv == portage.best([cpv, result]):
532 db_keys = Package.metadata_keys
533 # break out of this loop with highest visible
534 # match, checked in descending order
535 for cpv in reversed(db.match(atom)):
536 if portage.cpv_getkey(cpv) != cp:
538 metadata = izip(db_keys,
539 db.aux_get(cpv, db_keys))
540 if not self._visible(db, cpv, metadata):
542 if not result or cpv == portage.best([cpv, result]):
# Any other match level is unsupported.
546 raise NotImplementedError(level)
549 def execute(self,searchkey):
550 """Performs the search for the supplied search key"""
552 self.searchkey=searchkey
553 self.packagematches = []
# "desc" bucket only exists when description searching is enabled.
556 self.matches = {"pkg":[], "desc":[], "set":[]}
559 self.matches = {"pkg":[], "set":[]}
560 print "Searching... ",
# Leading '%' marks the key as a regular expression; leading '@'
# restricts matching to the full category/name.
563 if self.searchkey.startswith('%'):
565 self.searchkey = self.searchkey[1:]
566 if self.searchkey.startswith('@'):
568 self.searchkey = self.searchkey[1:]
570 self.searchre=re.compile(self.searchkey,re.I)
572 self.searchre=re.compile(re.escape(self.searchkey), re.I)
573 for package in self.portdb.cp_all():
574 self.spinner.update()
577 match_string = package[:]
579 match_string = package.split("/")[-1]
582 if self.searchre.search(match_string):
# No visible version -> record the package as masked.
583 if not self.portdb.xmatch("match-visible", package):
585 self.matches["pkg"].append([package,masked])
586 elif self.searchdesc: # DESCRIPTION searching
587 full_package = self.portdb.xmatch("bestmatch-visible", package)
589 #no match found; we don't want to query description
590 full_package = portage.best(
591 self.portdb.xmatch("match-all", package))
597 full_desc = self.portdb.aux_get(
598 full_package, ["DESCRIPTION"])[0]
600 print "emerge: search: aux_get() failed, skipping"
602 if self.searchre.search(full_desc):
603 self.matches["desc"].append([full_package,masked])
# Also search package sets by name and (optionally) description.
605 self.sdict = self.setconfig.getSets()
606 for setname in self.sdict:
607 self.spinner.update()
609 match_string = setname
611 match_string = setname.split("/")[-1]
613 if self.searchre.search(match_string):
614 self.matches["set"].append([setname, False])
615 elif self.searchdesc:
616 if self.searchre.search(
617 self.sdict[setname].getMetadata("DESCRIPTION")):
618 self.matches["set"].append([setname, False])
# Sort each bucket and accumulate the total hit count.
621 for mtype in self.matches:
622 self.matches[mtype].sort()
623 self.mlen += len(self.matches[mtype])
# Helper fragment: add a single cp to the "pkg" bucket (def line
# not visible in this extract).
626 if not self.portdb.xmatch("match-all", cp):
629 if not self.portdb.xmatch("bestmatch-visible", cp):
631 self.matches["pkg"].append([cp, masked])
# output(): pretty-print the accumulated matches (def line missing).
635 """Outputs the results of the search."""
636 print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
637 print "[ Applications found : "+white(str(self.mlen))+" ]"
639 vardb = self.vartree.dbapi
640 for mtype in self.matches:
641 for match,masked in self.matches[mtype]:
645 full_package = self.portdb.xmatch(
646 "bestmatch-visible", match)
648 #no match found; we don't want to query description
650 full_package = portage.best(
651 self.portdb.xmatch("match-all",match))
652 elif mtype == "desc":
654 match = portage.cpv_getkey(match)
656 print green("*")+" "+white(match)
657 print " ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION")
661 desc, homepage, license = self.portdb.aux_get(
662 full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
664 print "emerge: search: aux_get() failed, skipping"
667 print green("*")+" "+white(match)+" "+red("[ Masked ]")
669 print green("*")+" "+white(match)
670 myversion = self.getVersion(full_package, search.VERSION_RELEASE)
# Compute the distfiles download size for verbose output.
674 mycat = match.split("/")[0]
675 mypkg = match.split("/")[1]
676 mycpv = match + "-" + myversion
677 myebuild = self.portdb.findname(mycpv)
679 pkgdir = os.path.dirname(myebuild)
680 from portage import manifest
681 mf = manifest.Manifest(
682 pkgdir, self.settings["DISTDIR"])
684 uri_map = self.portdb.getFetchMap(mycpv)
685 except portage.exception.InvalidDependString, e:
686 file_size_str = "Unknown (%s)" % (e,)
690 mysum[0] = mf.getDistfilesSize(uri_map)
692 file_size_str = "Unknown (missing " + \
693 "digest for %s)" % (e,)
# For binary packages, fall back to the .tbz2 file size on disk.
698 if db is not vardb and \
699 db.cpv_exists(mycpv):
701 if not myebuild and hasattr(db, "bintree"):
702 myebuild = db.bintree.getname(mycpv)
704 mysum[0] = os.stat(myebuild).st_size
709 if myebuild and file_size_str is None:
# Format as kB with a comma thousands separator.
710 mystr = str(mysum[0] / 1024)
714 mystr = mystr[:mycount] + "," + mystr[mycount:]
715 file_size_str = mystr + " kB"
719 print " ", darkgreen("Latest version available:"),myversion
720 print " ", self.getInstallationStatus(mycat+'/'+mypkg)
723 (darkgreen("Size of files:"), file_size_str)
724 print " ", darkgreen("Homepage:")+" ",homepage
725 print " ", darkgreen("Description:")+" ",desc
726 print " ", darkgreen("License:")+" ",license
# Report the installed version of a package, or "[ Not Installed ]".
731 def getInstallationStatus(self,package):
732 installed_package = self.vartree.dep_bestmatch(package)
734 version = self.getVersion(installed_package,search.VERSION_RELEASE)
736 result = darkgreen("Latest version installed:")+" "+version
738 result = darkgreen("Latest version installed:")+" [ Not Installed ]"
# Extract the version (optionally with -rN revision) from a full cpv.
741 def getVersion(self,full_package,detail):
742 if len(full_package) > 1:
743 package_parts = portage.catpkgsplit(full_package)
# Suppress the default 'r0' revision from display.
744 if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
745 result = package_parts[2]+ "-" + package_parts[3]
747 result = package_parts[2]
# Per-ROOT configuration bundle used by the depgraph: settings, trees, and
# package sets for a single installation root.
# NOTE(review): gapped extract -- pkg_tree_map opener and the reverse-map
# construction are partly missing; confirm against full source.
752 class RootConfig(object):
753 """This is used internally by depgraph to track information about a
# Maps depgraph package type names to the tree that stores them.
757 "ebuild" : "porttree",
758 "binary" : "bintree",
759 "installed" : "vartree"
# Build the inverse mapping (tree name -> package type).
763 for k, v in pkg_tree_map.iteritems():
766 def __init__(self, settings, trees, setconfig):
768 self.settings = settings
# Implicit IUSE is sorted into a tuple so it is hashable and stable.
769 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
770 self.root = self.settings["ROOT"]
771 self.setconfig = setconfig
772 self.sets = self.setconfig.getSets()
# In-memory dbapi tracking packages known to be visible.
773 self.visible_pkgs = PackageVirtualDbapi(self.settings)
# NOTE(review): gapped extract -- several returns and loop headers are
# missing lines here; confirm against full source.
775 def create_world_atom(pkg, args_set, root_config):
776 """Create a new atom for the world file if one does not exist. If the
777 argument atom is precise enough to identify a specific slot then a slot
778 atom will be returned. Atoms that are in the system set may also be stored
779 in world since system atoms can only match one slot while world atoms can
780 be greedy with respect to slots. Unslotted system packages will not be
783 arg_atom = args_set.findAtomForPackage(pkg)
786 cp = portage.dep_getkey(arg_atom)
788 sets = root_config.sets
789 portdb = root_config.trees["porttree"].dbapi
790 vardb = root_config.trees["vartree"].dbapi
# A package is "slotted" when more than one SLOT exists for its cp, or
# its single slot is something other than the default "0".
791 available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
792 for cpv in portdb.match(cp))
793 slotted = len(available_slots) > 1 or \
794 (len(available_slots) == 1 and "0" not in available_slots)
796 # check the vdb in case this is multislot
797 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
798 for cpv in vardb.match(cp))
799 slotted = len(available_slots) > 1 or \
800 (len(available_slots) == 1 and "0" not in available_slots)
801 if slotted and arg_atom != cp:
802 # If the user gave a specific atom, store it as a
803 # slot atom in the world file.
804 slot_atom = pkg.slot_atom
806 # For USE=multislot, there are a couple of cases to
809 # 1) SLOT="0", but the real SLOT spontaneously changed to some
810 # unknown value, so just record an unslotted atom.
812 # 2) SLOT comes from an installed package and there is no
813 # matching SLOT in the portage tree.
815 # Make sure that the slot atom is available in either the
816 # portdb or the vardb, since otherwise the user certainly
817 # doesn't want the SLOT atom recorded in the world file
818 # (case 1 above). If it's only available in the vardb,
819 # the user may be trying to prevent a USE=multislot
820 # package from being removed by --depclean (case 2 above).
823 if not portdb.match(slot_atom):
824 # SLOT seems to come from an installed multislot package
826 # If there is no installed package matching the SLOT atom,
827 # it probably changed SLOT spontaneously due to USE=multislot,
828 # so just record an unslotted atom.
829 if vardb.match(slot_atom):
830 # Now verify that the argument is precise
831 # enough to identify a specific slot.
832 matches = mydb.match(arg_atom)
833 matched_slots = set()
# Only keep the slot atom when the argument matched exactly one slot.
835 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
836 if len(matched_slots) == 1:
837 new_world_atom = slot_atom
839 if new_world_atom == sets["world"].findAtomForPackage(pkg):
840 # Both atoms would be identical, so there's nothing to add.
843 # Unlike world atoms, system atoms are not greedy for slots, so they
844 # can't be safely excluded from world if they are slotted.
845 system_atom = sets["system"].findAtomForPackage(pkg)
847 if not portage.dep_getkey(system_atom).startswith("virtual/"):
849 # System virtuals aren't safe to exclude from world since they can
850 # match multiple old-style virtuals but only one of them will be
851 # pulled in by update or depclean.
852 providers = portdb.mysettings.getvirtuals().get(
853 portage.dep_getkey(system_atom))
# Safe to skip world only when the virtual has exactly one provider
# and it is this package's cp.
854 if providers and len(providers) == 1 and providers[0] == cp:
856 return new_world_atom
# Strip the leading +/- default markers from IUSE flags.
# NOTE(review): gapped extract -- the loop header and yield lines are
# missing; confirm against full source.
858 def filter_iuse_defaults(iuse):
860 if flag.startswith("+") or flag.startswith("-"):
# Base class providing keyword-argument initialization and copy() over all
# __slots__ declared anywhere in the class hierarchy.
# NOTE(review): gapped extract -- the while-loop headers walking the MRO
# are missing lines; confirm against full source.
865 class SlotObject(object):
866 __slots__ = ("__weakref__",)
868 def __init__(self, **kwargs):
# Walk the class and all its bases, collecting every __slots__ entry
# and initializing it from kwargs (defaulting to None).
869 classes = [self.__class__]
874 classes.extend(c.__bases__)
875 slots = getattr(c, "__slots__", None)
879 myvalue = kwargs.get(myattr, None)
880 setattr(self, myattr, myvalue)
# copy(): shallow copy via a fresh instance (def line not visible).
884 Create a new instance and copy all attributes
885 defined from __slots__ (including those from
888 obj = self.__class__()
890 classes = [self.__class__]
895 classes.extend(c.__bases__)
896 slots = getattr(c, "__slots__", None)
900 setattr(obj, myattr, getattr(self, myattr))
# Base dependency priority: comparable as an integer (subclasses define
# __int__ from the buildtime/runtime/runtime_post flags).
904 class AbstractDepPriority(SlotObject):
905 __slots__ = ("buildtime", "runtime", "runtime_post")
# Rich comparisons all delegate to the integer encoding, so priorities
# compare directly against ints and against each other.
907 def __lt__(self, other):
908 return self.__int__() < other
910 def __le__(self, other):
911 return self.__int__() <= other
913 def __eq__(self, other):
914 return self.__int__() == other
916 def __ne__(self, other):
917 return self.__int__() != other
919 def __gt__(self, other):
920 return self.__int__() > other
922 def __ge__(self, other):
923 return self.__int__() >= other
# copy() (def line missing from this extract): plain shallow copy.
927 return copy.copy(self)
# Standard dependency priority; adds satisfied/optional/rebuild flags.
# NOTE(review): gapped extract -- __int__ and most of __str__ are missing.
929 class DepPriority(AbstractDepPriority):
931 __slots__ = ("satisfied", "optional", "rebuild")
# __str__ fragment: label the priority by its strongest set flag.
943 if self.runtime_post:
944 return "runtime_post"
# Priority used for blocker edges; a single shared instance suffices
# (body not visible in this extract).
947 class BlockerDepPriority(DepPriority):
955 BlockerDepPriority.instance = BlockerDepPriority()
# Priority for unmerge-order edges. The docstring table maps flag
# combinations to integer values; SOFT is the threshold constant.
# NOTE(review): gapped extract -- __int__ and most of the table are missing.
957 class UnmergeDepPriority(AbstractDepPriority):
958 __slots__ = ("optional", "satisfied",)
960 Combination of properties Priority Category
965 (none of the above) -2 SOFT
# __int__/__str__ fragments (def lines not visible in this extract).
975 if self.runtime_post:
982 myvalue = self.__int__()
983 if myvalue > self.SOFT:
# Namespace of classmethod predicates used as ignore_priority filters when
# breaking cycles: each _ignore_* returns True for priorities soft enough
# to drop at that level. ignore_priority (set below) indexes them by level.
# NOTE(review): gapped extract -- @classmethod decorators and early-return
# lines are missing; confirm against full source.
987 class DepPriorityNormalRange(object):
989 DepPriority properties Index Category
993 runtime_post 2 MEDIUM_SOFT
995 (none of the above) 0 NONE
# Non-DepPriority instances are never ignored (the early return for
# that case is among the missing lines).
1003 def _ignore_optional(cls, priority):
1004 if priority.__class__ is not DepPriority:
1006 return bool(priority.optional)
1009 def _ignore_runtime_post(cls, priority):
1010 if priority.__class__ is not DepPriority:
1012 return bool(priority.optional or priority.runtime_post)
1015 def _ignore_runtime(cls, priority):
1016 if priority.__class__ is not DepPriority:
1018 return not priority.buildtime
# Aliases naming the strongest filter for each softness level.
1020 ignore_medium = _ignore_runtime
1021 ignore_medium_soft = _ignore_runtime_post
1022 ignore_soft = _ignore_optional
# Filters ordered from weakest to strongest, indexed by the table above.
1024 DepPriorityNormalRange.ignore_priority = (
1026 DepPriorityNormalRange._ignore_optional,
1027 DepPriorityNormalRange._ignore_runtime_post,
1028 DepPriorityNormalRange._ignore_runtime
# Like DepPriorityNormalRange, but distinguishes satisfied dependencies
# (already-installed) so they can be ignored before unsatisfied ones.
# NOTE(review): gapped extract -- @classmethod decorators and early-return
# lines are missing; confirm against full source.
1031 class DepPrioritySatisfiedRange(object):
1033 DepPriority Index Category
1035 not satisfied and buildtime HARD
1036 not satisfied and runtime 7 MEDIUM
1037 not satisfied and runtime_post 6 MEDIUM_SOFT
1038 satisfied and buildtime and rebuild 5 SOFT
1039 satisfied and buildtime 4 SOFT
1040 satisfied and runtime 3 SOFT
1041 satisfied and runtime_post 2 SOFT
1043 (none of the above) 0 NONE
1051 def _ignore_optional(cls, priority):
1052 if priority.__class__ is not DepPriority:
1054 return bool(priority.optional)
1057 def _ignore_satisfied_runtime_post(cls, priority):
1058 if priority.__class__ is not DepPriority:
1060 if priority.optional:
1062 if not priority.satisfied:
1064 return bool(priority.runtime_post)
1067 def _ignore_satisfied_runtime(cls, priority):
1068 if priority.__class__ is not DepPriority:
1070 if priority.optional:
1072 if not priority.satisfied:
1074 return not priority.buildtime
1077 def _ignore_satisfied_buildtime(cls, priority):
1078 if priority.__class__ is not DepPriority:
1080 if priority.optional:
1082 if not priority.satisfied:
# Satisfied buildtime deps are ignorable unless marked for rebuild.
1084 if priority.buildtime:
1085 return not priority.rebuild
1089 def _ignore_satisfied_buildtime_rebuild(cls, priority):
1090 if priority.__class__ is not DepPriority:
1092 if priority.optional:
1094 return bool(priority.satisfied)
1097 def _ignore_runtime_post(cls, priority):
1098 if priority.__class__ is not DepPriority:
1100 return bool(priority.optional or \
1101 priority.satisfied or \
1102 priority.runtime_post)
1105 def _ignore_runtime(cls, priority):
1106 if priority.__class__ is not DepPriority:
1108 return bool(priority.satisfied or \
1109 not priority.buildtime)
# Aliases naming the strongest filter for each softness level.
1111 ignore_medium = _ignore_runtime
1112 ignore_medium_soft = _ignore_runtime_post
1113 ignore_soft = _ignore_satisfied_buildtime_rebuild
# Filters ordered from weakest to strongest, indexed by the table above.
1115 DepPrioritySatisfiedRange.ignore_priority = (
1117 DepPrioritySatisfiedRange._ignore_optional,
1118 DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1119 DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1120 DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1121 DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1122 DepPrioritySatisfiedRange._ignore_runtime_post,
1123 DepPrioritySatisfiedRange._ignore_runtime
# Collect all packages reachable from the system set via runtime-only
# dependency edges (iterative DFS over the depgraph).
# NOTE(review): gapped extract -- the seeding loop header, while-loop
# header, and several returns/continues are missing lines.
1126 def _find_deep_system_runtime_deps(graph):
1127 deep_system_deps = set()
# Seed the stack with system-set packages (excluding uninstalls).
1130 if not isinstance(node, Package) or \
1131 node.operation == 'uninstall':
1133 if node.root_config.sets['system'].findAtomForPackage(node):
1134 node_stack.append(node)
1136 def ignore_priority(priority):
1138 Ignore non-runtime priorities.
1140 if isinstance(priority, DepPriority) and \
1141 (priority.runtime or priority.runtime_post):
# DFS: pop, skip already-seen nodes, record, then push runtime children.
1146 node = node_stack.pop()
1147 if node in deep_system_deps:
1149 deep_system_deps.add(node)
1150 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1151 if not isinstance(child, Package) or \
1152 child.operation == 'uninstall':
1154 node_stack.append(child)
1156 return deep_system_deps
1158 class FakeVartree(portage.vartree):
1159 """This is implements an in-memory copy of a vartree instance that provides
1160 all the interfaces required for use by the depgraph. The vardb is locked
1161 during the constructor call just long enough to read a copy of the
1162 installed package information. This allows the depgraph to do it's
1163 dependency calculations without holding a lock on the vardb. It also
1164 allows things like vardb global updates to be done in memory so that the
1165 user doesn't necessarily need write access to the vardb in cases where
1166 global updates are necessary (updates are performed when necessary if there
1167 is not a matching ebuild in the tree)."""
1168 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1169 self._root_config = root_config
1170 if pkg_cache is None:
1172 real_vartree = root_config.trees["vartree"]
1173 portdb = root_config.trees["porttree"].dbapi
1174 self.root = real_vartree.root
1175 self.settings = real_vartree.settings
1176 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1177 if "_mtime_" not in mykeys:
1178 mykeys.append("_mtime_")
1179 self._db_keys = mykeys
1180 self._pkg_cache = pkg_cache
1181 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1182 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1184 # At least the parent needs to exist for the lock file.
1185 portage.util.ensure_dirs(vdb_path)
1186 except portage.exception.PortageException:
1190 if acquire_lock and os.access(vdb_path, os.W_OK):
1191 vdb_lock = portage.locks.lockdir(vdb_path)
1192 real_dbapi = real_vartree.dbapi
1194 for cpv in real_dbapi.cpv_all():
1195 cache_key = ("installed", self.root, cpv, "nomerge")
1196 pkg = self._pkg_cache.get(cache_key)
1198 metadata = pkg.metadata
1200 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1201 myslot = metadata["SLOT"]
1202 mycp = portage.dep_getkey(cpv)
1203 myslot_atom = "%s:%s" % (mycp, myslot)
1205 mycounter = long(metadata["COUNTER"])
1208 metadata["COUNTER"] = str(mycounter)
1209 other_counter = slot_counters.get(myslot_atom, None)
1210 if other_counter is not None:
1211 if other_counter > mycounter:
1213 slot_counters[myslot_atom] = mycounter
1215 pkg = Package(built=True, cpv=cpv,
1216 installed=True, metadata=metadata,
1217 root_config=root_config, type_name="installed")
1218 self._pkg_cache[pkg] = pkg
1219 self.dbapi.cpv_inject(pkg)
1220 real_dbapi.flush_cache()
1223 portage.locks.unlockdir(vdb_lock)
1224 # Populate the old-style virtuals using the cached values.
1225 if not self.settings.treeVirtuals:
1226 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1227 portage.getCPFromCPV, self.get_all_provides())
1229 # Intialize variables needed for lazy cache pulls of the live ebuild
1230 # metadata. This ensures that the vardb lock is released ASAP, without
1231 # being delayed in case cache generation is triggered.
1232 self._aux_get = self.dbapi.aux_get
1233 self.dbapi.aux_get = self._aux_get_wrapper
1234 self._match = self.dbapi.match
1235 self.dbapi.match = self._match_wrapper
1236 self._aux_get_history = set()
1237 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1238 self._portdb = portdb
1239 self._global_updates = None
1241 def _match_wrapper(self, cpv, use_cache=1):
1243 Make sure the metadata in Package instances gets updated for any
1244 cpv that is returned from a match() call, since the metadata can
1245 be accessed directly from the Package instance instead of via
1248 matches = self._match(cpv, use_cache=use_cache)
1250 if cpv in self._aux_get_history:
1252 self._aux_get_wrapper(cpv, [])
1255 def _aux_get_wrapper(self, pkg, wants):
1256 if pkg in self._aux_get_history:
1257 return self._aux_get(pkg, wants)
1258 self._aux_get_history.add(pkg)
1260 # Use the live ebuild metadata if possible.
1261 live_metadata = dict(izip(self._portdb_keys,
1262 self._portdb.aux_get(pkg, self._portdb_keys)))
1263 if not portage.eapi_is_supported(live_metadata["EAPI"]):
1265 self.dbapi.aux_update(pkg, live_metadata)
1266 except (KeyError, portage.exception.PortageException):
1267 if self._global_updates is None:
1268 self._global_updates = \
1269 grab_global_updates(self._portdb.porttree_root)
1270 perform_global_updates(
1271 pkg, self.dbapi, self._global_updates)
1272 return self._aux_get(pkg, wants)
    def sync(self, acquire_lock=1):
        """
        Call this method to synchronize state with the real vardb
        after one or more packages may have been installed or
        uninstalled.

        @param acquire_lock: when true (and the vdb is writable), hold
            the vdb lock while reading, so the snapshot is consistent.
        """
        vdb_path = os.path.join(self.root, portage.VDB_PATH)
        # At least the parent needs to exist for the lock file.
        portage.util.ensure_dirs(vdb_path)
        # NOTE(review): several statements (the try/finally framing for
        # the lock, the mtime half of the comparison below, and the
        # slot_counters initialization) appear to be elided from this
        # view -- confirm against the full source.
        except portage.exception.PortageException:
        if acquire_lock and os.access(vdb_path, os.W_OK):
            vdb_lock = portage.locks.lockdir(vdb_path)
        portage.locks.unlockdir(vdb_lock)
        real_vardb = self._root_config.trees["vartree"].dbapi
        current_cpv_set = frozenset(real_vardb.cpv_all())
        pkg_vardb = self.dbapi
        aux_get_history = self._aux_get_history
        # Remove any packages that have been uninstalled.
        for pkg in list(pkg_vardb):
            if pkg.cpv not in current_cpv_set:
                pkg_vardb.cpv_remove(pkg)
                aux_get_history.discard(pkg.cpv)
        # Validate counters and timestamps.
        validation_keys = ["COUNTER", "_mtime_"]
        for cpv in current_cpv_set:
            pkg_hash_key = ("installed", root, cpv, "nomerge")
            pkg = pkg_vardb.get(pkg_hash_key)
            counter, mtime = real_vardb.aux_get(cpv, validation_keys)
            counter = long(counter)
            # Drop a cached entry whose counter no longer matches the
            # real vardb, then rebuild it below.
            if counter != pkg.counter or \
                pkg_vardb.cpv_remove(pkg)
                aux_get_history.discard(pkg.cpv)
            # Re-create the cache entry from the real vardb metadata.
            pkg = self._pkg(cpv)
            other_counter = slot_counters.get(pkg.slot_atom)
            if other_counter is not None:
                if other_counter > pkg.counter:
            slot_counters[pkg.slot_atom] = pkg.counter
            pkg_vardb.cpv_inject(pkg)
        real_vardb.flush_cache()
    def _pkg(self, cpv):
        """
        Construct an "installed" Package instance for the given cpv,
        populated with metadata from the real vartree dbapi.
        """
        root_config = self._root_config
        real_vardb = root_config.trees["vartree"].dbapi
        pkg = Package(cpv=cpv, installed=True,
            metadata=izip(self._db_keys,
            real_vardb.aux_get(cpv, self._db_keys)),
            root_config=root_config,
            type_name="installed")
        # Normalize COUNTER to the canonical string form of a long.
        # NOTE(review): the try/except handling an invalid COUNTER and
        # the final "return pkg" appear to be elided from this view.
        mycounter = long(pkg.metadata["COUNTER"])
        pkg.metadata["COUNTER"] = str(mycounter)
def grab_global_updates(portdir):
    """
    Read the profiles/updates/* files from the given portage tree and
    return the parsed update commands (package moves / slot moves).
    """
    from portage.update import grab_updates, parse_updates
    updpath = os.path.join(portdir, "profiles", "updates")
    # NOTE(review): the "try:" line, the upd_commands initialization and
    # the final return appear to be elided from this view -- confirm
    # against the full source.
    rawupdates = grab_updates(updpath)
    except portage.exception.DirectoryNotFound:
    for mykey, mystat, mycontent in rawupdates:
        commands, errors = parse_updates(mycontent)
        upd_commands.extend(commands)
def perform_global_updates(mycpv, mydb, mycommands):
    """
    Apply profile update commands (package moves / slot moves) to the
    dependency metadata of a single package in the given dbapi.
    """
    from portage.update import update_dbentries
    # Fetch the current dependency strings for this package...
    dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
    dep_map = dict(izip(dep_keys, mydb.aux_get(mycpv, dep_keys)))
    # ...rewrite them according to the update commands and store them.
    mydb.aux_update(mycpv, update_dbentries(mycommands, dep_map))
def visible(pkgsettings, pkg):
    """
    Check if a package is visible. This can raise an InvalidDependString
    exception if LICENSE is invalid.
    TODO: optionally generate a list of masking reasons

    @returns: True if the package is visible, False otherwise.
    """
    # NOTE(review): the "return False"/"return True" statements and the
    # try framing for the license check appear to be elided from this
    # view; each check below masks the package when it fails.
    if not pkg.metadata["SLOT"]:
    # Built (binary) packages must have an acceptable CHOST.
    if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
        if not pkgsettings._accept_chost(pkg):
    eapi = pkg.metadata["EAPI"]
    if not portage.eapi_is_supported(eapi):
    if not pkg.installed:
        if portage._eapi_is_deprecated(eapi):
        if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
    # package.mask / profile masks / license checks:
    if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
    if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
    if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
    except portage.exception.InvalidDependString:
def get_masking_status(pkg, pkgsettings, root_config):
    """
    Return the list of mask reason strings for the given package,
    extending portage.getmaskingstatus() with CHOST and SLOT checks.
    """
    mreasons = portage.getmaskingstatus(
        pkg, settings=pkgsettings,
        portdb=root_config.trees["porttree"].dbapi)
    # A built package with an unacceptable CHOST is also masked.
    if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
        if not pkgsettings._accept_chost(pkg):
            mreasons.append("CHOST: %s" % \
                pkg.metadata["CHOST"])
    if not pkg.metadata["SLOT"]:
        mreasons.append("invalid: SLOT is undefined")
    # NOTE(review): the final "return mreasons" appears to be elided
    # from this view.
def get_mask_info(root_config, cpv, pkgsettings,
    db, pkg_type, built, installed, db_keys):
    """
    Fetch metadata for cpv from the given db and compute its masking
    status. Returns (metadata, mreasons); metadata of None (corrupt
    entry) yields mreasons == ["corruption"].
    """
    # NOTE(review): the try/except that sets metadata to None on a
    # KeyError, and the else branch framing, appear to be elided from
    # this view -- confirm against the full source.
    metadata = dict(izip(db_keys,
        db.aux_get(cpv, db_keys)))
    # For ebuilds (not built packages), compute USE from the profile.
    if metadata and not built:
        pkgsettings.setcpv(cpv, mydb=metadata)
        metadata["USE"] = pkgsettings["PORTAGE_USE"]
    if metadata is None:
        mreasons = ["corruption"]
    pkg = Package(type_name=pkg_type, root_config=root_config,
        cpv=cpv, built=built, installed=installed, metadata=metadata)
    mreasons = get_masking_status(pkg, pkgsettings, root_config)
    return metadata, mreasons
def show_masked_packages(masked_packages):
    """
    Print each masked package with its mask reasons, the package.mask
    comment (once per distinct comment) and the location of any missing
    licenses (once per license). Returns True if any package was masked
    due to an unsupported EAPI.
    """
    shown_licenses = set()
    shown_comments = set()
    # Maybe there is both an ebuild and a binary. Only
    # show one of them to avoid redundant appearance.
    # NOTE(review): the shown_cpvs initialization, several "continue"
    # statements and the printing of the comment/license messages appear
    # to be elided from this view -- confirm against the full source.
    have_eapi_mask = False
    for (root_config, pkgsettings, cpv,
        metadata, mreasons) in masked_packages:
        if cpv in shown_cpvs:
        comment, filename = None, None
        if "package.mask" in mreasons:
            comment, filename = \
                portage.getmaskingreason(
                cpv, metadata=metadata,
                settings=pkgsettings,
                portdb=root_config.trees["porttree"].dbapi,
                return_location=True)
        missing_licenses = []
        if not portage.eapi_is_supported(metadata["EAPI"]):
            have_eapi_mask = True
        missing_licenses = \
            pkgsettings._getMissingLicenses(
        except portage.exception.InvalidDependString:
            # This will have already been reported
            # above via mreasons.
        print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
        if comment and comment not in shown_comments:
            shown_comments.add(comment)
        portdb = root_config.trees["porttree"].dbapi
        for l in missing_licenses:
            l_path = portdb.findLicensePath(l)
            if l in shown_licenses:
            msg = ("A copy of the '%s' license" + \
                " is located at '%s'.") % (l, l_path)
            shown_licenses.add(l)
    return have_eapi_mask
class Task(SlotObject):
    """
    Base class for hashable task objects. Subclasses supply a hash key
    via _get_hash_key(); equality, hashing and the container protocol
    all delegate to that key.
    """
    __slots__ = ("_hash_key", "_hash_value")

    def _get_hash_key(self):
        # Subclasses are expected to populate self._hash_key lazily.
        hash_key = getattr(self, "_hash_key", None)
        if hash_key is None:
            raise NotImplementedError(self)
        # NOTE(review): the "return hash_key" appears to be elided from
        # this view.

    def __eq__(self, other):
        return self._get_hash_key() == other

    def __ne__(self, other):
        return self._get_hash_key() != other

    # NOTE(review): the def lines of the following methods appear to be
    # elided from this view; judging by their bodies these are
    # presumably __hash__, __len__, __iter__ and __str__ -- confirm
    # against the full source.
        # Cache the hash of the hash key on first use.
        hash_value = getattr(self, "_hash_value", None)
        if hash_value is None:
            self._hash_value = hash(self._get_hash_key())
        return self._hash_value

        return len(self._get_hash_key())

    def __getitem__(self, key):
        return self._get_hash_key()[key]

        return iter(self._get_hash_key())

    def __contains__(self, key):
        return key in self._get_hash_key()

        return str(self._get_hash_key())
class Blocker(Task):
    """A blocker atom encountered during dependency calculation."""

    # Keep Task's key-based hashing despite redefining __eq__ semantics
    # via the hash key tuple below.
    __hash__ = Task.__hash__
    __slots__ = ("root", "atom", "cp", "eapi", "satisfied")

    def __init__(self, **kwargs):
        Task.__init__(self, **kwargs)
        # Cache the category/package portion of the blocker atom.
        self.cp = portage.dep_getkey(self.atom)

    def _get_hash_key(self):
        hash_key = getattr(self, "_hash_key", None)
        if hash_key is None:
            # NOTE(review): the "self._hash_key = \" assignment line
            # appears to be elided from this view.
            ("blocks", self.root, self.atom, self.eapi)
        return self._hash_key
class Package(Task):
    """
    A hashable package representation (ebuild, binary or installed),
    identified by (type_name, root, cpv, operation). Metadata access
    goes through _PackageMetadataWrapper, which keeps derived
    attributes (counter, inherited, iuse, slot, use, mtime) in sync.
    """

    __hash__ = Task.__hash__
    __slots__ = ("built", "cpv", "depth",
        "installed", "metadata", "onlydeps", "operation",
        "root_config", "type_name",
        "category", "counter", "cp", "cpv_split",
        "inherited", "iuse", "mtime",
        "pf", "pv_split", "root", "slot", "slot_atom", "use")

    # Metadata keys cached for every Package instance.
    # NOTE(review): the "metadata_keys = [" opening line appears to be
    # elided from this view.
        "CHOST", "COUNTER", "DEPEND", "EAPI",
        "INHERITED", "IUSE", "KEYWORDS",
        "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
        "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]

    def __init__(self, **kwargs):
        Task.__init__(self, **kwargs)
        self.root = self.root_config.root
        # Wrap the raw metadata so key assignments update attributes.
        self.metadata = _PackageMetadataWrapper(self, self.metadata)
        self.cp = portage.cpv_getkey(self.cpv)
        self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, self.slot))
        self.category, self.pf = portage.catsplit(self.cpv)
        self.cpv_split = portage.catpkgsplit(self.cpv)
        self.pv_split = self.cpv_split[1:]

    # NOTE(review): the "class _use(object):" header appears to be
    # elided from this view -- the following __slots__/__init__ belong
    # to the nested USE-flag holder class.
        __slots__ = ("__weakref__", "enabled")

        def __init__(self, use):
            self.enabled = frozenset(use)

    class _iuse(object):
        """Nested holder for a package's IUSE tokens."""

        __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")

        def __init__(self, tokens, iuse_implicit):
            self.tokens = tuple(tokens)
            self.iuse_implicit = iuse_implicit
            # NOTE(review): the loop classifying tokens by +/- prefix
            # (and the enabled/disabled/other initializations) appears
            # to be elided from this view.
            enabled.append(x[1:])
            disabled.append(x[1:])
            self.enabled = frozenset(enabled)
            self.disabled = frozenset(disabled)
            self.all = frozenset(chain(enabled, disabled, other))

        def __getattribute__(self, name):
            # Lazily build and cache the IUSE-matching regex the first
            # time the "regex" attribute is accessed.
            # NOTE(review): the "if name == ..." / "try:" framing
            # appears to be elided from this view.
                return object.__getattribute__(self, "regex")
            except AttributeError:
                all = object.__getattribute__(self, "all")
                iuse_implicit = object.__getattribute__(self, "iuse_implicit")
                # Escape anything except ".*" which is supposed
                # to pass through from _get_implicit_iuse()
                regex = (re.escape(x) for x in chain(all, iuse_implicit))
                regex = "^(%s)$" % "|".join(regex)
                regex = regex.replace("\\.\\*", ".*")
                self.regex = re.compile(regex)
            return object.__getattribute__(self, name)

    def _get_hash_key(self):
        hash_key = getattr(self, "_hash_key", None)
        if hash_key is None:
            # Default operation: "merge" unless this package is
            # onlydeps or already installed.
            if self.operation is None:
                self.operation = "merge"
                if self.onlydeps or self.installed:
                    self.operation = "nomerge"
            # NOTE(review): the "self._hash_key = \" assignment line
            # appears to be elided from this view.
            (self.type_name, self.root, self.cpv, self.operation)
        return self._hash_key

    # Rich comparisons order packages of the same cp by version via
    # portage.pkgcmp().
    # NOTE(review): the "return False"/"return True" statements of the
    # four comparison methods appear to be elided from this view.
    def __lt__(self, other):
        if other.cp != self.cp:
        if portage.pkgcmp(self.pv_split, other.pv_split) < 0:

    def __le__(self, other):
        if other.cp != self.cp:
        if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:

    def __gt__(self, other):
        if other.cp != self.cp:
        if portage.pkgcmp(self.pv_split, other.pv_split) > 0:

    def __ge__(self, other):
        if other.cp != self.cp:
        if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
# All metadata keys portage knows about, minus UNUSED_ placeholders and
# the obsolete CDEPEND, plus the keys that Package caches.
_all_metadata_keys = set(x for x in portage.auxdbkeys \
    if not x.startswith("UNUSED_"))
_all_metadata_keys.discard("CDEPEND")
_all_metadata_keys.update(Package.metadata_keys)

from portage.cache.mappings import slot_dict_class
# Slot-based dict class (one slot per metadata key) used as the base
# class of _PackageMetadataWrapper below.
_PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
    """
    Detect metadata updates and synchronize Package attributes.
    """

    __slots__ = ("_pkg",)
    # Keys whose assignment triggers the corresponding _set_<key>()
    # hook in __setitem__.
    _wrapped_keys = frozenset(
        ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])

    def __init__(self, pkg, metadata):
        _PackageMetadataWrapperBase.__init__(self)
        # NOTE(review): the "self._pkg = pkg" assignment appears to be
        # elided from this view.
        self.update(metadata)

    def __setitem__(self, k, v):
        _PackageMetadataWrapperBase.__setitem__(self, k, v)
        if k in self._wrapped_keys:
            getattr(self, "_set_" + k.lower())(k, v)

    def _set_inherited(self, k, v):
        # Normalize INHERITED from a whitespace-separated string into a
        # frozenset on the owning Package.
        if isinstance(v, basestring):
            v = frozenset(v.split())
        self._pkg.inherited = v

    def _set_iuse(self, k, v):
        self._pkg.iuse = self._pkg._iuse(
            v.split(), self._pkg.root_config.iuse_implicit)

    def _set_slot(self, k, v):
        # NOTE(review): the body (presumably "self._pkg.slot = v")
        # appears to be elided from this view.

    def _set_use(self, k, v):
        self._pkg.use = self._pkg._use(v.split())

    def _set_counter(self, k, v):
        # NOTE(review): the conversion of a string COUNTER to a long
        # (with a fallback) appears to be elided from this view.
        if isinstance(v, basestring):
        self._pkg.counter = v

    def _set__mtime_(self, k, v):
        # NOTE(review): the conversion and the assignment to
        # self._pkg.mtime appear to be elided from this view.
        if isinstance(v, basestring):
class EbuildFetchonly(SlotObject):
    """
    Synchronously fetch (or, with pretend, list) the distfiles of a
    single ebuild by invoking its "fetch" phase.
    """

    __slots__ = ("fetch_all", "pkg", "pretend", "settings")

    # NOTE(review): the def line of the entry method (and its binding
    # of pkg) appears to be elided from this view.
        settings = self.settings
        portdb = pkg.root_config.trees["porttree"].dbapi
        ebuild_path = portdb.findname(pkg.cpv)
        settings.setcpv(pkg)
        debug = settings.get("PORTAGE_DEBUG") == "1"
        use_cache = 1 # always true
        portage.doebuild_environment(ebuild_path, "fetch",
            settings["ROOT"], settings, debug, use_cache, portdb)
        restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
        # RESTRICT=fetch needs a build dir so pkg_nofetch can be
        # spawned; otherwise doebuild() is called directly.
        rval = self._execute_with_builddir()
        rval = portage.doebuild(ebuild_path, "fetch",
            settings["ROOT"], settings, debug=debug,
            listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
            mydbapi=portdb, tree="porttree")
        if rval != os.EX_OK:
            msg = "Fetch failed for '%s'" % (pkg.cpv,)
            eerror(msg, phase="unpack", key=pkg.cpv)

    def _execute_with_builddir(self):
        # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
        # ensuring sane $PWD (bug #239560) and storing elog
        # messages. Use a private temp directory, in order
        # to avoid locking the main one.
        settings = self.settings
        global_tmpdir = settings["PORTAGE_TMPDIR"]
        from tempfile import mkdtemp
        # NOTE(review): the try/except framing around mkdtemp and the
        # try/finally around _execute() appear to be elided from this
        # view -- PORTAGE_TMPDIR is restored and the private tmpdir
        # removed on the way out.
        private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
        if e.errno != portage.exception.PermissionDenied.errno:
        raise portage.exception.PermissionDenied(global_tmpdir)
        settings["PORTAGE_TMPDIR"] = private_tmpdir
        settings.backup_changes("PORTAGE_TMPDIR")
        retval = self._execute()
        settings["PORTAGE_TMPDIR"] = global_tmpdir
        settings.backup_changes("PORTAGE_TMPDIR")
        shutil.rmtree(private_tmpdir)

    # NOTE(review): the def line of _execute (and its binding of pkg)
    # appears to be elided from this view.
        settings = self.settings
        root_config = pkg.root_config
        portdb = root_config.trees["porttree"].dbapi
        ebuild_path = portdb.findname(pkg.cpv)
        debug = settings.get("PORTAGE_DEBUG") == "1"
        portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
        retval = portage.doebuild(ebuild_path, "fetch",
            self.settings["ROOT"], self.settings, debug=debug,
            listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
            mydbapi=portdb, tree="porttree")
        if retval != os.EX_OK:
            msg = "Fetch failed for '%s'" % (pkg.cpv,)
            eerror(msg, phase="unpack", key=pkg.cpv)
        # Flush any elog messages produced by the fetch/nofetch phases.
        portage.elog.elog_process(self.pkg.cpv, self.settings)
class PollConstants(object):

    """
    Provides POLL* constants that are equivalent to those from the
    select module, for use by PollSelectAdapter.
    """

    names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
    # Copy each constant from the select module when available; the
    # fallback value comes from getattr's third argument.
    # NOTE(review): the loop header over names (and the fallback value
    # initialization/update) appears to be elided from this view.
        locals()[k] = getattr(select, k, v)
class AsynchronousTask(SlotObject):
    """
    Subclasses override _wait() and _poll() so that calls
    to public methods can be wrapped for implementing
    hooks such as exit listener notification.

    Sublasses should call self.wait() to notify exit listeners after
    the task is complete and self.returncode has been set.
    """

    __slots__ = ("background", "cancelled", "returncode") + \
        ("_exit_listeners", "_exit_listener_stack", "_start_listeners")

    # NOTE(review): several method def lines (presumably start,
    # isAlive, _poll, poll, wait and cancel) appear to be elided from
    # this view; only fragments of their bodies remain below.
        """
        Start an asynchronous task and then return as soon as possible.
        """
        raise NotImplementedError(self)

        # A task is alive while its returncode is unset.
        return self.returncode is None

        return self.returncode

        if self.returncode is None:
        return self.returncode

        return self.returncode

        self.cancelled = True

    def addStartListener(self, f):
        """
        The function will be called with one argument, a reference to self.
        """
        if self._start_listeners is None:
            self._start_listeners = []
        self._start_listeners.append(f)

    def removeStartListener(self, f):
        # NOTE(review): the early return for a None listener list
        # appears to be elided from this view.
        if self._start_listeners is None:
        self._start_listeners.remove(f)

    def _start_hook(self):
        # Fire (and clear) the start listeners exactly once.
        if self._start_listeners is not None:
            start_listeners = self._start_listeners
            self._start_listeners = None
            # NOTE(review): the listener invocation inside this loop
            # appears to be elided from this view.
            for f in start_listeners:

    def addExitListener(self, f):
        """
        The function will be called with one argument, a reference to self.
        """
        if self._exit_listeners is None:
            self._exit_listeners = []
        self._exit_listeners.append(f)

    def removeExitListener(self, f):
        # Listeners may also live on the in-progress stack used by
        # _wait_hook(); remove from there when necessary.
        if self._exit_listeners is None:
            if self._exit_listener_stack is not None:
                self._exit_listener_stack.remove(f)
        self._exit_listeners.remove(f)

    def _wait_hook(self):
        """
        Call this method after the task completes, just before returning
        the returncode from wait() or poll(). This hook is
        used to trigger exit listeners when the returncode first
        becomes available.
        """
        if self.returncode is not None and \
            self._exit_listeners is not None:

            # This prevents recursion, in case one of the
            # exit handlers triggers this method again by
            # calling wait(). Use a stack that gives
            # removeExitListener() an opportunity to consume
            # listeners from the stack, before they can get
            # called below. This is necessary because a call
            # to one exit listener may result in a call to
            # removeExitListener() for another listener on
            # the stack. That listener needs to be removed
            # from the stack since it would be inconsistent
            # to call it after it has been been passed into
            # removeExitListener().
            self._exit_listener_stack = self._exit_listeners
            self._exit_listeners = None

            self._exit_listener_stack.reverse()
            while self._exit_listener_stack:
                self._exit_listener_stack.pop()(self)
class AbstractPollTask(AsynchronousTask):
    """
    Base class for tasks that are driven by a scheduler's poll() loop.
    """

    # NOTE(review): the __slots__ continuation line (presumably naming
    # "_registered") appears to be elided from this view.
    __slots__ = ("scheduler",) + \

    # Event masks derived from PollConstants.
    _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
    # NOTE(review): the continuation of this expression appears to be
    # elided from this view.
    _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \

    def _unregister(self):
        raise NotImplementedError(self)

    def _unregister_if_appropriate(self, event):
        # Unregister on error/invalid or hangup events.
        # NOTE(review): the bodies of the branches below appear to be
        # elided from this view.
        if self._registered:
            if event & self._exceptional_events:
            elif event & PollConstants.POLLHUP:
class PipeReader(AbstractPollTask):

    """
    Reads output from one or more files and saves it in memory,
    for retrieval via the getvalue() method. This is driven by
    the scheduler's poll() loop, so it runs entirely within the
    main thread.
    """

    __slots__ = ("input_files",) + \
        ("_read_data", "_reg_ids")

    # NOTE(review): the def line of _start() appears to be elided from
    # this view.
        self._reg_ids = set()
        self._read_data = []
        # Put every input fd into non-blocking mode and register it
        # with the scheduler.
        for k, f in self.input_files.iteritems():
            fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
                fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
            self._reg_ids.add(self.scheduler.register(f.fileno(),
                self._registered_events, self._output_handler))
        self._registered = True

    # NOTE(review): def line elided (presumably isAlive()):
        return self._registered

    # NOTE(review): def line elided (presumably cancel()):
        if self.returncode is None:
        self.cancelled = True

    # NOTE(review): def line elided (presumably _wait()):
        if self.returncode is not None:
            return self.returncode
        if self._registered:
            self.scheduler.schedule(self._reg_ids)
        self.returncode = os.EX_OK
        return self.returncode

    # NOTE(review): def line elided (getvalue(), per the class doc):
        """Retrieve the entire contents"""
        return "".join(self._read_data)

    # NOTE(review): def line elided (presumably close()):
        """Free the memory buffer."""
        self._read_data = None

    def _output_handler(self, fd, event):
        # Drain readable data from the file matching fd and stash it.
        # NOTE(review): the loop/EOF framing appears partially elided
        # from this view.
        if event & PollConstants.POLLIN:
            for f in self.input_files.itervalues():
                if fd == f.fileno():
            buf = array.array('B')
            buf.fromfile(f, self._bufsize)
            self._read_data.append(buf.tostring())
        self._unregister_if_appropriate(event)
        return self._registered

    def _unregister(self):
        """
        Unregister from the scheduler and close open files.
        """
        self._registered = False

        if self._reg_ids is not None:
            for reg_id in self._reg_ids:
                self.scheduler.unregister(reg_id)
            self._reg_ids = None

        if self.input_files is not None:
            # NOTE(review): the f.close() call appears to be elided
            # from this view.
            for f in self.input_files.itervalues():
            self.input_files = None
class CompositeTask(AsynchronousTask):
    """
    A task implemented as a chain of sub-tasks; at most one sub-task
    (self._current_task) runs at a time.
    """

    __slots__ = ("scheduler",) + ("_current_task",)

    # NOTE(review): def line elided (presumably isAlive()):
        return self._current_task is not None

    # NOTE(review): def line elided (presumably cancel()):
        self.cancelled = True
        if self._current_task is not None:
            self._current_task.cancel()

    # NOTE(review): def line elided (presumably _poll()):
        """
        This does a loop calling self._current_task.poll()
        repeatedly as long as the value of self._current_task
        keeps changing. It calls poll() a maximum of one time
        for a given self._current_task instance. This is useful
        since calling poll() on a task can trigger advance to
        the next task could eventually lead to the returncode
        being set in cases when polling only a single task would
        not have the same effect.
        """
        # NOTE(review): the loop framing and the prev-tracking appear
        # to be elided from this view.
        task = self._current_task
        if task is None or task is prev:
            # don't poll the same task more than once
        return self.returncode

    # NOTE(review): def line elided (presumably _wait()); the loop
    # framing also appears to be elided.
        task = self._current_task
        # don't wait for the same task more than once
        # Before the task.wait() method returned, an exit
        # listener should have set self._current_task to either
        # a different task or None. Something is wrong.
        raise AssertionError("self._current_task has not " + \
            "changed since calling wait", self, task)
        return self.returncode

    def _assert_current(self, task):
        """
        Raises an AssertionError if the given task is not the
        same one as self._current_task. This can be useful
        for detecting bugs.
        """
        if task is not self._current_task:
            raise AssertionError("Unrecognized task: %s" % (task,))

    def _default_exit(self, task):
        """
        Calls _assert_current() on the given task and then sets the
        composite returncode attribute if task.returncode != os.EX_OK.
        If the task failed then self._current_task will be set to None.
        Subclasses can use this as a generic task exit callback.

        @returns: The task.returncode attribute.
        """
        self._assert_current(task)
        if task.returncode != os.EX_OK:
            self.returncode = task.returncode
            self._current_task = None
        return task.returncode

    def _final_exit(self, task):
        """
        Assumes that task is the final task of this composite task.
        Calls _default_exit() and sets self.returncode to the task's
        returncode and sets self._current_task to None.
        """
        self._default_exit(task)
        self._current_task = None
        self.returncode = task.returncode
        return self.returncode

    def _default_final_exit(self, task):
        """
        This calls _final_exit() and then wait().

        Subclasses can use this as a generic final task exit callback.
        """
        # NOTE(review): the trailing "return self.wait()" appears to be
        # elided from this view.
        self._final_exit(task)

    def _start_task(self, task, exit_handler):
        """
        Register exit handler for the given task, set it
        as self._current_task, and call task.start().

        Subclasses can use this as a generic way to start
        a sub-task.
        """
        # NOTE(review): the trailing "task.start()" appears to be
        # elided from this view.
        task.addExitListener(exit_handler)
        self._current_task = task
class TaskSequence(CompositeTask):
    """
    A collection of tasks that executes sequentially. Each task
    must have a addExitListener() method that can be used as
    a means to trigger movement from one task to the next.
    """

    __slots__ = ("_task_queue",)

    def __init__(self, **kwargs):
        AsynchronousTask.__init__(self, **kwargs)
        self._task_queue = deque()

    def add(self, task):
        self._task_queue.append(task)

    # NOTE(review): def line elided (presumably _start()):
        self._start_next_task()

    # NOTE(review): def line elided (presumably cancel()):
        self._task_queue.clear()
        CompositeTask.cancel(self)

    def _start_next_task(self):
        # Dequeue the next task and run it with our exit handler.
        self._start_task(self._task_queue.popleft(),
            self._task_exit_handler)

    def _task_exit_handler(self, task):
        # On failure stop; otherwise advance through the queue until
        # the final task finishes.
        # NOTE(review): the bodies of the failure branch and the else
        # framing around _final_exit appear to be elided from this view.
        if self._default_exit(task) != os.EX_OK:
        elif self._task_queue:
            self._start_next_task()
            self._final_exit(task)
class SubProcess(AbstractPollTask):
    """
    A poll-driven task wrapping a forked child process (self.pid).
    """

    __slots__ = ("pid",) + \
        ("_files", "_reg_id")

    # A file descriptor is required for the scheduler to monitor changes from
    # inside a poll() loop. When logging is not enabled, create a pipe just to
    # serve this purpose alone.
    # NOTE(review): the _dummy_pipe_fd constant assignment appears to
    # be elided from this view.

    # NOTE(review): def line elided (presumably _poll()); the
    # try/except framing around os.waitpid also appears elided.
        if self.returncode is not None:
            return self.returncode
        if self.pid is None:
            return self.returncode
        if self._registered:
            return self.returncode
        retval = os.waitpid(self.pid, os.WNOHANG)
        if e.errno != errno.ECHILD:
        retval = (self.pid, 1)
        if retval == (0, 0):
        self._set_returncode(retval)
        return self.returncode

    # NOTE(review): def line elided (presumably cancel()); the
    # try/except framing around os.kill also appears elided. ESRCH
    # (no such process) is tolerated.
        os.kill(self.pid, signal.SIGTERM)
        if e.errno != errno.ESRCH:
        self.cancelled = True
        if self.pid is not None:
        return self.returncode

    # NOTE(review): def line elided (presumably isAlive()):
        return self.pid is not None and \
            self.returncode is None

    # NOTE(review): def line elided (presumably _wait()); the
    # try/except framing around os.waitpid also appears elided.
        if self.returncode is not None:
            return self.returncode

        if self._registered:
            self.scheduler.schedule(self._reg_id)
            if self.returncode is not None:
                return self.returncode

        wait_retval = os.waitpid(self.pid, 0)
        if e.errno != errno.ECHILD:
        self._set_returncode((self.pid, 1))
        self._set_returncode(wait_retval)
        return self.returncode

    def _unregister(self):
        """
        Unregister from the scheduler and close open files.
        """
        self._registered = False

        if self._reg_id is not None:
            self.scheduler.unregister(self._reg_id)

        if self._files is not None:
            # NOTE(review): the f.close() call and the _files reset
            # appear to be elided from this view.
            for f in self._files.itervalues():

    def _set_returncode(self, wait_retval):
        # Translate an os.waitpid() status tuple into self.returncode.
        # NOTE(review): the branch selecting between the two retval
        # transformations (signal death vs. normal exit) appears to be
        # elided from this view -- confirm against the full source.
        retval = wait_retval[1]
        if retval != os.EX_OK:
            retval = (retval & 0xff) << 8
            retval = retval >> 8
        self.returncode = retval
class SpawnProcess(SubProcess):

    """
    Constructor keyword args are passed into portage.process.spawn().
    The required "args" keyword argument will be passed as the first
    argument to the spawn function.
    """

    # Attribute names forwarded as keyword arguments to the spawn
    # function when set.
    _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
        "uid", "gid", "groups", "umask", "logfile",
        "path_lookup", "pre_exec")

    # NOTE(review): the __slots__ continuation line appears to be
    # elided from this view.
    __slots__ = ("args",) + \

    _file_names = ("log", "process", "stdout")
    _files_dict = slot_dict_class(_file_names, prefix="")

    # NOTE(review): the def line of _start() appears to be elided from
    # this view, along with several statements noted below.
        # Default the standard fds from the parent when not supplied.
        if self.fd_pipes is None:
        fd_pipes = self.fd_pipes
        fd_pipes.setdefault(0, sys.stdin.fileno())
        fd_pipes.setdefault(1, sys.stdout.fileno())
        fd_pipes.setdefault(2, sys.stderr.fileno())

        # flush any pending output
        for fd in fd_pipes.itervalues():
            if fd == sys.stdout.fileno():
            if fd == sys.stderr.fileno():

        logfile = self.logfile
        self._files = self._files_dict()
        master_fd, slave_fd = self._pipe(fd_pipes)
        # Reads from the master end must never block the poll loop.
        fcntl.fcntl(master_fd, fcntl.F_SETFL,
            fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

        fd_pipes_orig = fd_pipes.copy()
        # TODO: Use job control functions like tcsetpgrp() to control
        # access to stdin. Until then, use /dev/null so that any
        # attempts to read from stdin will immediately return EOF
        # instead of blocking indefinitely.
        null_input = open('/dev/null', 'rb')
        fd_pipes[0] = null_input.fileno()
        fd_pipes[0] = fd_pipes_orig[0]

        files.process = os.fdopen(master_fd, 'rb')
        if logfile is not None:
            # Tee child output to the logfile (and, when not in the
            # background, to the original stdout).
            fd_pipes[1] = slave_fd
            fd_pipes[2] = slave_fd

            files.log = open(logfile, mode='ab')
            portage.util.apply_secpass_permissions(logfile,
                uid=portage.portage_uid, gid=portage.portage_gid,

            if not self.background:
                files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')

            output_handler = self._output_handler

            # Create a dummy pipe so the scheduler can monitor
            # the process from inside a poll() loop.
            fd_pipes[self._dummy_pipe_fd] = slave_fd
            fd_pipes[1] = slave_fd
            fd_pipes[2] = slave_fd
            output_handler = self._dummy_handler

        # Collect the spawn kwargs from same-named attributes.
        # NOTE(review): the kwargs initialization and the None-filter
        # inside this loop appear to be elided from this view.
        for k in self._spawn_kwarg_names:
            v = getattr(self, k)

        kwargs["fd_pipes"] = fd_pipes
        kwargs["returnpid"] = True
        kwargs.pop("logfile", None)

        self._reg_id = self.scheduler.register(files.process.fileno(),
            self._registered_events, output_handler)
        self._registered = True

        retval = self._spawn(self.args, **kwargs)

        if null_input is not None:

        # An int return value indicates that the spawn itself failed.
        if isinstance(retval, int):
            self.returncode = retval

        self.pid = retval[0]
        # This class reaps the child itself, so take it out of
        # portage's global reaping list.
        portage.process.spawned_pids.remove(self.pid)

    def _pipe(self, fd_pipes):
        """
        @type fd_pipes: dict
        @param fd_pipes: pipes from which to copy terminal size if desired.
        """
        # NOTE(review): the body (presumably "return os.pipe()")
        # appears to be elided from this view.

    def _spawn(self, args, **kwargs):
        return portage.process.spawn(args, **kwargs)

    def _output_handler(self, fd, event):
        # Copy available child output to stdout (unless backgrounded)
        # and to the log file.
        # NOTE(review): the "files = self._files" binding, the EOF
        # handling and the try/except framing appear to be elided from
        # this view.
        if event & PollConstants.POLLIN:
            buf = array.array('B')
            buf.fromfile(files.process, self._bufsize)
            if not self.background:
                buf.tofile(files.stdout)
                files.stdout.flush()
            buf.tofile(files.log)
        self._unregister_if_appropriate(event)
        return self._registered

    def _dummy_handler(self, fd, event):
        """
        This method is mainly interested in detecting EOF, since
        the only purpose of the pipe is to allow the scheduler to
        monitor the process from inside a poll() loop.
        """
        # NOTE(review): the EOF handling after the read appears to be
        # elided from this view.
        if event & PollConstants.POLLIN:
            buf = array.array('B')
            buf.fromfile(self._files.process, self._bufsize)
        self._unregister_if_appropriate(event)
        return self._registered
class MiscFunctionsProcess(SpawnProcess):
    """
    Spawns misc-functions.sh with an existing ebuild environment.
    """

    __slots__ = ("commands", "phase", "pkg", "settings")

    # NOTE(review): the def line of _start() appears to be elided from
    # this view.
        settings = self.settings
        settings.pop("EBUILD_PHASE", None)
        portage_bin_path = settings["PORTAGE_BIN_PATH"]
        misc_sh_binary = os.path.join(portage_bin_path,
            os.path.basename(portage.const.MISC_SH_BINARY))

        self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
        self.logfile = settings.get("PORTAGE_LOG_FILE")

        # Remove any stale exit-status file before spawning.
        portage._doebuild_exit_status_unlink(
            settings.get("EBUILD_EXIT_STATUS_FILE"))

        SpawnProcess._start(self)

    def _spawn(self, args, **kwargs):
        # Run the joined command line through portage.spawn() so the
        # ebuild environment/sandbox settings apply.
        settings = self.settings
        debug = settings.get("PORTAGE_DEBUG") == "1"
        return portage.spawn(" ".join(args), settings,
            debug=debug, **kwargs)

    def _set_returncode(self, wait_retval):
        SpawnProcess._set_returncode(self, wait_retval)
        # Fold the phase's exit-status file check into the returncode.
        self.returncode = portage._doebuild_exit_status_check_and_log(
            self.settings, self.phase, self.returncode)
class EbuildFetcher(SpawnProcess):
    """
    Fetch an ebuild's distfiles asynchronously by spawning the "ebuild"
    helper with a fetch phase.
    """

    # NOTE(review): the __slots__ continuation line (presumably naming
    # "_build_dir") appears to be elided from this view.
    __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \

    # NOTE(review): the def line of _start() appears to be elided from
    # this view, along with the phase selection and a few other
    # statements noted below.
        root_config = self.pkg.root_config
        portdb = root_config.trees["porttree"].dbapi
        ebuild_path = portdb.findname(self.pkg.cpv)
        settings = self.config_pool.allocate()
        settings.setcpv(self.pkg)

        # In prefetch mode, logging goes to emerge-fetch.log and the builddir
        # should not be touched since otherwise it could interfere with
        # another instance of the same cpv concurrently being built for a
        # different $ROOT (currently, builds only cooperate with prefetchers
        # that are spawned for the same $ROOT).
        if not self.prefetch:
            self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
            self._build_dir.lock()
            self._build_dir.clean()
            portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
            if self.logfile is None:
                self.logfile = settings.get("PORTAGE_LOG_FILE")

        # If any incremental variables have been overridden
        # via the environment, those values need to be passed
        # along here so that they are correctly considered by
        # the config instance in the subproccess.
        fetch_env = os.environ.copy()

        nocolor = settings.get("NOCOLOR")
        if nocolor is not None:
            fetch_env["NOCOLOR"] = nocolor

        fetch_env["PORTAGE_NICENESS"] = "0"
        fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"

        ebuild_binary = os.path.join(
            settings["PORTAGE_BIN_PATH"], "ebuild")

        fetch_args = [ebuild_binary, ebuild_path, phase]
        debug = settings.get("PORTAGE_DEBUG") == "1"
            fetch_args.append("--debug")

        self.args = fetch_args
        self.env = fetch_env
        SpawnProcess._start(self)

    def _pipe(self, fd_pipes):
        """When appropriate, use a pty so that fetcher progress bars,
        like wget has, will work properly."""
        if self.background or not sys.stdout.isatty():
            # When the output only goes to a log file,
            # there's no point in creating a pty.
            # NOTE(review): the plain-pipe return here appears to be
            # elided from this view.
        stdout_pipe = fd_pipes.get(1)
        got_pty, master_fd, slave_fd = \
            portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
        return (master_fd, slave_fd)

    def _set_returncode(self, wait_retval):
        SpawnProcess._set_returncode(self, wait_retval)
        # Collect elog messages that might have been
        # created by the pkg_nofetch phase.
        # NOTE(review): the elog_out initialization, the background
        # check before opening the logfile, and the elog_out.close()
        # appear to be elided from this view.
        if self._build_dir is not None:
            # Skip elog messages for prefetch, in order to avoid duplicates.
            if not self.prefetch and self.returncode != os.EX_OK:
                if self.logfile is not None:
                    elog_out = open(self.logfile, 'a')
                msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
                if self.logfile is not None:
                    msg += ", Log file:"
                eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
                if self.logfile is not None:
                    eerror(" '%s'" % (self.logfile,),
                        phase="unpack", key=self.pkg.cpv, out=elog_out)
                if elog_out is not None:
            if not self.prefetch:
                portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
                features = self._build_dir.settings.features
                if self.returncode == os.EX_OK:
                    self._build_dir.clean()
            # Always release the build dir and its settings instance.
            self._build_dir.unlock()
            self.config_pool.deallocate(self._build_dir.settings)
            self._build_dir = None
2578 class EbuildBuildDir(SlotObject):
2580 __slots__ = ("dir_path", "pkg", "settings",
2581 "locked", "_catdir", "_lock_obj")
2583 def __init__(self, **kwargs):
2584 SlotObject.__init__(self, **kwargs)
2589 This raises an AlreadyLocked exception if lock() is called
2590 while a lock is already held. In order to avoid this, call
2591 unlock() or check whether the "locked" attribute is True
2592 or False before calling lock().
2594 if self._lock_obj is not None:
2595 raise self.AlreadyLocked((self._lock_obj,))
2597 dir_path = self.dir_path
2598 if dir_path is None:
2599 root_config = self.pkg.root_config
2600 portdb = root_config.trees["porttree"].dbapi
2601 ebuild_path = portdb.findname(self.pkg.cpv)
2602 settings = self.settings
2603 settings.setcpv(self.pkg)
2604 debug = settings.get("PORTAGE_DEBUG") == "1"
2605 use_cache = 1 # always true
2606 portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2607 self.settings, debug, use_cache, portdb)
2608 dir_path = self.settings["PORTAGE_BUILDDIR"]
2610 catdir = os.path.dirname(dir_path)
2611 self._catdir = catdir
2613 portage.util.ensure_dirs(os.path.dirname(catdir),
2614 gid=portage.portage_gid,
2618 catdir_lock = portage.locks.lockdir(catdir)
2619 portage.util.ensure_dirs(catdir,
2620 gid=portage.portage_gid,
2622 self._lock_obj = portage.locks.lockdir(dir_path)
2624 self.locked = self._lock_obj is not None
2625 if catdir_lock is not None:
2626 portage.locks.unlockdir(catdir_lock)
2629 """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
2630 by keepwork or keeptemp in FEATURES."""
2631 settings = self.settings
2632 features = settings.features
2633 if not ("keepwork" in features or "keeptemp" in features):
2635 shutil.rmtree(settings["PORTAGE_BUILDDIR"])
2636 except EnvironmentError, e:
2637 if e.errno != errno.ENOENT:
2642 if self._lock_obj is None:
2645 portage.locks.unlockdir(self._lock_obj)
2646 self._lock_obj = None
2649 catdir = self._catdir
2652 catdir_lock = portage.locks.lockdir(catdir)
2658 if e.errno not in (errno.ENOENT,
2659 errno.ENOTEMPTY, errno.EEXIST):
2662 portage.locks.unlockdir(catdir_lock)
2664 class AlreadyLocked(portage.exception.PortageException):
2667 class EbuildBuild(CompositeTask):
2669 __slots__ = ("args_set", "config_pool", "find_blockers",
2670 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2671 "prefetcher", "settings", "world_atom") + \
2672 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2676 logger = self.logger
2679 settings = self.settings
2680 world_atom = self.world_atom
2681 root_config = pkg.root_config
2684 portdb = root_config.trees[tree].dbapi
2685 settings.setcpv(pkg)
2686 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2687 ebuild_path = portdb.findname(self.pkg.cpv)
2688 self._ebuild_path = ebuild_path
2690 prefetcher = self.prefetcher
2691 if prefetcher is None:
2693 elif not prefetcher.isAlive():
2695 elif prefetcher.poll() is None:
2697 waiting_msg = "Fetching files " + \
2698 "in the background. " + \
2699 "To view fetch progress, run `tail -f " + \
2700 "/var/log/emerge-fetch.log` in another " + \
2702 msg_prefix = colorize("GOOD", " * ")
2703 from textwrap import wrap
2704 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2705 for line in wrap(waiting_msg, 65))
2706 if not self.background:
2707 writemsg(waiting_msg, noiselevel=-1)
2709 self._current_task = prefetcher
2710 prefetcher.addExitListener(self._prefetch_exit)
2713 self._prefetch_exit(prefetcher)
2715 def _prefetch_exit(self, prefetcher):
2719 settings = self.settings
2722 fetcher = EbuildFetchonly(
2723 fetch_all=opts.fetch_all_uri,
2724 pkg=pkg, pretend=opts.pretend,
2726 retval = fetcher.execute()
2727 self.returncode = retval
2731 fetcher = EbuildFetcher(config_pool=self.config_pool,
2732 fetchall=opts.fetch_all_uri,
2733 fetchonly=opts.fetchonly,
2734 background=self.background,
2735 pkg=pkg, scheduler=self.scheduler)
2737 self._start_task(fetcher, self._fetch_exit)
2739 def _fetch_exit(self, fetcher):
2743 fetch_failed = False
2745 fetch_failed = self._final_exit(fetcher) != os.EX_OK
2747 fetch_failed = self._default_exit(fetcher) != os.EX_OK
2749 if fetch_failed and fetcher.logfile is not None and \
2750 os.path.exists(fetcher.logfile):
2751 self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2753 if not fetch_failed and fetcher.logfile is not None:
2754 # Fetch was successful, so remove the fetch log.
2756 os.unlink(fetcher.logfile)
2760 if fetch_failed or opts.fetchonly:
2764 logger = self.logger
2766 pkg_count = self.pkg_count
2767 scheduler = self.scheduler
2768 settings = self.settings
2769 features = settings.features
2770 ebuild_path = self._ebuild_path
2771 system_set = pkg.root_config.sets["system"]
2773 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2774 self._build_dir.lock()
2776 # Cleaning is triggered before the setup
2777 # phase, in portage.doebuild().
2778 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2779 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2780 short_msg = "emerge: (%s of %s) %s Clean" % \
2781 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2782 logger.log(msg, short_msg=short_msg)
2784 #buildsyspkg: Check if we need to _force_ binary package creation
2785 self._issyspkg = "buildsyspkg" in features and \
2786 system_set.findAtomForPackage(pkg) and \
2789 if opts.buildpkg or self._issyspkg:
2791 self._buildpkg = True
2793 msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2794 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2795 short_msg = "emerge: (%s of %s) %s Compile" % \
2796 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2797 logger.log(msg, short_msg=short_msg)
2800 msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2801 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2802 short_msg = "emerge: (%s of %s) %s Compile" % \
2803 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2804 logger.log(msg, short_msg=short_msg)
2806 build = EbuildExecuter(background=self.background, pkg=pkg,
2807 scheduler=scheduler, settings=settings)
2808 self._start_task(build, self._build_exit)
def _unlock_builddir(self):
    """Flush any queued elog messages for this package, then release
    the lock held on the build directory."""
    build_dir = self._build_dir
    portage.elog.elog_process(self.pkg.cpv, self.settings)
    build_dir.unlock()
2814 def _build_exit(self, build):
2815 if self._default_exit(build) != os.EX_OK:
2816 self._unlock_builddir()
2821 buildpkg = self._buildpkg
2824 self._final_exit(build)
2829 msg = ">>> This is a system package, " + \
2830 "let's pack a rescue tarball.\n"
2832 log_path = self.settings.get("PORTAGE_LOG_FILE")
2833 if log_path is not None:
2834 log_file = open(log_path, 'a')
2840 if not self.background:
2841 portage.writemsg_stdout(msg, noiselevel=-1)
2843 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2844 scheduler=self.scheduler, settings=self.settings)
2846 self._start_task(packager, self._buildpkg_exit)
2848 def _buildpkg_exit(self, packager):
2850 Released build dir lock when there is a failure or
2851 when in buildpkgonly mode. Otherwise, the lock will
2852 be released when merge() is called.
2855 if self._default_exit(packager) != os.EX_OK:
2856 self._unlock_builddir()
2860 if self.opts.buildpkgonly:
2861 # Need to call "clean" phase for buildpkgonly mode
2862 portage.elog.elog_process(self.pkg.cpv, self.settings)
2864 clean_phase = EbuildPhase(background=self.background,
2865 pkg=self.pkg, phase=phase,
2866 scheduler=self.scheduler, settings=self.settings,
2868 self._start_task(clean_phase, self._clean_exit)
2871 # Continue holding the builddir lock until
2872 # after the package has been installed.
2873 self._current_task = None
2874 self.returncode = packager.returncode
2877 def _clean_exit(self, clean_phase):
2878 if self._final_exit(clean_phase) != os.EX_OK or \
2879 self.opts.buildpkgonly:
2880 self._unlock_builddir()
2885 Install the package and then clean up and release locks.
2886 Only call this after the build has completed successfully
2887 and neither fetchonly nor buildpkgonly mode are enabled.
2890 find_blockers = self.find_blockers
2891 ldpath_mtimes = self.ldpath_mtimes
2892 logger = self.logger
2894 pkg_count = self.pkg_count
2895 settings = self.settings
2896 world_atom = self.world_atom
2897 ebuild_path = self._ebuild_path
2900 merge = EbuildMerge(find_blockers=self.find_blockers,
2901 ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2902 pkg_count=pkg_count, pkg_path=ebuild_path,
2903 scheduler=self.scheduler,
2904 settings=settings, tree=tree, world_atom=world_atom)
2906 msg = " === (%s of %s) Merging (%s::%s)" % \
2907 (pkg_count.curval, pkg_count.maxval,
2908 pkg.cpv, ebuild_path)
2909 short_msg = "emerge: (%s of %s) %s Merge" % \
2910 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2911 logger.log(msg, short_msg=short_msg)
2914 rval = merge.execute()
2916 self._unlock_builddir()
2920 class EbuildExecuter(CompositeTask):
2922 __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2924 _phases = ("prepare", "configure", "compile", "test", "install")
2926 _live_eclasses = frozenset([
2936 self._tree = "porttree"
2939 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2940 scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2941 self._start_task(clean_phase, self._clean_phase_exit)
2943 def _clean_phase_exit(self, clean_phase):
2945 if self._default_exit(clean_phase) != os.EX_OK:
2950 scheduler = self.scheduler
2951 settings = self.settings
2954 # This initializes PORTAGE_LOG_FILE.
2955 portage.prepare_build_dirs(pkg.root, settings, cleanup)
2957 setup_phase = EbuildPhase(background=self.background,
2958 pkg=pkg, phase="setup", scheduler=scheduler,
2959 settings=settings, tree=self._tree)
2961 setup_phase.addExitListener(self._setup_exit)
2962 self._current_task = setup_phase
2963 self.scheduler.scheduleSetup(setup_phase)
2965 def _setup_exit(self, setup_phase):
2967 if self._default_exit(setup_phase) != os.EX_OK:
2971 unpack_phase = EbuildPhase(background=self.background,
2972 pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2973 settings=self.settings, tree=self._tree)
2975 if self._live_eclasses.intersection(self.pkg.inherited):
2976 # Serialize $DISTDIR access for live ebuilds since
2977 # otherwise they can interfere with each other.
2979 unpack_phase.addExitListener(self._unpack_exit)
2980 self._current_task = unpack_phase
2981 self.scheduler.scheduleUnpack(unpack_phase)
2984 self._start_task(unpack_phase, self._unpack_exit)
2986 def _unpack_exit(self, unpack_phase):
2988 if self._default_exit(unpack_phase) != os.EX_OK:
2992 ebuild_phases = TaskSequence(scheduler=self.scheduler)
2995 phases = self._phases
2996 eapi = pkg.metadata["EAPI"]
2997 if eapi in ("0", "1"):
2998 # skip src_prepare and src_configure
3001 for phase in phases:
3002 ebuild_phases.add(EbuildPhase(background=self.background,
3003 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3004 settings=self.settings, tree=self._tree))
3006 self._start_task(ebuild_phases, self._default_final_exit)
3008 class EbuildMetadataPhase(SubProcess):
3011 Asynchronous interface for the ebuild "depend" phase which is
3012 used to extract metadata from the ebuild.
3015 __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3016 "ebuild_mtime", "portdb", "repo_path", "settings") + \
3019 _file_names = ("ebuild",)
3020 _files_dict = slot_dict_class(_file_names, prefix="")
3024 settings = self.settings
3026 ebuild_path = self.ebuild_path
3027 debug = settings.get("PORTAGE_DEBUG") == "1"
3031 if self.fd_pipes is not None:
3032 fd_pipes = self.fd_pipes.copy()
3036 fd_pipes.setdefault(0, sys.stdin.fileno())
3037 fd_pipes.setdefault(1, sys.stdout.fileno())
3038 fd_pipes.setdefault(2, sys.stderr.fileno())
3040 # flush any pending output
3041 for fd in fd_pipes.itervalues():
3042 if fd == sys.stdout.fileno():
3044 if fd == sys.stderr.fileno():
3047 fd_pipes_orig = fd_pipes.copy()
3048 self._files = self._files_dict()
3051 master_fd, slave_fd = os.pipe()
3052 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3053 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3055 fd_pipes[self._metadata_fd] = slave_fd
3057 self._raw_metadata = []
3058 files.ebuild = os.fdopen(master_fd, 'r')
3059 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3060 self._registered_events, self._output_handler)
3061 self._registered = True
3063 retval = portage.doebuild(ebuild_path, "depend",
3064 settings["ROOT"], settings, debug,
3065 mydbapi=self.portdb, tree="porttree",
3066 fd_pipes=fd_pipes, returnpid=True)
3070 if isinstance(retval, int):
3071 # doebuild failed before spawning
3073 self.returncode = retval
3077 self.pid = retval[0]
3078 portage.process.spawned_pids.remove(self.pid)
3080 def _output_handler(self, fd, event):
3082 if event & PollConstants.POLLIN:
3083 self._raw_metadata.append(self._files.ebuild.read())
3084 if not self._raw_metadata[-1]:
3088 self._unregister_if_appropriate(event)
3089 return self._registered
3091 def _set_returncode(self, wait_retval):
3092 SubProcess._set_returncode(self, wait_retval)
3093 if self.returncode == os.EX_OK:
3094 metadata_lines = "".join(self._raw_metadata).splitlines()
3095 if len(portage.auxdbkeys) != len(metadata_lines):
3096 # Don't trust bash's returncode if the
3097 # number of lines is incorrect.
3100 metadata = izip(portage.auxdbkeys, metadata_lines)
3101 self.metadata_callback(self.cpv, self.ebuild_path,
3102 self.repo_path, metadata, self.ebuild_mtime)
3104 class EbuildProcess(SpawnProcess):
3106 __slots__ = ("phase", "pkg", "settings", "tree")
3109 # Don't open the log file during the clean phase since the
3110 # open file can result in an nfs lock on $T/build.log which
3111 # prevents the clean phase from removing $T.
3112 if self.phase not in ("clean", "cleanrm"):
3113 self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3114 SpawnProcess._start(self)
def _pipe(self, fd_pipes):
    """Create the master/slave fd pair used to capture phase output.

    Delegates to portage._create_pty_or_pipe, which (per its name)
    allocates a pty when possible and otherwise falls back to a plain
    pipe; the got_pty flag it returns is not needed here.  The existing
    stdout pipe, when present, is passed along so the helper can copy
    the terminal size from it.
    """
    term_size_source = fd_pipes.get(1)
    got_pty, master_fd, slave_fd = portage._create_pty_or_pipe(
        copy_term_size=term_size_source)
    return (master_fd, slave_fd)
3122 def _spawn(self, args, **kwargs):
3124 root_config = self.pkg.root_config
3126 mydbapi = root_config.trees[tree].dbapi
3127 settings = self.settings
3128 ebuild_path = settings["EBUILD"]
3129 debug = settings.get("PORTAGE_DEBUG") == "1"
3131 rval = portage.doebuild(ebuild_path, self.phase,
3132 root_config.root, settings, debug,
3133 mydbapi=mydbapi, tree=tree, **kwargs)
3137 def _set_returncode(self, wait_retval):
# Post-process the child's exit status: let the base class decode the
# raw wait() value into self.returncode, then apply ebuild-specific
# adjustments.
3138 SpawnProcess._set_returncode(self, wait_retval)
# The exit-status check/logging helper is skipped for the clean phases
# (consistent with logfile handling elsewhere in this class, which also
# special-cases "clean"/"cleanrm").
3140 if self.phase not in ("clean", "cleanrm"):
3141 self.returncode = portage._doebuild_exit_status_check_and_log(
3142 self.settings, self.phase, self.returncode)
# A failed test phase is forced to success when the user has enabled
# FEATURES=test-fail-continue, so the merge can proceed anyway.
3144 if self.phase == "test" and self.returncode != os.EX_OK and \
3145 "test-fail-continue" in self.settings.features:
3146 self.returncode = os.EX_OK
# Always fix up userpriv permissions after the phase, regardless of
# its exit status.
3148 portage._post_phase_userpriv_perms(self.settings)
3150 class EbuildPhase(CompositeTask):
3152 __slots__ = ("background", "pkg", "phase",
3153 "scheduler", "settings", "tree")
3155 _post_phase_cmds = portage._post_phase_cmds
3159 ebuild_process = EbuildProcess(background=self.background,
3160 pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3161 settings=self.settings, tree=self.tree)
3163 self._start_task(ebuild_process, self._ebuild_exit)
3165 def _ebuild_exit(self, ebuild_process):
3167 if self.phase == "install":
3169 log_path = self.settings.get("PORTAGE_LOG_FILE")
3171 if self.background and log_path is not None:
3172 log_file = open(log_path, 'a')
3175 portage._check_build_log(self.settings, out=out)
3177 if log_file is not None:
3180 if self._default_exit(ebuild_process) != os.EX_OK:
3184 settings = self.settings
3186 if self.phase == "install":
3187 portage._post_src_install_uid_fix(settings)
3189 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3190 if post_phase_cmds is not None:
3191 post_phase = MiscFunctionsProcess(background=self.background,
3192 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3193 scheduler=self.scheduler, settings=settings)
3194 self._start_task(post_phase, self._post_phase_exit)
3197 self.returncode = ebuild_process.returncode
3198 self._current_task = None
3201 def _post_phase_exit(self, post_phase):
3202 if self._final_exit(post_phase) != os.EX_OK:
3203 writemsg("!!! post %s failed; exiting.\n" % self.phase,
3205 self._current_task = None
3209 class EbuildBinpkg(EbuildProcess):
3211 This assumes that src_install() has successfully completed.
3213 __slots__ = ("_binpkg_tmpfile",)
3216 self.phase = "package"
3217 self.tree = "porttree"
3219 root_config = pkg.root_config
3220 portdb = root_config.trees["porttree"].dbapi
3221 bintree = root_config.trees["bintree"]
3222 ebuild_path = portdb.findname(self.pkg.cpv)
3223 settings = self.settings
3224 debug = settings.get("PORTAGE_DEBUG") == "1"
3226 bintree.prevent_collision(pkg.cpv)
3227 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3228 pkg.cpv + ".tbz2." + str(os.getpid()))
3229 self._binpkg_tmpfile = binpkg_tmpfile
3230 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3231 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3234 EbuildProcess._start(self)
3236 settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3238 def _set_returncode(self, wait_retval):
3239 EbuildProcess._set_returncode(self, wait_retval)
3242 bintree = pkg.root_config.trees["bintree"]
3243 binpkg_tmpfile = self._binpkg_tmpfile
3244 if self.returncode == os.EX_OK:
3245 bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3247 class EbuildMerge(SlotObject):
3249 __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3250 "pkg", "pkg_count", "pkg_path", "pretend",
3251 "scheduler", "settings", "tree", "world_atom")
3254 root_config = self.pkg.root_config
3255 settings = self.settings
3256 retval = portage.merge(settings["CATEGORY"],
3257 settings["PF"], settings["D"],
3258 os.path.join(settings["PORTAGE_BUILDDIR"],
3259 "build-info"), root_config.root, settings,
3260 myebuild=settings["EBUILD"],
3261 mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3262 vartree=root_config.trees["vartree"],
3263 prev_mtimes=self.ldpath_mtimes,
3264 scheduler=self.scheduler,
3265 blockers=self.find_blockers)
3267 if retval == os.EX_OK:
3268 self.world_atom(self.pkg)
3273 def _log_success(self):
3275 pkg_count = self.pkg_count
3276 pkg_path = self.pkg_path
3277 logger = self.logger
3278 if "noclean" not in self.settings.features:
3279 short_msg = "emerge: (%s of %s) %s Clean Post" % \
3280 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3281 logger.log((" === (%s of %s) " + \
3282 "Post-Build Cleaning (%s::%s)") % \
3283 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3284 short_msg=short_msg)
3285 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3286 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3288 class PackageUninstall(AsynchronousTask):
3290 __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3294 unmerge(self.pkg.root_config, self.opts, "unmerge",
3295 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3296 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3297 writemsg_level=self._writemsg_level)
3298 except UninstallFailure, e:
3299 self.returncode = e.status
3301 self.returncode = os.EX_OK
3304 def _writemsg_level(self, msg, level=0, noiselevel=0):
3306 log_path = self.settings.get("PORTAGE_LOG_FILE")
3307 background = self.background
3309 if log_path is None:
3310 if not (background and level < logging.WARNING):
3311 portage.util.writemsg_level(msg,
3312 level=level, noiselevel=noiselevel)
3315 portage.util.writemsg_level(msg,
3316 level=level, noiselevel=noiselevel)
3318 f = open(log_path, 'a')
3324 class Binpkg(CompositeTask):
3326 __slots__ = ("find_blockers",
3327 "ldpath_mtimes", "logger", "opts",
3328 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3329 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3330 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3332 def _writemsg_level(self, msg, level=0, noiselevel=0):
3334 if not self.background:
3335 portage.util.writemsg_level(msg,
3336 level=level, noiselevel=noiselevel)
3338 log_path = self.settings.get("PORTAGE_LOG_FILE")
3339 if log_path is not None:
3340 f = open(log_path, 'a')
3349 settings = self.settings
3350 settings.setcpv(pkg)
3351 self._tree = "bintree"
3352 self._bintree = self.pkg.root_config.trees[self._tree]
3353 self._verify = not self.opts.pretend
3355 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3356 "portage", pkg.category, pkg.pf)
3357 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3358 pkg=pkg, settings=settings)
3359 self._image_dir = os.path.join(dir_path, "image")
3360 self._infloc = os.path.join(dir_path, "build-info")
3361 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3362 settings["EBUILD"] = self._ebuild_path
3363 debug = settings.get("PORTAGE_DEBUG") == "1"
3364 portage.doebuild_environment(self._ebuild_path, "setup",
3365 settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3366 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3368 # The prefetcher has already completed or it
3369 # could be running now. If it's running now,
3370 # wait for it to complete since it holds
3371 # a lock on the file being fetched. The
3372 # portage.locks functions are only designed
3373 # to work between separate processes. Since
3374 # the lock is held by the current process,
3375 # use the scheduler and fetcher methods to
3376 # synchronize with the fetcher.
3377 prefetcher = self.prefetcher
3378 if prefetcher is None:
3380 elif not prefetcher.isAlive():
3382 elif prefetcher.poll() is None:
3384 waiting_msg = ("Fetching '%s' " + \
3385 "in the background. " + \
3386 "To view fetch progress, run `tail -f " + \
3387 "/var/log/emerge-fetch.log` in another " + \
3388 "terminal.") % prefetcher.pkg_path
3389 msg_prefix = colorize("GOOD", " * ")
3390 from textwrap import wrap
3391 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3392 for line in wrap(waiting_msg, 65))
3393 if not self.background:
3394 writemsg(waiting_msg, noiselevel=-1)
3396 self._current_task = prefetcher
3397 prefetcher.addExitListener(self._prefetch_exit)
3400 self._prefetch_exit(prefetcher)
3402 def _prefetch_exit(self, prefetcher):
3405 pkg_count = self.pkg_count
3406 if not (self.opts.pretend or self.opts.fetchonly):
3407 self._build_dir.lock()
3409 shutil.rmtree(self._build_dir.dir_path)
3410 except EnvironmentError, e:
3411 if e.errno != errno.ENOENT:
3414 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3415 fetcher = BinpkgFetcher(background=self.background,
3416 logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3417 pretend=self.opts.pretend, scheduler=self.scheduler)
3418 pkg_path = fetcher.pkg_path
3419 self._pkg_path = pkg_path
3421 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3423 msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3424 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3425 short_msg = "emerge: (%s of %s) %s Fetch" % \
3426 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3427 self.logger.log(msg, short_msg=short_msg)
3428 self._start_task(fetcher, self._fetcher_exit)
3431 self._fetcher_exit(fetcher)
3433 def _fetcher_exit(self, fetcher):
3435 # The fetcher only has a returncode when
3436 # --getbinpkg is enabled.
3437 if fetcher.returncode is not None:
3438 self._fetched_pkg = True
3439 if self._default_exit(fetcher) != os.EX_OK:
3440 self._unlock_builddir()
3444 if self.opts.pretend:
3445 self._current_task = None
3446 self.returncode = os.EX_OK
3454 logfile = self.settings.get("PORTAGE_LOG_FILE")
3455 verifier = BinpkgVerifier(background=self.background,
3456 logfile=logfile, pkg=self.pkg)
3457 self._start_task(verifier, self._verifier_exit)
3460 self._verifier_exit(verifier)
3462 def _verifier_exit(self, verifier):
3463 if verifier is not None and \
3464 self._default_exit(verifier) != os.EX_OK:
3465 self._unlock_builddir()
3469 logger = self.logger
3471 pkg_count = self.pkg_count
3472 pkg_path = self._pkg_path
3474 if self._fetched_pkg:
3475 self._bintree.inject(pkg.cpv, filename=pkg_path)
3477 if self.opts.fetchonly:
3478 self._current_task = None
3479 self.returncode = os.EX_OK
3483 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3484 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3485 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3486 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3487 logger.log(msg, short_msg=short_msg)
3490 settings = self.settings
3491 ebuild_phase = EbuildPhase(background=self.background,
3492 pkg=pkg, phase=phase, scheduler=self.scheduler,
3493 settings=settings, tree=self._tree)
3495 self._start_task(ebuild_phase, self._clean_exit)
3497 def _clean_exit(self, clean_phase):
3498 if self._default_exit(clean_phase) != os.EX_OK:
3499 self._unlock_builddir()
3503 dir_path = self._build_dir.dir_path
3506 shutil.rmtree(dir_path)
3507 except (IOError, OSError), e:
3508 if e.errno != errno.ENOENT:
3512 infloc = self._infloc
3514 pkg_path = self._pkg_path
3517 for mydir in (dir_path, self._image_dir, infloc):
3518 portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3519 gid=portage.data.portage_gid, mode=dir_mode)
3521 # This initializes PORTAGE_LOG_FILE.
3522 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3523 self._writemsg_level(">>> Extracting info\n")
3525 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3526 check_missing_metadata = ("CATEGORY", "PF")
3527 missing_metadata = set()
3528 for k in check_missing_metadata:
3529 v = pkg_xpak.getfile(k)
3531 missing_metadata.add(k)
3533 pkg_xpak.unpackinfo(infloc)
3534 for k in missing_metadata:
3542 f = open(os.path.join(infloc, k), 'wb')
3548 # Store the md5sum in the vdb.
3549 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3551 f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3555 # This gives bashrc users an opportunity to do various things
3556 # such as remove binary packages after they're installed.
3557 settings = self.settings
3558 settings.setcpv(self.pkg)
3559 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3560 settings.backup_changes("PORTAGE_BINPKG_FILE")
3563 setup_phase = EbuildPhase(background=self.background,
3564 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3565 settings=settings, tree=self._tree)
3567 setup_phase.addExitListener(self._setup_exit)
3568 self._current_task = setup_phase
3569 self.scheduler.scheduleSetup(setup_phase)
3571 def _setup_exit(self, setup_phase):
3572 if self._default_exit(setup_phase) != os.EX_OK:
3573 self._unlock_builddir()
3577 extractor = BinpkgExtractorAsync(background=self.background,
3578 image_dir=self._image_dir,
3579 pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3580 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3581 self._start_task(extractor, self._extractor_exit)
3583 def _extractor_exit(self, extractor):
3584 if self._final_exit(extractor) != os.EX_OK:
3585 self._unlock_builddir()
3586 writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3590 def _unlock_builddir(self):
3591 if self.opts.pretend or self.opts.fetchonly:
3593 portage.elog.elog_process(self.pkg.cpv, self.settings)
3594 self._build_dir.unlock()
3598 # This gives bashrc users an opportunity to do various things
3599 # such as remove binary packages after they're installed.
3600 settings = self.settings
3601 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3602 settings.backup_changes("PORTAGE_BINPKG_FILE")
3604 merge = EbuildMerge(find_blockers=self.find_blockers,
3605 ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3606 pkg=self.pkg, pkg_count=self.pkg_count,
3607 pkg_path=self._pkg_path, scheduler=self.scheduler,
3608 settings=settings, tree=self._tree, world_atom=self.world_atom)
3611 retval = merge.execute()
3613 settings.pop("PORTAGE_BINPKG_FILE", None)
3614 self._unlock_builddir()
3617 class BinpkgFetcher(SpawnProcess):
3619 __slots__ = ("pkg", "pretend",
3620 "locked", "pkg_path", "_lock_obj")
3622 def __init__(self, **kwargs):
3623 SpawnProcess.__init__(self, **kwargs)
3625 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3633 pretend = self.pretend
3634 bintree = pkg.root_config.trees["bintree"]
3635 settings = bintree.settings
3636 use_locks = "distlocks" in settings.features
3637 pkg_path = self.pkg_path
3640 portage.util.ensure_dirs(os.path.dirname(pkg_path))
3643 exists = os.path.exists(pkg_path)
3644 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3645 if not (pretend or resume):
3646 # Remove existing file or broken symlink.
3652 # urljoin doesn't work correctly with
3653 # unrecognized protocols like sftp
3654 if bintree._remote_has_index:
3655 rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3657 rel_uri = pkg.cpv + ".tbz2"
3658 uri = bintree._remote_base_uri.rstrip("/") + \
3659 "/" + rel_uri.lstrip("/")
3661 uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3662 "/" + pkg.pf + ".tbz2"
3665 portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3666 self.returncode = os.EX_OK
3670 protocol = urlparse.urlparse(uri)[0]
3671 fcmd_prefix = "FETCHCOMMAND"
3673 fcmd_prefix = "RESUMECOMMAND"
3674 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3676 fcmd = settings.get(fcmd_prefix)
3679 "DISTDIR" : os.path.dirname(pkg_path),
3681 "FILE" : os.path.basename(pkg_path)
3684 fetch_env = dict(settings.iteritems())
3685 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3686 for x in shlex.split(fcmd)]
3688 if self.fd_pipes is None:
3690 fd_pipes = self.fd_pipes
3692 # Redirect all output to stdout since some fetchers like
3693 # wget pollute stderr (if portage detects a problem then it
3694 # can send its own message to stderr).
3695 fd_pipes.setdefault(0, sys.stdin.fileno())
3696 fd_pipes.setdefault(1, sys.stdout.fileno())
3697 fd_pipes.setdefault(2, sys.stdout.fileno())
3699 self.args = fetch_args
3700 self.env = fetch_env
3701 SpawnProcess._start(self)
3703 def _set_returncode(self, wait_retval):
3704 SpawnProcess._set_returncode(self, wait_retval)
3705 if self.returncode == os.EX_OK:
3706 # If possible, update the mtime to match the remote package if
3707 # the fetcher didn't already do it automatically.
3708 bintree = self.pkg.root_config.trees["bintree"]
3709 if bintree._remote_has_index:
3710 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3711 if remote_mtime is not None:
3713 remote_mtime = long(remote_mtime)
3718 local_mtime = long(os.stat(self.pkg_path).st_mtime)
3722 if remote_mtime != local_mtime:
3724 os.utime(self.pkg_path,
3725 (remote_mtime, remote_mtime))
3734 This raises an AlreadyLocked exception if lock() is called
3735 while a lock is already held. In order to avoid this, call
3736 unlock() or check whether the "locked" attribute is True
3737 or False before calling lock().
3739 if self._lock_obj is not None:
3740 raise self.AlreadyLocked((self._lock_obj,))
3742 self._lock_obj = portage.locks.lockfile(
3743 self.pkg_path, wantnewlockfile=1)
3746 class AlreadyLocked(portage.exception.PortageException):
3750 if self._lock_obj is None:
3752 portage.locks.unlockfile(self._lock_obj)
3753 self._lock_obj = None
3756 class BinpkgVerifier(AsynchronousTask):
3757 __slots__ = ("logfile", "pkg",)
3761 Note: Unlike a normal AsynchronousTask.start() method,
3762 this one does all work is synchronously. The returncode
3763 attribute will be set before it returns.
3767 root_config = pkg.root_config
3768 bintree = root_config.trees["bintree"]
3770 stdout_orig = sys.stdout
3771 stderr_orig = sys.stderr
3773 if self.background and self.logfile is not None:
3774 log_file = open(self.logfile, 'a')
3776 if log_file is not None:
3777 sys.stdout = log_file
3778 sys.stderr = log_file
3780 bintree.digestCheck(pkg)
3781 except portage.exception.FileNotFound:
3782 writemsg("!!! Fetching Binary failed " + \
3783 "for '%s'\n" % pkg.cpv, noiselevel=-1)
3785 except portage.exception.DigestException, e:
3786 writemsg("\n!!! Digest verification failed:\n",
3788 writemsg("!!! %s\n" % e.value[0],
3790 writemsg("!!! Reason: %s\n" % e.value[1],
3792 writemsg("!!! Got: %s\n" % e.value[2],
3794 writemsg("!!! Expected: %s\n" % e.value[3],
3797 if rval != os.EX_OK:
3798 pkg_path = bintree.getname(pkg.cpv)
3799 head, tail = os.path.split(pkg_path)
3800 temp_filename = portage._checksum_failure_temp_file(head, tail)
3801 writemsg("File renamed to '%s'\n" % (temp_filename,),
3804 sys.stdout = stdout_orig
3805 sys.stderr = stderr_orig
3806 if log_file is not None:
3809 self.returncode = rval
3812 class BinpkgPrefetcher(CompositeTask):
3814 __slots__ = ("pkg",) + \
3815 ("pkg_path", "_bintree",)
3818 self._bintree = self.pkg.root_config.trees["bintree"]
3819 fetcher = BinpkgFetcher(background=self.background,
3820 logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3821 scheduler=self.scheduler)
3822 self.pkg_path = fetcher.pkg_path
3823 self._start_task(fetcher, self._fetcher_exit)
3825 def _fetcher_exit(self, fetcher):
3827 if self._default_exit(fetcher) != os.EX_OK:
3831 verifier = BinpkgVerifier(background=self.background,
3832 logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3833 self._start_task(verifier, self._verifier_exit)
3835 def _verifier_exit(self, verifier):
3836 if self._default_exit(verifier) != os.EX_OK:
3840 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3842 self._current_task = None
3843 self.returncode = os.EX_OK
3846 class BinpkgExtractorAsync(SpawnProcess):
3848 __slots__ = ("image_dir", "pkg", "pkg_path")
3850 _shell_binary = portage.const.BASH_BINARY
3853 self.args = [self._shell_binary, "-c",
3854 "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3855 (portage._shell_quote(self.pkg_path),
3856 portage._shell_quote(self.image_dir))]
3858 self.env = self.pkg.root_config.settings.environ()
3859 SpawnProcess._start(self)
3861 class MergeListItem(CompositeTask):
3864 TODO: For parallel scheduling, everything here needs asynchronous
3865 execution support (start, poll, and wait methods).
3868 __slots__ = ("args_set",
3869 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3870 "find_blockers", "logger", "mtimedb", "pkg",
3871 "pkg_count", "pkg_to_replace", "prefetcher",
3872 "settings", "statusMessage", "world_atom") + \
3878 build_opts = self.build_opts
3881 # uninstall, executed by self.merge()
3882 self.returncode = os.EX_OK
3886 args_set = self.args_set
3887 find_blockers = self.find_blockers
3888 logger = self.logger
3889 mtimedb = self.mtimedb
3890 pkg_count = self.pkg_count
3891 scheduler = self.scheduler
3892 settings = self.settings
3893 world_atom = self.world_atom
3894 ldpath_mtimes = mtimedb["ldpath"]
3896 action_desc = "Emerging"
3898 if pkg.type_name == "binary":
3899 action_desc += " binary"
3901 if build_opts.fetchonly:
3902 action_desc = "Fetching"
3904 msg = "%s (%s of %s) %s" % \
3906 colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3907 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3908 colorize("GOOD", pkg.cpv))
3910 portdb = pkg.root_config.trees["porttree"].dbapi
3911 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3912 if portdir_repo_name:
3913 pkg_repo_name = pkg.metadata.get("repository")
3914 if pkg_repo_name != portdir_repo_name:
3915 if not pkg_repo_name:
3916 pkg_repo_name = "unknown repo"
3917 msg += " from %s" % pkg_repo_name
3920 msg += " %s %s" % (preposition, pkg.root)
3922 if not build_opts.pretend:
3923 self.statusMessage(msg)
3924 logger.log(" >>> emerge (%s of %s) %s to %s" % \
3925 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3927 if pkg.type_name == "ebuild":
3929 build = EbuildBuild(args_set=args_set,
3930 background=self.background,
3931 config_pool=self.config_pool,
3932 find_blockers=find_blockers,
3933 ldpath_mtimes=ldpath_mtimes, logger=logger,
3934 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3935 prefetcher=self.prefetcher, scheduler=scheduler,
3936 settings=settings, world_atom=world_atom)
3938 self._install_task = build
3939 self._start_task(build, self._default_final_exit)
3942 elif pkg.type_name == "binary":
3944 binpkg = Binpkg(background=self.background,
3945 find_blockers=find_blockers,
3946 ldpath_mtimes=ldpath_mtimes, logger=logger,
3947 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3948 prefetcher=self.prefetcher, settings=settings,
3949 scheduler=scheduler, world_atom=world_atom)
3951 self._install_task = binpkg
3952 self._start_task(binpkg, self._default_final_exit)
3956 self._install_task.poll()
3957 return self.returncode
3960 self._install_task.wait()
3961 return self.returncode
3966 build_opts = self.build_opts
3967 find_blockers = self.find_blockers
3968 logger = self.logger
3969 mtimedb = self.mtimedb
3970 pkg_count = self.pkg_count
3971 prefetcher = self.prefetcher
3972 scheduler = self.scheduler
3973 settings = self.settings
3974 world_atom = self.world_atom
3975 ldpath_mtimes = mtimedb["ldpath"]
3978 if not (build_opts.buildpkgonly or \
3979 build_opts.fetchonly or build_opts.pretend):
3981 uninstall = PackageUninstall(background=self.background,
3982 ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3983 pkg=pkg, scheduler=scheduler, settings=settings)
3986 retval = uninstall.wait()
3987 if retval != os.EX_OK:
3991 if build_opts.fetchonly or \
3992 build_opts.buildpkgonly:
3993 return self.returncode
3995 retval = self._install_task.install()
3998 class PackageMerge(AsynchronousTask):
4000 TODO: Implement asynchronous merge so that the scheduler can
4001 run while a merge is executing.
4004 __slots__ = ("merge",)
4008 pkg = self.merge.pkg
4009 pkg_count = self.merge.pkg_count
4012 action_desc = "Uninstalling"
4013 preposition = "from"
4015 action_desc = "Installing"
4018 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
4021 msg += " %s %s" % (preposition, pkg.root)
4023 if not self.merge.build_opts.fetchonly and \
4024 not self.merge.build_opts.pretend and \
4025 not self.merge.build_opts.buildpkgonly:
4026 self.merge.statusMessage(msg)
4028 self.returncode = self.merge.merge()
4031 class DependencyArg(object):
4032 def __init__(self, arg=None, root_config=None):
4034 self.root_config = root_config
4037 return str(self.arg)
4039 class AtomArg(DependencyArg):
4040 def __init__(self, atom=None, **kwargs):
4041 DependencyArg.__init__(self, **kwargs)
4043 if not isinstance(self.atom, portage.dep.Atom):
4044 self.atom = portage.dep.Atom(self.atom)
4045 self.set = (self.atom, )
class PackageArg(DependencyArg):
	def __init__(self, package=None, **kwargs):
		"""Dependency argument wrapping a concrete Package instance.

		An exact-version atom ("=" + cpv) is derived from the wrapped
		package and exposed both as self.atom and as a one-element
		tuple in self.set, mirroring the other DependencyArg subclasses.
		"""
		DependencyArg.__init__(self, **kwargs)
		self.package = package
		self.atom = portage.dep.Atom("=%s" % (package.cpv,))
		self.set = (self.atom,)
4054 class SetArg(DependencyArg):
4055 def __init__(self, set=None, **kwargs):
4056 DependencyArg.__init__(self, **kwargs)
4058 self.name = self.arg[len(SETPREFIX):]
4060 class Dependency(SlotObject):
4061 __slots__ = ("atom", "blocker", "depth",
4062 "parent", "onlydeps", "priority", "root")
4063 def __init__(self, **kwargs):
4064 SlotObject.__init__(self, **kwargs)
4065 if self.priority is None:
4066 self.priority = DepPriority()
4067 if self.depth is None:
4070 class BlockerCache(portage.cache.mappings.MutableMapping):
4071 """This caches blockers of installed packages so that dep_check does not
4072 have to be done for every single installed package on every invocation of
4073 emerge. The cache is invalidated whenever it is detected that something
4074 has changed that might alter the results of dep_check() calls:
4075 1) the set of installed packages (including COUNTER) has changed
4076 2) the old-style virtuals have changed
4079 # Number of uncached packages to trigger cache update, since
4080 # it's wasteful to update it for every vdb change.
4081 _cache_threshold = 5
4083 class BlockerData(object):
4085 __slots__ = ("__weakref__", "atoms", "counter")
4087 def __init__(self, counter, atoms):
4088 self.counter = counter
4091 def __init__(self, myroot, vardb):
4093 self._virtuals = vardb.settings.getvirtuals()
4094 self._cache_filename = os.path.join(myroot,
4095 portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4096 self._cache_version = "1"
4097 self._cache_data = None
4098 self._modified = set()
4103 f = open(self._cache_filename, mode='rb')
4104 mypickle = pickle.Unpickler(f)
4105 self._cache_data = mypickle.load()
4108 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4109 if isinstance(e, pickle.UnpicklingError):
4110 writemsg("!!! Error loading '%s': %s\n" % \
4111 (self._cache_filename, str(e)), noiselevel=-1)
4114 cache_valid = self._cache_data and \
4115 isinstance(self._cache_data, dict) and \
4116 self._cache_data.get("version") == self._cache_version and \
4117 isinstance(self._cache_data.get("blockers"), dict)
4119 # Validate all the atoms and counters so that
4120 # corruption is detected as soon as possible.
4121 invalid_items = set()
4122 for k, v in self._cache_data["blockers"].iteritems():
4123 if not isinstance(k, basestring):
4124 invalid_items.add(k)
4127 if portage.catpkgsplit(k) is None:
4128 invalid_items.add(k)
4130 except portage.exception.InvalidData:
4131 invalid_items.add(k)
4133 if not isinstance(v, tuple) or \
4135 invalid_items.add(k)
4138 if not isinstance(counter, (int, long)):
4139 invalid_items.add(k)
4141 if not isinstance(atoms, (list, tuple)):
4142 invalid_items.add(k)
4144 invalid_atom = False
4146 if not isinstance(atom, basestring):
4149 if atom[:1] != "!" or \
4150 not portage.isvalidatom(
4151 atom, allow_blockers=True):
4155 invalid_items.add(k)
4158 for k in invalid_items:
4159 del self._cache_data["blockers"][k]
4160 if not self._cache_data["blockers"]:
4164 self._cache_data = {"version":self._cache_version}
4165 self._cache_data["blockers"] = {}
4166 self._cache_data["virtuals"] = self._virtuals
4167 self._modified.clear()
4170 """If the current user has permission and the internal blocker cache
4171 been updated, save it to disk and mark it unmodified. This is called
4172 by emerge after it has proccessed blockers for all installed packages.
4173 Currently, the cache is only written if the user has superuser
4174 privileges (since that's required to obtain a lock), but all users
4175 have read access and benefit from faster blocker lookups (as long as
4176 the entire cache is still valid). The cache is stored as a pickled
4177 dict object with the following format:
4181 "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4182 "virtuals" : vardb.settings.getvirtuals()
4185 if len(self._modified) >= self._cache_threshold and \
4188 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4189 pickle.dump(self._cache_data, f, -1)
4191 portage.util.apply_secpass_permissions(
4192 self._cache_filename, gid=portage.portage_gid, mode=0644)
4193 except (IOError, OSError), e:
4195 self._modified.clear()
def __setitem__(self, cpv, blocker_data):
	"""
	Store blocker data for a package and flag the entry as modified,
	so that a later flush() call can persist it.

	@param cpv: Package for which to cache blockers.
	@param blocker_data: An object with counter and atoms attributes.
	@type blocker_data: BlockerData
	"""
	# Atoms are stored as plain strings so the cache stays picklable.
	atom_strs = tuple(str(atom) for atom in blocker_data.atoms)
	self._cache_data["blockers"][cpv] = (blocker_data.counter, atom_strs)
	self._modified.add(cpv)
4212 if self._cache_data is None:
4213 # triggered by python-trace
4215 return iter(self._cache_data["blockers"])
def __delitem__(self, cpv):
	"""Discard the cached blocker entry for cpv (KeyError if absent)."""
	self._cache_data["blockers"].pop(cpv)
def __getitem__(self, cpv):
	"""
	@returns: An object with counter and atoms attributes.
	@rtype: BlockerData
	"""
	# Each cache entry is a (counter, atoms) pair; rehydrate it.
	entry = self._cache_data["blockers"][cpv]
	return self.BlockerData(*entry)
4227 class BlockerDB(object):
def __init__(self, root_config):
	"""Set up blocker lookups for the vartree/porttree of root_config."""
	self._root_config = root_config
	trees = root_config.trees
	self._vartree = trees["vartree"]
	self._portdb = trees["porttree"].dbapi
	# Both are created lazily by _get_fake_vartree().
	self._fake_vartree = None
	self._dep_check_trees = None
4237 def _get_fake_vartree(self, acquire_lock=0):
4238 fake_vartree = self._fake_vartree
4239 if fake_vartree is None:
4240 fake_vartree = FakeVartree(self._root_config,
4241 acquire_lock=acquire_lock)
4242 self._fake_vartree = fake_vartree
4243 self._dep_check_trees = { self._vartree.root : {
4244 "porttree" : fake_vartree,
4245 "vartree" : fake_vartree,
4248 fake_vartree.sync(acquire_lock=acquire_lock)
4251 def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4252 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4253 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4254 settings = self._vartree.settings
4255 stale_cache = set(blocker_cache)
4256 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4257 dep_check_trees = self._dep_check_trees
4258 vardb = fake_vartree.dbapi
4259 installed_pkgs = list(vardb)
4261 for inst_pkg in installed_pkgs:
4262 stale_cache.discard(inst_pkg.cpv)
4263 cached_blockers = blocker_cache.get(inst_pkg.cpv)
4264 if cached_blockers is not None and \
4265 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4266 cached_blockers = None
4267 if cached_blockers is not None:
4268 blocker_atoms = cached_blockers.atoms
4270 # Use aux_get() to trigger FakeVartree global
4271 # updates on *DEPEND when appropriate.
4272 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4274 portage.dep._dep_check_strict = False
4275 success, atoms = portage.dep_check(depstr,
4276 vardb, settings, myuse=inst_pkg.use.enabled,
4277 trees=dep_check_trees, myroot=inst_pkg.root)
4279 portage.dep._dep_check_strict = True
4281 pkg_location = os.path.join(inst_pkg.root,
4282 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4283 portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4284 (pkg_location, atoms), noiselevel=-1)
4287 blocker_atoms = [atom for atom in atoms \
4288 if atom.startswith("!")]
4289 blocker_atoms.sort()
4290 counter = long(inst_pkg.metadata["COUNTER"])
4291 blocker_cache[inst_pkg.cpv] = \
4292 blocker_cache.BlockerData(counter, blocker_atoms)
4293 for cpv in stale_cache:
4294 del blocker_cache[cpv]
4295 blocker_cache.flush()
4297 blocker_parents = digraph()
4299 for pkg in installed_pkgs:
4300 for blocker_atom in blocker_cache[pkg.cpv].atoms:
4301 blocker_atom = blocker_atom.lstrip("!")
4302 blocker_atoms.append(blocker_atom)
4303 blocker_parents.add(blocker_atom, pkg)
4305 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4306 blocking_pkgs = set()
4307 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4308 blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4310 # Check for blockers in the other direction.
4311 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4313 portage.dep._dep_check_strict = False
4314 success, atoms = portage.dep_check(depstr,
4315 vardb, settings, myuse=new_pkg.use.enabled,
4316 trees=dep_check_trees, myroot=new_pkg.root)
4318 portage.dep._dep_check_strict = True
4320 # We should never get this far with invalid deps.
4321 show_invalid_depstring_notice(new_pkg, depstr, atoms)
4324 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4327 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4328 for inst_pkg in installed_pkgs:
4330 blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4331 except (portage.exception.InvalidDependString, StopIteration):
4333 blocking_pkgs.add(inst_pkg)
4335 return blocking_pkgs
4337 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4339 msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4340 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4341 p_type, p_root, p_key, p_status = parent_node
4343 if p_status == "nomerge":
4344 category, pf = portage.catsplit(p_key)
4345 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4346 msg.append("Portage is unable to process the dependencies of the ")
4347 msg.append("'%s' package. " % p_key)
4348 msg.append("In order to correct this problem, the package ")
4349 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4350 msg.append("As a temporary workaround, the --nodeps option can ")
4351 msg.append("be used to ignore all dependencies. For reference, ")
4352 msg.append("the problematic dependencies can be found in the ")
4353 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4355 msg.append("This package can not be installed. ")
4356 msg.append("Please notify the '%s' package maintainer " % p_key)
4357 msg.append("about this problem.")
4359 msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4360 writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4362 class PackageVirtualDbapi(portage.dbapi):
4364 A dbapi-like interface class that represents the state of the installed
4365 package database as new packages are installed, replacing any packages
4366 that previously existed in the same slot. The main difference between
4367 this class and fakedbapi is that this one uses Package instances
4368 internally (passed in via cpv_inject() and cpv_remove() calls).
4370 def __init__(self, settings):
4371 portage.dbapi.__init__(self)
4372 self.settings = settings
4373 self._match_cache = {}
4379 Remove all packages.
4383 self._cp_map.clear()
4384 self._cpv_map.clear()
4387 obj = PackageVirtualDbapi(self.settings)
4388 obj._match_cache = self._match_cache.copy()
4389 obj._cp_map = self._cp_map.copy()
4390 for k, v in obj._cp_map.iteritems():
4391 obj._cp_map[k] = v[:]
4392 obj._cpv_map = self._cpv_map.copy()
4396 return self._cpv_map.itervalues()
4398 def __contains__(self, item):
4399 existing = self._cpv_map.get(item.cpv)
4400 if existing is not None and \
4405 def get(self, item, default=None):
4406 cpv = getattr(item, "cpv", None)
4410 type_name, root, cpv, operation = item
4412 existing = self._cpv_map.get(cpv)
4413 if existing is not None and \
def match_pkgs(self, atom):
	"""Like match(), but map each matching cpv to its Package instance."""
	matches = self.match(atom)
	return list(map(self._cpv_map.__getitem__, matches))
def _clear_cache(self):
	"""Invalidate memoized category and atom-match lookups."""
	# Only rebind when there is something to discard.
	if self._match_cache:
		self._match_cache = {}
	if self._categories is not None:
		self._categories = None
4427 def match(self, origdep, use_cache=1):
4428 result = self._match_cache.get(origdep)
4429 if result is not None:
4431 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4432 self._match_cache[origdep] = result
def cpv_exists(self, cpv):
	"""Return whether a package with exactly this cpv has been injected."""
	known = self._cpv_map
	return cpv in known
4438 def cp_list(self, mycp, use_cache=1):
4439 cachelist = self._match_cache.get(mycp)
4440 # cp_list() doesn't expand old-style virtuals
4441 if cachelist and cachelist[0].startswith(mycp):
4443 cpv_list = self._cp_map.get(mycp)
4444 if cpv_list is None:
4447 cpv_list = [pkg.cpv for pkg in cpv_list]
4448 self._cpv_sort_ascending(cpv_list)
4449 if not (not cpv_list and mycp.startswith("virtual/")):
4450 self._match_cache[mycp] = cpv_list
4454 return list(self._cp_map)
4457 return list(self._cpv_map)
4459 def cpv_inject(self, pkg):
4460 cp_list = self._cp_map.get(pkg.cp)
4463 self._cp_map[pkg.cp] = cp_list
4464 e_pkg = self._cpv_map.get(pkg.cpv)
4465 if e_pkg is not None:
4468 self.cpv_remove(e_pkg)
4469 for e_pkg in cp_list:
4470 if e_pkg.slot_atom == pkg.slot_atom:
4473 self.cpv_remove(e_pkg)
4476 self._cpv_map[pkg.cpv] = pkg
4479 def cpv_remove(self, pkg):
4480 old_pkg = self._cpv_map.get(pkg.cpv)
4483 self._cp_map[pkg.cp].remove(pkg)
4484 del self._cpv_map[pkg.cpv]
def aux_get(self, cpv, wants):
	"""Return the metadata values named in wants for cpv.

	Missing metadata keys yield "" in the result list; an unknown cpv
	propagates KeyError from the package map lookup.
	"""
	pkg = self._cpv_map[cpv]
	lookup = pkg.metadata.get
	return [lookup(key, "") for key in wants]
def aux_update(self, cpv, values):
	"""Merge values into the stored metadata of the package keyed by cpv."""
	metadata = self._cpv_map[cpv].metadata
	metadata.update(values)
4495 class depgraph(object):
4497 pkg_tree_map = RootConfig.pkg_tree_map
4499 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4501 def __init__(self, settings, trees, myopts, myparams, spinner):
4502 self.settings = settings
4503 self.target_root = settings["ROOT"]
4504 self.myopts = myopts
4505 self.myparams = myparams
4507 if settings.get("PORTAGE_DEBUG", "") == "1":
4509 self.spinner = spinner
4510 self._running_root = trees["/"]["root_config"]
4511 self._opts_no_restart = Scheduler._opts_no_restart
4512 self.pkgsettings = {}
4513 # Maps slot atom to package for each Package added to the graph.
4514 self._slot_pkg_map = {}
4515 # Maps nodes to the reasons they were selected for reinstallation.
4516 self._reinstall_nodes = {}
4519 self._trees_orig = trees
4521 # Contains a filtered view of preferred packages that are selected
4522 # from available repositories.
4523 self._filtered_trees = {}
4524 # Contains installed packages and new packages that have been added
4526 self._graph_trees = {}
4527 # All Package instances
4528 self._pkg_cache = {}
4529 for myroot in trees:
4530 self.trees[myroot] = {}
4531 # Create a RootConfig instance that references
4532 # the FakeVartree instead of the real one.
4533 self.roots[myroot] = RootConfig(
4534 trees[myroot]["vartree"].settings,
4536 trees[myroot]["root_config"].setconfig)
4537 for tree in ("porttree", "bintree"):
4538 self.trees[myroot][tree] = trees[myroot][tree]
4539 self.trees[myroot]["vartree"] = \
4540 FakeVartree(trees[myroot]["root_config"],
4541 pkg_cache=self._pkg_cache)
4542 self.pkgsettings[myroot] = portage.config(
4543 clone=self.trees[myroot]["vartree"].settings)
4544 self._slot_pkg_map[myroot] = {}
4545 vardb = self.trees[myroot]["vartree"].dbapi
4546 preload_installed_pkgs = "--nodeps" not in self.myopts and \
4547 "--buildpkgonly" not in self.myopts
4548 # This fakedbapi instance will model the state that the vdb will
4549 # have after new packages have been installed.
4550 fakedb = PackageVirtualDbapi(vardb.settings)
4551 if preload_installed_pkgs:
4553 self.spinner.update()
4554 # This triggers metadata updates via FakeVartree.
4555 vardb.aux_get(pkg.cpv, [])
4556 fakedb.cpv_inject(pkg)
4558 # Now that the vardb state is cached in our FakeVartree,
4559 # we won't be needing the real vartree cache for awhile.
4560 # To make some room on the heap, clear the vardbapi
4562 trees[myroot]["vartree"].dbapi._clear_cache()
4565 self.mydbapi[myroot] = fakedb
4568 graph_tree.dbapi = fakedb
4569 self._graph_trees[myroot] = {}
4570 self._filtered_trees[myroot] = {}
4571 # Substitute the graph tree for the vartree in dep_check() since we
4572 # want atom selections to be consistent with package selections
4573 # have already been made.
4574 self._graph_trees[myroot]["porttree"] = graph_tree
4575 self._graph_trees[myroot]["vartree"] = graph_tree
4576 def filtered_tree():
4578 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4579 self._filtered_trees[myroot]["porttree"] = filtered_tree
4581 # Passing in graph_tree as the vartree here could lead to better
4582 # atom selections in some cases by causing atoms for packages that
4583 # have been added to the graph to be preferred over other choices.
4584 # However, it can trigger atom selections that result in
4585 # unresolvable direct circular dependencies. For example, this
4586 # happens with gwydion-dylan which depends on either itself or
4587 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4588 # gwydion-dylan-bin needs to be selected in order to avoid a
4589 # an unresolvable direct circular dependency.
4591 # To solve the problem described above, pass in "graph_db" so that
4592 # packages that have been added to the graph are distinguishable
4593 # from other available packages and installed packages. Also, pass
4594 # the parent package into self._select_atoms() calls so that
4595 # unresolvable direct circular dependencies can be detected and
4596 # avoided when possible.
4597 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4598 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4601 portdb = self.trees[myroot]["porttree"].dbapi
4602 bindb = self.trees[myroot]["bintree"].dbapi
4603 vardb = self.trees[myroot]["vartree"].dbapi
4604 # (db, pkg_type, built, installed, db_keys)
4605 if "--usepkgonly" not in self.myopts:
4606 db_keys = list(portdb._aux_cache_keys)
4607 dbs.append((portdb, "ebuild", False, False, db_keys))
4608 if "--usepkg" in self.myopts:
4609 db_keys = list(bindb._aux_cache_keys)
4610 dbs.append((bindb, "binary", True, False, db_keys))
4611 db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4612 dbs.append((vardb, "installed", True, True, db_keys))
4613 self._filtered_trees[myroot]["dbs"] = dbs
4614 if "--usepkg" in self.myopts:
4615 self.trees[myroot]["bintree"].populate(
4616 "--getbinpkg" in self.myopts,
4617 "--getbinpkgonly" in self.myopts)
4620 self.digraph=portage.digraph()
4621 # contains all sets added to the graph
4623 # contains atoms given as arguments
4624 self._sets["args"] = InternalPackageSet()
4625 # contains all atoms from all sets added to the graph, including
4626 # atoms given as arguments
4627 self._set_atoms = InternalPackageSet()
4628 self._atom_arg_map = {}
4629 # contains all nodes pulled in by self._set_atoms
4630 self._set_nodes = set()
4631 # Contains only Blocker -> Uninstall edges
4632 self._blocker_uninstalls = digraph()
4633 # Contains only Package -> Blocker edges
4634 self._blocker_parents = digraph()
4635 # Contains only irrelevant Package -> Blocker edges
4636 self._irrelevant_blockers = digraph()
4637 # Contains only unsolvable Package -> Blocker edges
4638 self._unsolvable_blockers = digraph()
4639 # Contains all Blocker -> Blocked Package edges
4640 self._blocked_pkgs = digraph()
4641 # Contains world packages that have been protected from
4642 # uninstallation but may not have been added to the graph
4643 # if the graph is not complete yet.
4644 self._blocked_world_pkgs = {}
4645 self._slot_collision_info = {}
4646 # Slot collision nodes are not allowed to block other packages since
4647 # blocker validation is only able to account for one package per slot.
4648 self._slot_collision_nodes = set()
4649 self._parent_atoms = {}
4650 self._slot_conflict_parent_atoms = set()
4651 self._serialized_tasks_cache = None
4652 self._scheduler_graph = None
4653 self._displayed_list = None
4654 self._pprovided_args = []
4655 self._missing_args = []
4656 self._masked_installed = set()
4657 self._unsatisfied_deps_for_display = []
4658 self._unsatisfied_blockers_for_display = None
4659 self._circular_deps_for_display = None
4660 self._dep_stack = []
4661 self._unsatisfied_deps = []
4662 self._initially_unsatisfied_deps = []
4663 self._ignored_deps = []
4664 self._required_set_names = set(["system", "world"])
4665 self._select_atoms = self._select_atoms_highest_available
4666 self._select_package = self._select_pkg_highest_available
4667 self._highest_pkg_cache = {}
4669 def _show_slot_collision_notice(self):
4670 """Show an informational message advising the user to mask one of the
4671 the packages. In some cases it may be possible to resolve this
4672 automatically, but support for backtracking (removal nodes that have
4673 already been selected) will be required in order to handle all possible
4677 if not self._slot_collision_info:
4680 self._show_merge_list()
4683 msg.append("\n!!! Multiple package instances within a single " + \
4684 "package slot have been pulled\n")
4685 msg.append("!!! into the dependency graph, resulting" + \
4686 " in a slot conflict:\n\n")
4688 # Max number of parents shown, to avoid flooding the display.
4690 explanation_columns = 70
4692 for (slot_atom, root), slot_nodes \
4693 in self._slot_collision_info.iteritems():
4694 msg.append(str(slot_atom))
4697 for node in slot_nodes:
4699 msg.append(str(node))
4700 parent_atoms = self._parent_atoms.get(node)
4703 # Prefer conflict atoms over others.
4704 for parent_atom in parent_atoms:
4705 if len(pruned_list) >= max_parents:
4707 if parent_atom in self._slot_conflict_parent_atoms:
4708 pruned_list.add(parent_atom)
4710 # If this package was pulled in by conflict atoms then
4711 # show those alone since those are the most interesting.
4713 # When generating the pruned list, prefer instances
4714 # of DependencyArg over instances of Package.
4715 for parent_atom in parent_atoms:
4716 if len(pruned_list) >= max_parents:
4718 parent, atom = parent_atom
4719 if isinstance(parent, DependencyArg):
4720 pruned_list.add(parent_atom)
4721 # Prefer Packages instances that themselves have been
4722 # pulled into collision slots.
4723 for parent_atom in parent_atoms:
4724 if len(pruned_list) >= max_parents:
4726 parent, atom = parent_atom
4727 if isinstance(parent, Package) and \
4728 (parent.slot_atom, parent.root) \
4729 in self._slot_collision_info:
4730 pruned_list.add(parent_atom)
4731 for parent_atom in parent_atoms:
4732 if len(pruned_list) >= max_parents:
4734 pruned_list.add(parent_atom)
4735 omitted_parents = len(parent_atoms) - len(pruned_list)
4736 parent_atoms = pruned_list
4737 msg.append(" pulled in by\n")
4738 for parent_atom in parent_atoms:
4739 parent, atom = parent_atom
4740 msg.append(2*indent)
4741 if isinstance(parent,
4742 (PackageArg, AtomArg)):
4743 # For PackageArg and AtomArg types, it's
4744 # redundant to display the atom attribute.
4745 msg.append(str(parent))
4747 # Display the specific atom from SetArg or
4749 msg.append("%s required by %s" % (atom, parent))
4752 msg.append(2*indent)
4753 msg.append("(and %d more)\n" % omitted_parents)
4755 msg.append(" (no parents)\n")
4757 explanation = self._slot_conflict_explanation(slot_nodes)
4760 msg.append(indent + "Explanation:\n\n")
4761 for line in textwrap.wrap(explanation, explanation_columns):
4762 msg.append(2*indent + line + "\n")
4765 sys.stderr.write("".join(msg))
4768 explanations_for_all = explanations == len(self._slot_collision_info)
4770 if explanations_for_all or "--quiet" in self.myopts:
4774 msg.append("It may be possible to solve this problem ")
4775 msg.append("by using package.mask to prevent one of ")
4776 msg.append("those packages from being selected. ")
4777 msg.append("However, it is also possible that conflicting ")
4778 msg.append("dependencies exist such that they are impossible to ")
4779 msg.append("satisfy simultaneously. If such a conflict exists in ")
4780 msg.append("the dependencies of two different packages, then those ")
4781 msg.append("packages can not be installed simultaneously.")
4783 from formatter import AbstractFormatter, DumbWriter
4784 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4786 f.add_flowing_data(x)
4790 msg.append("For more information, see MASKED PACKAGES ")
4791 msg.append("section in the emerge man page or refer ")
4792 msg.append("to the Gentoo Handbook.")
4794 f.add_flowing_data(x)
# NOTE(review): extraction artifact — original indentation and several lines
# (gaps in the embedded line-number gutter, e.g. the docstring fences, some
# `return`/`continue` statements and `matched_node` init) are missing from
# this extract. Lines are preserved verbatim; comments summarize intent.
#
# Purpose: given exactly two packages that collide in one slot, classify the
# USE-dep conflict (cases 1 and 2 of the docstring text below) and return a
# human-readable suggestion string; no suggestion is produced for case 3 or
# for any ambiguous situation (multiple matched nodes, missing parent atoms).
4798 def _slot_conflict_explanation(self, slot_nodes):
4800 When a slot conflict occurs due to USE deps, there are a few
4801 different cases to consider:
4803 1) New USE are correctly set but --newuse wasn't requested so an
4804 installed package with incorrect USE happened to get pulled
4805 into graph before the new one.
4807 2) New USE are incorrectly set but an installed package has correct
4808 USE so it got pulled into the graph, and a new instance also got
4809 pulled in due to --newuse or an upgrade.
4811 3) Multiple USE deps exist that can't be satisfied simultaneously,
4812 and multiple package instances got pulled into the same slot to
4813 satisfy the conflicting deps.
4815 Currently, explanations and suggested courses of action are generated
4816 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
# Guard: suggestions only implemented for exactly two conflicting packages.
4819 if len(slot_nodes) != 2:
4820 # Suggestions are only implemented for
4821 # conflicts between two packages.
4824 all_conflict_atoms = self._slot_conflict_parent_atoms
4826 matched_atoms = None
4827 unmatched_node = None
# Partition the two nodes into one matched by conflict atoms and one not.
4828 for node in slot_nodes:
4829 parent_atoms = self._parent_atoms.get(node)
4830 if not parent_atoms:
4831 # Normally, there are always parent atoms. If there are
4832 # none then something unexpected is happening and there's
4833 # currently no suggestion for this case.
4835 conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4836 for parent_atom in conflict_atoms:
4837 parent, atom = parent_atom
4839 # Suggestions are currently only implemented for cases
4840 # in which all conflict atoms have USE deps.
4843 if matched_node is not None:
4844 # If conflict atoms match multiple nodes
4845 # then there's no suggestion.
4848 matched_atoms = conflict_atoms
4850 if unmatched_node is not None:
4851 # Neither node is matched by conflict atoms, and
4852 # there is no suggestion for this case.
4854 unmatched_node = node
4856 if matched_node is None or unmatched_node is None:
4857 # This shouldn't happen.
# Case 1: installed pkg pulled in first because --newuse wasn't given.
4860 if unmatched_node.installed and not matched_node.installed:
4861 return "New USE are correctly set, but --newuse wasn't" + \
4862 " requested, so an installed package with incorrect USE " + \
4863 "happened to get pulled into the dependency graph. " + \
4864 "In order to solve " + \
4865 "this, either specify the --newuse option or explicitly " + \
4866 " reinstall '%s'." % matched_node.slot_atom
# Case 2: new USE incorrectly set; list every atom the user must satisfy.
4868 if matched_node.installed and not unmatched_node.installed:
4869 atoms = sorted(set(atom for parent, atom in matched_atoms))
4870 explanation = ("New USE for '%s' are incorrectly set. " + \
4871 "In order to solve this, adjust USE to satisfy '%s'") % \
4872 (matched_node.slot_atom, atoms[0])
4874 for atom in atoms[1:-1]:
4875 explanation += ", '%s'" % (atom,)
4878 explanation += " and '%s'" % (atoms[-1],)
# NOTE(review): extraction artifact — some lines (docstring fences, a couple
# of `continue` statements) are missing; lines preserved verbatim.
#
# Purpose: for every recorded slot collision, find the parent atoms that only
# match a subset of the colliding packages and record them in
# self._slot_conflict_parent_atoms for later conflict explanation/display.
4884 def _process_slot_conflicts(self):
4886 Process slot conflict data to identify specific atoms which
4887 lead to conflict. These atoms only match a subset of the
4888 packages that have been pulled into a given slot.
4890 for (slot_atom, root), slot_nodes \
4891 in self._slot_collision_info.iteritems():
# First pass: union of all parent atoms across the colliding packages.
4893 all_parent_atoms = set()
4894 for pkg in slot_nodes:
4895 parent_atoms = self._parent_atoms.get(pkg)
4896 if not parent_atoms:
4898 all_parent_atoms.update(parent_atoms)
# Second pass: attribute each parent atom to every package it matches;
# atoms already recorded for a package are skipped.
4900 for pkg in slot_nodes:
4901 parent_atoms = self._parent_atoms.get(pkg)
4902 if parent_atoms is None:
4903 parent_atoms = set()
4904 self._parent_atoms[pkg] = parent_atoms
4905 for parent_atom in all_parent_atoms:
4906 if parent_atom in parent_atoms:
4908 # Use package set for matching since it will match via
4909 # PROVIDE when necessary, while match_from_list does not.
4910 parent, atom = parent_atom
4911 atom_set = InternalPackageSet(
4912 initial_atoms=(atom,))
4913 if atom_set.findAtomForPackage(pkg):
4914 parent_atoms.add(parent_atom)
# Atoms that did NOT match every package are the actual conflict atoms.
4916 self._slot_conflict_parent_atoms.add(parent_atom)
# NOTE(review): extraction artifact — the trailing `return` lines (and the
# "changed-use" result return) are missing from this extract.
#
# Purpose: compute the set of USE flags whose change should trigger a
# reinstall. Under --newuse, both IUSE membership changes (minus forced
# flags) and enabled-flag changes count; under --reinstall=changed-use,
# only enabled-flag changes count.
4918 def _reinstall_for_flags(self, forced_flags,
4919 orig_use, orig_iuse, cur_use, cur_iuse):
4920 """Return a set of flags that trigger reinstallation, or None if there
4921 are no such flags."""
4922 if "--newuse" in self.myopts:
# Flags added to / removed from IUSE, except profile-forced ones.
4923 flags = set(orig_iuse.symmetric_difference(
4924 cur_iuse).difference(forced_flags))
# Plus flags whose enabled state changed within IUSE.
4925 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4926 cur_iuse.intersection(cur_use)))
4929 elif "changed-use" == self.myopts.get("--reinstall"):
4930 flags = orig_iuse.intersection(orig_use).symmetric_difference(
4931 cur_iuse.intersection(cur_use))
# NOTE(review): extraction artifact — the loop header (presumably draining
# self._dep_stack) and the return statements are missing from this extract.
#
# Purpose: drain the dependency stack, dispatching Package entries to
# _add_pkg_deps and plain Dependency entries to _add_dep; propagate failure
# when either call reports it.
4936 def _create_graph(self, allow_unsatisfied=False):
4937 dep_stack = self._dep_stack
4938 self.spinner.update()
4940 dep = dep_stack.pop()
4941 if isinstance(dep, Package):
4942 if not self._add_pkg_deps(dep,
4943 allow_unsatisfied=allow_unsatisfied):
4946 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
# NOTE(review): extraction artifact — many control-flow lines (blocker branch
# condition, several returns/continues) are missing; lines preserved verbatim.
#
# Purpose: resolve a single Dependency into a concrete package (or a Blocker)
# and feed it to _add_pkg. Unsatisfiable deps are either queued for a retry
# pass (allow_unsatisfied) or recorded for display; some satisfied deps are
# deliberately ignored to keep the graph small.
4950 def _add_dep(self, dep, allow_unsatisfied=False):
4951 debug = "--debug" in self.myopts
4952 buildpkgonly = "--buildpkgonly" in self.myopts
4953 nodeps = "--nodeps" in self.myopts
4954 empty = "empty" in self.myparams
4955 deep = "deep" in self.myparams
# --update only influences deps near the command-line arguments (depth <= 1).
4956 update = "--update" in self.myopts and dep.depth <= 1
# Blocker handling (the blocker-atom condition line is missing here).
4958 if not buildpkgonly and \
4960 dep.parent not in self._slot_collision_nodes:
4961 if dep.parent.onlydeps:
4962 # It's safe to ignore blockers if the
4963 # parent is an --onlydeps node.
4965 # The blocker applies to the root where
4966 # the parent is or will be installed.
4967 blocker = Blocker(atom=dep.atom,
4968 eapi=dep.parent.metadata["EAPI"],
4969 root=dep.parent.root)
4970 self._blocker_parents.add(blocker, dep.parent)
4972 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
4973 onlydeps=dep.onlydeps)
# No package found for this dep:
4975 if dep.priority.optional:
4976 # This could be an unecessary build-time dep
4977 # pulled in by --with-bdeps=y.
4979 if allow_unsatisfied:
4980 self._unsatisfied_deps.append(dep)
4982 self._unsatisfied_deps_for_display.append(
4983 ((dep.root, dep.atom), {"myparent":dep.parent}))
4985 # In some cases, dep_check will return deps that shouldn't
4986 # be proccessed any further, so they are identified and
4987 # discarded here. Try to discard as few as possible since
4988 # discarded dependencies reduce the amount of information
4989 # available for optimization of merge order.
4990 if dep.priority.satisfied and \
4991 not dep_pkg.installed and \
4992 not (existing_node or empty or deep or update):
4994 if dep.root == self.target_root:
# Probe whether the package matches any command-line argument atom.
4996 myarg = self._iter_atoms_for_pkg(dep_pkg).next()
4997 except StopIteration:
4999 except portage.exception.InvalidDependString:
5000 if not dep_pkg.installed:
5001 # This shouldn't happen since the package
5002 # should have been masked.
# Keep ignored deps around so they can be revisited later if needed.
5005 self._ignored_deps.append(dep)
5008 if not self._add_pkg(dep_pkg, dep):
# NOTE(review): extraction artifact — many lines (docstring fences, several
# returns, `if existing_node:` guards) are missing; lines preserved verbatim.
#
# Purpose: add one selected package (plus its graph edges) to the dependency
# digraph, detecting slot collisions against any package already occupying
# the same (slot_atom, root), then queue the package for dependency
# traversal according to the recurse/deep parameters.
5012 def _add_pkg(self, pkg, dep):
5019 myparent = dep.parent
5020 priority = dep.priority
5022 if priority is None:
5023 priority = DepPriority()
5025 Fills the digraph with nodes comprised of packages to merge.
5026 mybigkey is the package spec of the package to merge.
5027 myparent is the package depending on mybigkey ( or None )
5028 addme = Should we add this package to the digraph or are we just looking at it's deps?
5029 Think --onlydeps, we need to ignore packages in that case.
5032 #IUSE-aware emerge -> USE DEP aware depgraph
5033 #"no downgrade" emerge
5035 # Ensure that the dependencies of the same package
5036 # are never processed more than once.
5037 previously_added = pkg in self.digraph
5039 # select the correct /var database that we'll be checking against
5040 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5041 pkgsettings = self.pkgsettings[pkg.root]
5046 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5047 except portage.exception.InvalidDependString, e:
5048 if not pkg.installed:
5049 show_invalid_depstring_notice(
5050 pkg, pkg.metadata["PROVIDE"], str(e))
5054 if not pkg.onlydeps:
5055 if not pkg.installed and \
5056 "empty" not in self.myparams and \
5057 vardbapi.match(pkg.slot_atom):
5058 # Increase the priority of dependencies on packages that
5059 # are being rebuilt. This optimizes merge order so that
5060 # dependencies are rebuilt/updated as soon as possible,
5061 # which is needed especially when emerge is called by
5062 # revdep-rebuild since dependencies may be affected by ABI
5063 # breakage that has rendered them useless. Don't adjust
5064 # priority here when in "empty" mode since all packages
5065 # are being merged in that case.
5066 priority.rebuild = True
# Slot-collision detection against the package already in this slot.
5068 existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5069 slot_collision = False
5071 existing_node_matches = pkg.cpv == existing_node.cpv
5072 if existing_node_matches and \
5073 pkg != existing_node and \
5074 dep.atom is not None:
5075 # Use package set for matching since it will match via
5076 # PROVIDE when necessary, while match_from_list does not.
5077 atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5078 if not atom_set.findAtomForPackage(existing_node):
5079 existing_node_matches = False
5080 if existing_node_matches:
5081 # The existing node can be reused.
5083 for parent_atom in arg_atoms:
5084 parent, atom = parent_atom
5085 self.digraph.add(existing_node, parent,
5087 self._add_parent_atom(existing_node, parent_atom)
5088 # If a direct circular dependency is not an unsatisfied
5089 # buildtime dependency then drop it here since otherwise
5090 # it can skew the merge order calculation in an unwanted
5092 if existing_node != myparent or \
5093 (priority.buildtime and not priority.satisfied):
5094 self.digraph.addnode(existing_node, myparent,
5096 if dep.atom is not None and dep.parent is not None:
5097 self._add_parent_atom(existing_node,
5098 (dep.parent, dep.atom))
5102 # A slot collision has occurred. Sometimes this coincides
5103 # with unresolvable blockers, so the slot collision will be
5104 # shown later if there are no unresolvable blockers.
5105 self._add_slot_conflict(pkg)
5106 slot_collision = True
5109 # Now add this node to the graph so that self.display()
5110 # can show use flags and --tree portage.output. This node is
5111 # only being partially added to the graph. It must not be
5112 # allowed to interfere with the other nodes that have been
5113 # added. Do not overwrite data for existing nodes in
5114 # self.mydbapi since that data will be used for blocker
5116 # Even though the graph is now invalid, continue to process
5117 # dependencies so that things like --fetchonly can still
5118 # function despite collisions.
5120 elif not previously_added:
# First time seen: register in the slot map and the fake dbapi.
5121 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5122 self.mydbapi[pkg.root].cpv_inject(pkg)
5123 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5125 if not pkg.installed:
5126 # Allow this package to satisfy old-style virtuals in case it
5127 # doesn't already. Any pre-existing providers will be preferred
5130 pkgsettings.setinst(pkg.cpv, pkg.metadata)
5131 # For consistency, also update the global virtuals.
5132 settings = self.roots[pkg.root].settings
5134 settings.setinst(pkg.cpv, pkg.metadata)
5136 except portage.exception.InvalidDependString, e:
5137 show_invalid_depstring_notice(
5138 pkg, pkg.metadata["PROVIDE"], str(e))
5143 self._set_nodes.add(pkg)
5145 # Do this even when addme is False (--onlydeps) so that the
5146 # parent/child relationship is always known in case
5147 # self._show_slot_collision_notice() needs to be called later.
5148 self.digraph.add(pkg, myparent, priority=priority)
5149 if dep.atom is not None and dep.parent is not None:
5150 self._add_parent_atom(pkg, (dep.parent, dep.atom))
5153 for parent_atom in arg_atoms:
5154 parent, atom = parent_atom
5155 self.digraph.add(pkg, parent, priority=priority)
5156 self._add_parent_atom(pkg, parent_atom)
5158 """ This section determines whether we go deeper into dependencies or not.
5159 We want to go deeper on a few occasions:
5160 Installing package A, we need to make sure package A's deps are met.
5161 emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
5162 If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
5164 dep_stack = self._dep_stack
5165 if "recurse" not in self.myparams:
5167 elif pkg.installed and \
5168 "deep" not in self.myparams:
# Installed packages without --deep: deps are noted but not traversed.
5169 dep_stack = self._ignored_deps
5171 self.spinner.update()
5176 if not previously_added:
5177 dep_stack.append(pkg)
5180 def _add_parent_atom(self, pkg, parent_atom):
5181 parent_atoms = self._parent_atoms.get(pkg)
5182 if parent_atoms is None:
5183 parent_atoms = set()
5184 self._parent_atoms[pkg] = parent_atoms
5185 parent_atoms.add(parent_atom)
# NOTE(review): extraction artifact — the `slot_nodes = set()` line inside
# the None-branch is missing from this extract; lines preserved verbatim.
#
# Purpose: record pkg as a slot-collision participant, keyed by
# (slot_atom, root), also pulling in the package currently occupying that
# slot so both sides of the conflict are tracked.
5187 def _add_slot_conflict(self, pkg):
5188 self._slot_collision_nodes.add(pkg)
5189 slot_key = (pkg.slot_atom, pkg.root)
5190 slot_nodes = self._slot_collision_info.get(slot_key)
5191 if slot_nodes is None:
# The current occupant of the slot is part of the conflict too.
5193 slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5194 self._slot_collision_info[slot_key] = slot_nodes
# NOTE(review): extraction artifact — numerous lines (the depkeys loop body
# header, try blocks, returns, `else:` branches) are missing; preserved
# verbatim.
#
# Purpose: expand a package's DEPEND/RDEPEND/PDEPEND strings into individual
# atoms (via _select_atoms) and feed each one to _add_dep with an
# appropriate priority: buildtime for DEPEND (optional under
# --with-bdeps=y for built packages), runtime for RDEPEND, runtime_post
# for PDEPEND.
5197 def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5199 mytype = pkg.type_name
5202 metadata = pkg.metadata
5203 myuse = pkg.use.enabled
5205 depth = pkg.depth + 1
5206 removal_action = "remove" in self.myparams
5209 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5211 edepend[k] = metadata[k]
# --buildpkgonly without deep/empty: runtime deps are irrelevant.
5213 if not pkg.built and \
5214 "--buildpkgonly" in self.myopts and \
5215 "deep" not in self.myparams and \
5216 "empty" not in self.myparams:
5217 edepend["RDEPEND"] = ""
5218 edepend["PDEPEND"] = ""
5219 bdeps_optional = False
5221 if pkg.built and not removal_action:
5222 if self.myopts.get("--with-bdeps", "n") == "y":
5223 # Pull in build time deps as requested, but marked them as
5224 # "optional" since they are not strictly required. This allows
5225 # more freedom in the merge order calculation for solving
5226 # circular dependencies. Don't convert to PDEPEND since that
5227 # could make --with-bdeps=y less effective if it is used to
5228 # adjust merge order to prevent built_with_use() calls from
5230 bdeps_optional = True
5232 # built packages do not have build time dependencies.
5233 edepend["DEPEND"] = ""
5235 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5236 edepend["DEPEND"] = ""
# (dep_root, dep_string, priority) triples; DEPEND always resolves in "/".
5239 ("/", edepend["DEPEND"],
5240 self._priority(buildtime=(not bdeps_optional),
5241 optional=bdeps_optional)),
5242 (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5243 (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5246 debug = "--debug" in self.myopts
# Installed packages get lenient dep-string parsing.
5247 strict = mytype != "installed"
5249 for dep_root, dep_string, dep_priority in deps:
5254 print "Parent: ", jbigkey
5255 print "Depstring:", dep_string
5256 print "Priority:", dep_priority
5257 vardb = self.roots[dep_root].trees["vartree"].dbapi
5259 selected_atoms = self._select_atoms(dep_root,
5260 dep_string, myuse=myuse, parent=pkg, strict=strict,
5261 priority=dep_priority)
5262 except portage.exception.InvalidDependString, e:
5263 show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5266 print "Candidates:", selected_atoms
5268 for atom in selected_atoms:
5271 atom = portage.dep.Atom(atom)
5273 mypriority = dep_priority.copy()
# A non-blocker atom already satisfied by an installed package.
5274 if not atom.blocker and vardb.match(atom):
5275 mypriority.satisfied = True
5277 if not self._add_dep(Dependency(atom=atom,
5278 blocker=atom.blocker, depth=depth, parent=pkg,
5279 priority=mypriority, root=dep_root),
5280 allow_unsatisfied=allow_unsatisfied):
5283 except portage.exception.InvalidAtom, e:
5284 show_invalid_depstring_notice(
5285 pkg, dep_string, str(e))
5287 if not pkg.installed:
5291 print "Exiting...", jbigkey
5292 except portage.exception.AmbiguousPackageName, e:
5294 portage.writemsg("\n\n!!! An atom in the dependencies " + \
5295 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5297 portage.writemsg(" %s\n" % cpv, noiselevel=-1)
5298 portage.writemsg("\n", noiselevel=-1)
5299 if mytype == "binary":
5301 "!!! This binary package cannot be installed: '%s'\n" % \
5302 mykey, noiselevel=-1)
5303 elif mytype == "ebuild":
5304 portdb = self.roots[myroot].trees["porttree"].dbapi
5305 myebuild, mylocation = portdb.findname2(mykey)
5306 portage.writemsg("!!! This ebuild cannot be installed: " + \
5307 "'%s'\n" % myebuild, noiselevel=-1)
5308 portage.writemsg("!!! Please notify the package maintainer " + \
5309 "that atoms must be fully-qualified.\n", noiselevel=-1)
def _priority(self, **kwargs):
	"""
	Construct a dependency priority object of the class appropriate
	for the current operation: UnmergeDepPriority during a removal
	("remove" in self.myparams), DepPriority otherwise.

	@param kwargs: forwarded verbatim to the priority constructor
	@returns: a new DepPriority or UnmergeDepPriority instance
	"""
	# As extracted, the DepPriority assignment unconditionally
	# overwrote the UnmergeDepPriority choice (missing else branch),
	# so removals would have received the wrong priority class.
	if "remove" in self.myparams:
		priority_constructor = UnmergeDepPriority
	else:
		priority_constructor = DepPriority
	return priority_constructor(**kwargs)
# NOTE(review): extraction artifact — docstring fences, the accumulator
# initializations (`categories`/`deps`), a `break`, and the final return are
# missing from this extract; lines preserved verbatim.
#
# Purpose: given an atom lacking a category, probe every configured package
# database for categories containing a matching package name and return one
# candidate atom per category found.
5320 def _dep_expand(self, root_config, atom_without_category):
5322 @param root_config: a root config instance
5323 @type root_config: RootConfig
5324 @param atom_without_category: an atom without a category component
5325 @type atom_without_category: String
5327 @returns: a list of atoms containing categories (possibly empty)
# Parse the package-name part by temporarily injecting a "null" category.
5329 null_cp = portage.dep_getkey(insert_category_into_atom(
5330 atom_without_category, "null"))
5331 cat, atom_pn = portage.catsplit(null_cp)
5333 dbs = self._filtered_trees[root_config.root]["dbs"]
5335 for db, pkg_type, built, installed, db_keys in dbs:
5336 for cat in db.categories:
5337 if db.cp_list("%s/%s" % (cat, atom_pn)):
5341 for cat in categories:
5342 deps.append(insert_category_into_atom(
5343 atom_without_category, cat))
# NOTE(review): extraction artifact — the result-flag initialization, its
# assignment/break, and the return are missing from this extract.
#
# Purpose: report whether any configured (non-installed-only) database for
# the given root provides a package for atom_cp, i.e. whether a new-style
# virtual/package exists for it.
5346 def _have_new_virt(self, root, atom_cp):
5348 for db, pkg_type, built, installed, db_keys in \
5349 self._filtered_trees[root]["dbs"]:
5350 if db.cp_list(atom_cp):
# NOTE(review): extraction artifact — several control-flow lines (a `return`
# for foreign roots, `continue`/`break` statements, `higher_slot = None`
# init, the final `yield`) are missing; lines preserved verbatim.
#
# Purpose: generate the (atom, arg) pairs from the command-line argument set
# that match pkg, skipping atoms redirected to new-style virtuals and atoms
# better satisfied by a visible higher-slot package.
5355 def _iter_atoms_for_pkg(self, pkg):
5356 # TODO: add multiple $ROOT support
5357 if pkg.root != self.target_root:
5359 atom_arg_map = self._atom_arg_map
5360 root_config = self.roots[pkg.root]
5361 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5362 atom_cp = portage.dep_getkey(atom)
# Skip old-style virtual matches when a new-style provider exists.
5363 if atom_cp != pkg.cp and \
5364 self._have_new_virt(pkg.root, atom_cp):
5366 visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5367 visible_pkgs.reverse() # descending order
5369 for visible_pkg in visible_pkgs:
5370 if visible_pkg.cp != atom_cp:
5372 if pkg >= visible_pkg:
5373 # This is descending order, and we're not
5374 # interested in any versions <= pkg given.
5376 if pkg.slot_atom != visible_pkg.slot_atom:
5377 higher_slot = visible_pkg
5379 if higher_slot is not None:
5381 for arg in atom_arg_map[(atom, pkg.root)]:
5382 if isinstance(arg, PackageArg) and \
# NOTE(review): extraction artifact — this very long method is missing many
# lines (loop headers such as `for x in myfiles:`, `try:`/`else:` lines,
# accumulator initializations like `args = []` / `lookup_owners = []`,
# several `continue`/`return` statements); lines preserved verbatim.
#
# Purpose: translate the raw command-line arguments (.tbz2 paths, .ebuild
# paths, filesystem paths, set names, and package atoms) into typed Arg
# objects, seed the dependency graph with them, then run _create_graph;
# returns (success, favorites) where favorites feed the world file.
5387 def select_files(self, myfiles):
5388 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
5389 appropriate depgraph and return a favorite list."""
5390 debug = "--debug" in self.myopts
5391 root_config = self.roots[self.target_root]
5392 sets = root_config.sets
5393 getSetAtoms = root_config.setconfig.getSetAtoms
5395 myroot = self.target_root
5396 dbs = self._filtered_trees[myroot]["dbs"]
5397 vardb = self.trees[myroot]["vartree"].dbapi
5398 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5399 portdb = self.trees[myroot]["porttree"].dbapi
5400 bindb = self.trees[myroot]["bintree"].dbapi
5401 pkgsettings = self.pkgsettings[myroot]
5403 onlydeps = "--onlydeps" in self.myopts
# --- Branch: binary package file (.tbz2) arguments ---
5406 ext = os.path.splitext(x)[1]
5408 if not os.path.exists(x):
5410 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5411 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5412 elif os.path.exists(
5413 os.path.join(pkgsettings["PKGDIR"], x)):
5414 x = os.path.join(pkgsettings["PKGDIR"], x)
5416 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5417 print "!!! Please ensure the tbz2 exists as specified.\n"
5418 return 0, myfavorites
5419 mytbz2=portage.xpak.tbz2(x)
5420 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5421 if os.path.realpath(x) != \
5422 os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5423 print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5424 return 0, myfavorites
5425 db_keys = list(bindb._aux_cache_keys)
5426 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5427 pkg = Package(type_name="binary", root_config=root_config,
5428 cpv=mykey, built=True, metadata=metadata,
5430 self._pkg_cache[pkg] = pkg
5431 args.append(PackageArg(arg=x, package=pkg,
5432 root_config=root_config))
# --- Branch: raw .ebuild file arguments ---
5433 elif ext==".ebuild":
5434 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5435 pkgdir = os.path.dirname(ebuild_path)
5436 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5437 cp = pkgdir[len(tree_root)+1:]
5438 e = portage.exception.PackageNotFound(
5439 ("%s is not in a valid portage tree " + \
5440 "hierarchy or does not exist") % x)
5441 if not portage.isvalidatom(cp):
5443 cat = portage.catsplit(cp)[0]
5444 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5445 if not portage.isvalidatom("="+mykey):
5447 ebuild_path = portdb.findname(mykey)
5449 if ebuild_path != os.path.join(os.path.realpath(tree_root),
5450 cp, os.path.basename(ebuild_path)):
5451 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5452 return 0, myfavorites
5453 if mykey not in portdb.xmatch(
5454 "match-visible", portage.dep_getkey(mykey)):
5455 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5456 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5457 print colorize("BAD", "*** page for details.")
5458 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5461 raise portage.exception.PackageNotFound(
5462 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5463 db_keys = list(portdb._aux_cache_keys)
5464 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5465 pkg = Package(type_name="ebuild", root_config=root_config,
5466 cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5467 pkgsettings.setcpv(pkg)
5468 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5469 self._pkg_cache[pkg] = pkg
5470 args.append(PackageArg(arg=x, package=pkg,
5471 root_config=root_config))
# --- Branch: absolute filesystem paths (resolved via owner lookup) ---
5472 elif x.startswith(os.path.sep):
5473 if not x.startswith(myroot):
5474 portage.writemsg(("\n\n!!! '%s' does not start with" + \
5475 " $ROOT.\n") % x, noiselevel=-1)
5477 # Queue these up since it's most efficient to handle
5478 # multiple files in a single iter_owners() call.
5479 lookup_owners.append(x)
# --- Branch: set names ("system"/"world" or @set syntax) ---
5481 if x in ("system", "world"):
5483 if x.startswith(SETPREFIX):
5484 s = x[len(SETPREFIX):]
5486 raise portage.exception.PackageSetNotFound(s)
5489 # Recursively expand sets so that containment tests in
5490 # self._get_parent_sets() properly match atoms in nested
5491 # sets (like if world contains system).
5492 expanded_set = InternalPackageSet(
5493 initial_atoms=getSetAtoms(s))
5494 self._sets[s] = expanded_set
5495 args.append(SetArg(arg=x, set=expanded_set,
5496 root_config=root_config))
# --- Branch: plain package atoms ---
5498 if not is_valid_package_atom(x):
5499 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5501 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5502 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5504 # Don't expand categories or old-style virtuals here unless
5505 # necessary. Expansion of old-style virtuals here causes at
5506 # least the following problems:
5507 # 1) It's more difficult to determine which set(s) an atom
5508 # came from, if any.
5509 # 2) It takes away freedom from the resolver to choose other
5510 # possible expansions when necessary.
5512 args.append(AtomArg(arg=x, atom=x,
5513 root_config=root_config))
# Category-less atom: expand, preferring the installed category if unique.
5515 expanded_atoms = self._dep_expand(root_config, x)
5516 installed_cp_set = set()
5517 for atom in expanded_atoms:
5518 atom_cp = portage.dep_getkey(atom)
5519 if vardb.cp_list(atom_cp):
5520 installed_cp_set.add(atom_cp)
5521 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5522 installed_cp = iter(installed_cp_set).next()
5523 expanded_atoms = [atom for atom in expanded_atoms \
5524 if portage.dep_getkey(atom) == installed_cp]
5526 if len(expanded_atoms) > 1:
5529 ambiguous_package_name(x, expanded_atoms, root_config,
5530 self.spinner, self.myopts)
5531 return False, myfavorites
5533 atom = expanded_atoms[0]
5535 null_atom = insert_category_into_atom(x, "null")
5536 null_cp = portage.dep_getkey(null_atom)
5537 cat, atom_pn = portage.catsplit(null_cp)
5538 virts_p = root_config.settings.get_virts_p().get(atom_pn)
5540 # Allow the depgraph to choose which virtual.
5541 atom = insert_category_into_atom(x, "virtual")
5543 atom = insert_category_into_atom(x, "null")
5545 args.append(AtomArg(arg=x, atom=atom,
5546 root_config=root_config))
# Resolve queued filesystem paths to owning packages in one pass.
5550 search_for_multiple = False
5551 if len(lookup_owners) > 1:
5552 search_for_multiple = True
5554 for x in lookup_owners:
5555 if not search_for_multiple and os.path.isdir(x):
5556 search_for_multiple = True
5557 relative_paths.append(x[len(myroot):])
5560 for pkg, relative_path in \
5561 real_vardb._owners.iter_owners(relative_paths):
5562 owners.add(pkg.mycpv)
5563 if not search_for_multiple:
5567 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5568 "by any package.\n") % lookup_owners[0], noiselevel=-1)
5572 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5574 # portage now masks packages with missing slot, but it's
5575 # possible that one was installed by an older version
5576 atom = portage.cpv_getkey(cpv)
5578 atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5579 args.append(AtomArg(arg=atom, atom=atom,
5580 root_config=root_config))
# Greedy-slot expansion under --update (two-pass, see numbered steps).
5582 if "--update" in self.myopts:
5583 # In some cases, the greedy slots behavior can pull in a slot that
5584 # the user would want to uninstall due to it being blocked by a
5585 # newer version in a different slot. Therefore, it's necessary to
5586 # detect and discard any that should be uninstalled. Each time
5587 # that arguments are updated, package selections are repeated in
5588 # order to ensure consistency with the current arguments:
5590 # 1) Initialize args
5591 # 2) Select packages and generate initial greedy atoms
5592 # 3) Update args with greedy atoms
5593 # 4) Select packages and generate greedy atoms again, while
5594 # accounting for any blockers between selected packages
5595 # 5) Update args with revised greedy atoms
5597 self._set_args(args)
5600 greedy_args.append(arg)
5601 if not isinstance(arg, AtomArg):
5603 for atom in self._greedy_slots(arg.root_config, arg.atom):
5605 AtomArg(arg=arg.arg, atom=atom,
5606 root_config=arg.root_config))
5608 self._set_args(greedy_args)
5611 # Revise greedy atoms, accounting for any blockers
5612 # between selected packages.
5613 revised_greedy_args = []
5615 revised_greedy_args.append(arg)
5616 if not isinstance(arg, AtomArg):
5618 for atom in self._greedy_slots(arg.root_config, arg.atom,
5619 blocker_lookahead=True):
5620 revised_greedy_args.append(
5621 AtomArg(arg=arg.arg, atom=atom,
5622 root_config=arg.root_config))
5623 args = revised_greedy_args
5624 del revised_greedy_args
5626 self._set_args(args)
# Deduplicate favorites while collecting atoms/set names.
5628 myfavorites = set(myfavorites)
5630 if isinstance(arg, (AtomArg, PackageArg)):
5631 myfavorites.add(arg.atom)
5632 elif isinstance(arg, SetArg):
5633 myfavorites.add(arg.arg)
5634 myfavorites = list(myfavorites)
5636 pprovideddict = pkgsettings.pprovideddict
5638 portage.writemsg("\n", noiselevel=-1)
5639 # Order needs to be preserved since a feature of --nodeps
5640 # is to allow the user to force a specific merge order.
# Seed the graph with one selected package per argument atom.
5644 for atom in arg.set:
5645 self.spinner.update()
5646 dep = Dependency(atom=atom, onlydeps=onlydeps,
5647 root=myroot, parent=arg)
5648 atom_cp = portage.dep_getkey(atom)
5650 pprovided = pprovideddict.get(portage.dep_getkey(atom))
5651 if pprovided and portage.match_from_list(atom, pprovided):
5652 # A provided package has been specified on the command line.
5653 self._pprovided_args.append((arg, atom))
5655 if isinstance(arg, PackageArg):
5656 if not self._add_pkg(arg.package, dep) or \
5657 not self._create_graph():
5658 sys.stderr.write(("\n\n!!! Problem resolving " + \
5659 "dependencies for %s\n") % arg.arg)
5660 return 0, myfavorites
5663 portage.writemsg(" Arg: %s\n Atom: %s\n" % \
5664 (arg, atom), noiselevel=-1)
5665 pkg, existing_node = self._select_package(
5666 myroot, atom, onlydeps=onlydeps)
5668 if not (isinstance(arg, SetArg) and \
5669 arg.name in ("system", "world")):
5670 self._unsatisfied_deps_for_display.append(
5671 ((myroot, atom), {}))
5672 return 0, myfavorites
5673 self._missing_args.append((arg, atom))
5675 if atom_cp != pkg.cp:
5676 # For old-style virtuals, we need to repeat the
5677 # package.provided check against the selected package.
5678 expanded_atom = atom.replace(atom_cp, pkg.cp)
5679 pprovided = pprovideddict.get(pkg.cp)
5681 portage.match_from_list(expanded_atom, pprovided):
5682 # A provided package has been
5683 # specified on the command line.
5684 self._pprovided_args.append((arg, atom))
5686 if pkg.installed and "selective" not in self.myparams:
5687 self._unsatisfied_deps_for_display.append(
5688 ((myroot, atom), {}))
5689 # Previous behavior was to bail out in this case, but
5690 # since the dep is satisfied by the installed package,
5691 # it's more friendly to continue building the graph
5692 # and just show a warning message. Therefore, only bail
5693 # out here if the atom is not from either the system or
5695 if not (isinstance(arg, SetArg) and \
5696 arg.name in ("system", "world")):
5697 return 0, myfavorites
5699 # Add the selected package to the graph as soon as possible
5700 # so that later dep_check() calls can use it as feedback
5701 # for making more consistent atom selections.
5702 if not self._add_pkg(pkg, dep):
5703 if isinstance(arg, SetArg):
5704 sys.stderr.write(("\n\n!!! Problem resolving " + \
5705 "dependencies for %s from %s\n") % \
5708 sys.stderr.write(("\n\n!!! Problem resolving " + \
5709 "dependencies for %s\n") % atom)
5710 return 0, myfavorites
# Signature failures abort the whole selection with an explanation.
5712 except portage.exception.MissingSignature, e:
5713 portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5714 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5715 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5716 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5717 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5718 return 0, myfavorites
5719 except portage.exception.InvalidSignature, e:
5720 portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5721 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5722 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5723 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5724 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5725 return 0, myfavorites
5726 except SystemExit, e:
5727 raise # Needed else can't exit
5728 except Exception, e:
5729 print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5730 print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5733 # Now that the root packages have been added to the graph,
5734 # process the dependencies.
5735 if not self._create_graph():
5736 return 0, myfavorites
# --usepkgonly: verify every merge node is backed by a binary package.
5739 if "--usepkgonly" in self.myopts:
5740 for xs in self.digraph.all_nodes():
5741 if not isinstance(xs, Package):
5743 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5747 print "Missing binary for:",xs[2]
5751 except self._unknown_internal_error:
5752 return False, myfavorites
5754 # We're true here unless we are missing binaries.
5755 return (not missing,myfavorites)
# NOTE(review): extraction artifact — docstring fences, loop headers over
# `args`, `continue` statements, and the refs-list init/append lines are
# missing from this extract; lines preserved verbatim.
#
# Purpose: rebuild the "args" package set and the (atom, root) -> args map
# from the given argument list, then invalidate the package-selection
# caches since argument changes affect selection results.
5757 def _set_args(self, args):
5759 Create the "args" package set from atoms and packages given as
5760 arguments. This method can be called multiple times if necessary.
5761 The package selection cache is automatically invalidated, since
5762 arguments influence package selections.
5764 args_set = self._sets["args"]
5767 if not isinstance(arg, (AtomArg, PackageArg)):
5770 if atom in args_set:
# Rebuild the aggregate atom set from every configured set.
5774 self._set_atoms.clear()
5775 self._set_atoms.update(chain(*self._sets.itervalues()))
5776 atom_arg_map = self._atom_arg_map
5777 atom_arg_map.clear()
5779 for atom in arg.set:
5780 atom_key = (atom, arg.root_config.root)
5781 refs = atom_arg_map.get(atom_key)
5784 atom_arg_map[atom_key] = refs
5788 # Invalidate the package selection cache, since
5789 # arguments influence package selections.
5790 self._highest_pkg_cache.clear()
5791 for trees in self._filtered_trees.itervalues():
5792 trees["porttree"].dbapi._clear_cache()
# NOTE(review): extraction artifact — docstring fences, early returns,
# `slots = set()` init, try/else lines, and a `continue`/`break` or two are
# missing from this extract; lines preserved verbatim.
#
# Purpose: find installed slots of the same package that differ from the
# slot of the highest visible match, returning their slot atoms; with
# blocker_lookahead, drop candidates whose DEPEND/RDEPEND/PDEPEND blockers
# conflict with the highest match or with each other (keeping the higher
# version of any conflicting pair).
5794 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5796 Return a list of slot atoms corresponding to installed slots that
5797 differ from the slot of the highest visible match. When
5798 blocker_lookahead is True, slot atoms that would trigger a blocker
5799 conflict are automatically discarded, potentially allowing automatic
5800 uninstallation of older slots when appropriate.
5802 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5803 if highest_pkg is None:
5805 vardb = root_config.trees["vartree"].dbapi
# Collect SLOT values of installed instances sharing highest_pkg's cp.
5807 for cpv in vardb.match(atom):
5808 # don't mix new virtuals with old virtuals
5809 if portage.cpv_getkey(cpv) == highest_pkg.cp:
5810 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5812 slots.add(highest_pkg.metadata["SLOT"])
5816 slots.remove(highest_pkg.metadata["SLOT"])
# Re-select a concrete package per remaining slot; keep only lower ones.
5819 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5820 pkg, in_graph = self._select_package(root_config.root, slot_atom)
5821 if pkg is not None and \
5822 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5823 greedy_pkgs.append(pkg)
5826 if not blocker_lookahead:
5827 return [pkg.slot_atom for pkg in greedy_pkgs]
# Blocker lookahead: parse each candidate's dep strings for blocker atoms.
5830 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5831 for pkg in greedy_pkgs + [highest_pkg]:
5832 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5834 atoms = self._select_atoms(
5835 pkg.root, dep_str, pkg.use.enabled,
5836 parent=pkg, strict=True)
5837 except portage.exception.InvalidDependString:
5839 blocker_atoms = (x for x in atoms if x.blocker)
5840 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5842 if highest_pkg not in blockers:
5845 # filter packages with invalid deps
5846 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5848 # filter packages that conflict with highest_pkg
5849 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5850 (blockers[highest_pkg].findAtomForPackage(pkg) or \
5851 blockers[pkg].findAtomForPackage(highest_pkg))]
5856 # If two packages conflict, discard the lower version.
5857 discard_pkgs = set()
5858 greedy_pkgs.sort(reverse=True)
5859 for i in xrange(len(greedy_pkgs) - 1):
5860 pkg1 = greedy_pkgs[i]
5861 if pkg1 in discard_pkgs:
5863 for j in xrange(i + 1, len(greedy_pkgs)):
5864 pkg2 = greedy_pkgs[j]
5865 if pkg2 in discard_pkgs:
5867 if blockers[pkg1].findAtomForPackage(pkg2) or \
5868 blockers[pkg2].findAtomForPackage(pkg1):
# pkg2 is the lower version (list sorted descending).
5870 discard_pkgs.add(pkg2)
5872 return [pkg.slot_atom for pkg in greedy_pkgs \
5873 if pkg not in discard_pkgs]
5875 def _select_atoms_from_graph(self, *pargs, **kwargs):
# Thin wrapper: force atom selection against self._graph_trees so choices
# prefer packages already in the graph (or installed and not scheduled for
# replacement), then delegate to _select_atoms_highest_available.
5877 Prefer atoms matching packages that have already been
5878 added to the graph or those that are installed and have
5879 not been scheduled for replacement.
5881 kwargs["trees"] = self._graph_trees
5882 return self._select_atoms_highest_available(*pargs, **kwargs)
5884 def _select_atoms_highest_available(self, root, depstring,
5885 myuse=None, parent=None, strict=True, trees=None, priority=None):
5886 """This will raise InvalidDependString if necessary. If trees is
5887 None then self._filtered_trees is used."""
# Runs portage.dep_check() on depstring and returns the selected atoms.
# NOTE(review): elided lines likely include the trees-is-None assignment
# guard, try/finally around dep_check, and the success check — confirm
# against the full file before editing.
5888 pkgsettings = self.pkgsettings[root]
5890 trees = self._filtered_trees
5891 if not getattr(priority, "buildtime", False):
5892 # The parent should only be passed to dep_check() for buildtime
5893 # dependencies since that's the only case when it's appropriate
5894 # to trigger the circular dependency avoidance code which uses it.
5895 # It's important not to trigger the same circular dependency
5896 # avoidance code for runtime dependencies since it's not needed
5897 # and it can promote an incorrect package choice.
5901 if parent is not None:
5902 trees[root]["parent"] = parent
# Temporarily relax global strict mode for the dep_check call, then
# restore it afterwards (and pop the temporary "parent" entry).
5904 portage.dep._dep_check_strict = False
5905 mycheck = portage.dep_check(depstring, None,
5906 pkgsettings, myuse=myuse,
5907 myroot=root, trees=trees)
5909 if parent is not None:
5910 trees[root].pop("parent")
5911 portage.dep._dep_check_strict = True
# On failure, dep_check's second element is the error message.
5913 raise portage.exception.InvalidDependString(mycheck[1])
5914 selected_atoms = mycheck[1]
5915 return selected_atoms
5917 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
# Purpose: print a user-facing explanation of why an atom could not be
# satisfied — missing USE flags / IUSE, masked packages (with an EAPI
# upgrade hint), or simply no matching ebuilds — then walk the digraph
# upward to show which parents/arguments pulled the dependency in.
# NOTE(review): many lines are elided in this view (try/except bodies,
# some print statements, the node initialization before the while loop).
5918 atom = portage.dep.Atom(atom)
5919 atom_set = InternalPackageSet(initial_atoms=(atom,))
# Build a USE-less (and slot-preserving) variant of the atom so that
# candidates masked only by USE deps are still found by match().
5920 atom_without_use = atom
5922 atom_without_use = portage.dep.remove_slot(atom)
5924 atom_without_use += ":" + atom.slot
5925 atom_without_use = portage.dep.Atom(atom_without_use)
5926 xinfo = '"%s"' % atom
5929 # Discard null/ from failed cpv_expand category expansion.
5930 xinfo = xinfo.replace("null/", "")
5931 masked_packages = []
5933 missing_licenses = []
5934 have_eapi_mask = False
5935 pkgsettings = self.pkgsettings[root]
5936 implicit_iuse = pkgsettings._get_implicit_iuse()
5937 root_config = self.roots[root]
5938 portdb = self.roots[root].trees["porttree"].dbapi
5939 dbs = self._filtered_trees[root]["dbs"]
# Scan every candidate db (ebuild/binary/installed) for near-matches.
5940 for db, pkg_type, built, installed, db_keys in dbs:
5944 if hasattr(db, "xmatch"):
5945 cpv_list = db.xmatch("match-all", atom_without_use)
5947 cpv_list = db.match(atom_without_use)
5950 for cpv in cpv_list:
5951 metadata, mreasons = get_mask_info(root_config, cpv,
5952 pkgsettings, db, pkg_type, built, installed, db_keys)
5953 if metadata is not None:
5954 pkg = Package(built=built, cpv=cpv,
5955 installed=installed, metadata=metadata,
5956 root_config=root_config)
5957 if pkg.cp != atom.cp:
5958 # A cpv can be returned from dbapi.match() as an
5959 # old-style virtual match even in cases when the
5960 # package does not actually PROVIDE the virtual.
5961 # Filter out any such false matches here.
5962 if not atom_set.findAtomForPackage(pkg):
# Unmasked but USE-dep-failing packages are reported separately.
5964 if atom.use and not mreasons:
5965 missing_use.append(pkg)
5967 masked_packages.append(
5968 (root_config, pkgsettings, cpv, metadata, mreasons))
# Classify each USE-dep failure as either missing IUSE or changeable USE.
5970 missing_use_reasons = []
5971 missing_iuse_reasons = []
5972 for pkg in missing_use:
5973 use = pkg.use.enabled
5974 iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
5975 iuse_re = re.compile("^(%s)$" % "|".join(iuse))
5977 for x in atom.use.required:
5978 if iuse_re.match(x) is None:
5979 missing_iuse.append(x)
5982 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
5983 missing_iuse_reasons.append((pkg, mreasons))
5985 need_enable = sorted(atom.use.enabled.difference(use))
5986 need_disable = sorted(atom.use.disabled.intersection(use))
5987 if need_enable or need_disable:
5989 changes.extend(colorize("red", "+" + x) \
5990 for x in need_enable)
5991 changes.extend(colorize("blue", "-" + x) \
5992 for x in need_disable)
5993 mreasons.append("Change USE: %s" % " ".join(changes))
5994 missing_use_reasons.append((pkg, mreasons))
5996 if missing_iuse_reasons and not missing_use_reasons:
5997 missing_use_reasons = missing_iuse_reasons
5998 elif missing_use_reasons:
5999 # Only show the latest version.
6000 del missing_use_reasons[1:]
6002 if missing_use_reasons:
6003 print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6004 print "!!! One of the following packages is required to complete your request:"
6005 for pkg, mreasons in missing_use_reasons:
6006 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6008 elif masked_packages:
6010 colorize("BAD", "All ebuilds that could satisfy ") + \
6011 colorize("INFORM", xinfo) + \
6012 colorize("BAD", " have been masked.")
6013 print "!!! One of the following masked packages is required to complete your request:"
6014 have_eapi_mask = show_masked_packages(masked_packages)
# If anything was EAPI-masked, explain that a portage upgrade is needed.
6017 msg = ("The current version of portage supports " + \
6018 "EAPI '%s'. You must upgrade to a newer version" + \
6019 " of portage before EAPI masked packages can" + \
6020 " be installed.") % portage.const.EAPI
6021 from textwrap import wrap
6022 for line in wrap(msg, 75):
6027 print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6029 # Show parent nodes and the argument that pulled them in.
6030 traversed_nodes = set()
6033 while node is not None:
6034 traversed_nodes.add(node)
6035 msg.append('(dependency required by "%s" [%s])' % \
6036 (colorize('INFORM', str(node.cpv)), node.type_name))
6037 # When traversing to parents, prefer arguments over packages
6038 # since arguments are root nodes. Never traverse the same
6039 # package twice, in order to prevent an infinite loop.
6040 selected_parent = None
6041 for parent in self.digraph.parent_nodes(node):
6042 if isinstance(parent, DependencyArg):
6043 msg.append('(dependency required by "%s" [argument])' % \
6044 (colorize('INFORM', str(parent))))
# A DependencyArg is a root node, so stop ascending here.
6045 selected_parent = None
6047 if parent not in traversed_nodes:
6048 selected_parent = parent
6049 node = selected_parent
6055 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
# Memoizing front-end for _select_pkg_highest_available_imp: consult the
# (root, atom, onlydeps)-keyed cache, refresh the cached entry once the
# selected package has actually been added to the graph, otherwise compute
# and cache the selection. NOTE(review): cache-hit unpacking and the return
# statement are elided in this view.
6056 cache_key = (root, atom, onlydeps)
6057 ret = self._highest_pkg_cache.get(cache_key)
6060 if pkg and not existing:
6061 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6062 if existing and existing == pkg:
6063 # Update the cache to reflect that the
6064 # package has been added to the graph.
6066 self._highest_pkg_cache[cache_key] = ret
6068 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6069 self._highest_pkg_cache[cache_key] = ret
# NOTE(review): the following lines belong to a separate method whose `def`
# line is elided from this view (it records visible packages into
# root_config.visible_pkgs) — do not treat them as part of this method.
6072 settings = pkg.root_config.settings
6073 if visible(settings, pkg) and not (pkg.installed and \
6074 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6075 pkg.root_config.visible_pkgs.cpv_inject(pkg)
6078 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
# Purpose: the core package-selection routine. Scans the filtered dbs
# (existing graph nodes first, then ebuild/binary/installed) for packages
# matching atom, applies visibility, KEYWORDS, USE-dep, --noreplace /
# --newuse / --reinstall and old-vs-new-style-virtual filtering, and
# returns (best_match_or_None, existing_graph_node_or_None).
# NOTE(review): this view elides many lines (continue/break statements,
# try/except bodies, several conditionals); comments below hedge where the
# control flow is not visible.
6079 root_config = self.roots[root]
6080 pkgsettings = self.pkgsettings[root]
6081 dbs = self._filtered_trees[root]["dbs"]
6082 vardb = self.roots[root].trees["vartree"].dbapi
6083 portdb = self.roots[root].trees["porttree"].dbapi
6084 # List of acceptable packages, ordered by type preference.
6085 matched_packages = []
6086 highest_version = None
6087 if not isinstance(atom, portage.dep.Atom):
6088 atom = portage.dep.Atom(atom)
6090 atom_set = InternalPackageSet(initial_atoms=(atom,))
6091 existing_node = None
6093 usepkgonly = "--usepkgonly" in self.myopts
6094 empty = "empty" in self.myparams
6095 selective = "selective" in self.myparams
6097 noreplace = "--noreplace" in self.myopts
6098 # Behavior of the "selective" parameter depends on
6099 # whether or not a package matches an argument atom.
6100 # If an installed package provides an old-style
6101 # virtual that is no longer provided by an available
6102 # package, the installed package may match an argument
6103 # atom even though none of the available packages do.
6104 # Therefore, "selective" logic does not consider
6105 # whether or not an installed package matches an
6106 # argument atom. It only considers whether or not
6107 # available packages match argument atoms, which is
6108 # represented by the found_available_arg flag.
6109 found_available_arg = False
# Two passes: first look for a matching node already in the graph, then
# fall back to a fresh selection from the dbs.
6110 for find_existing_node in True, False:
6113 for db, pkg_type, built, installed, db_keys in dbs:
6116 if installed and not find_existing_node:
6117 want_reinstall = reinstall or empty or \
6118 (found_available_arg and not selective)
6119 if want_reinstall and matched_packages:
6121 if hasattr(db, "xmatch"):
6122 cpv_list = db.xmatch("match-all", atom)
6124 cpv_list = db.match(atom)
6126 # USE=multislot can make an installed package appear as if
6127 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6128 # won't do any good as long as USE=multislot is enabled since
6129 # the newly built package still won't have the expected slot.
6130 # Therefore, assume that such SLOT dependencies are already
6131 # satisfied rather than forcing a rebuild.
6132 if installed and not cpv_list and atom.slot:
6133 for cpv in db.match(atom.cp):
6134 slot_available = False
6135 for other_db, other_type, other_built, \
6136 other_installed, other_keys in dbs:
6139 other_db.aux_get(cpv, ["SLOT"])[0]:
6140 slot_available = True
6144 if not slot_available:
6146 inst_pkg = self._pkg(cpv, "installed",
6147 root_config, installed=installed)
6148 # Remove the slot from the atom and verify that
6149 # the package matches the resulting atom.
6150 atom_without_slot = portage.dep.remove_slot(atom)
6152 atom_without_slot += str(atom.use)
6153 atom_without_slot = portage.dep.Atom(atom_without_slot)
6154 if portage.match_from_list(
6155 atom_without_slot, [inst_pkg]):
6156 cpv_list = [inst_pkg.cpv]
6161 pkg_status = "merge"
6162 if installed or onlydeps:
6163 pkg_status = "nomerge"
6166 for cpv in cpv_list:
6167 # Make --noreplace take precedence over --newuse.
6168 if not installed and noreplace and \
6169 cpv in vardb.match(atom):
6170 # If the installed version is masked, it may
6171 # be necessary to look at lower versions,
6172 # in case there is a visible downgrade.
6174 reinstall_for_flags = None
6175 cache_key = (pkg_type, root, cpv, pkg_status)
6176 calculated_use = True
6177 pkg = self._pkg_cache.get(cache_key)
6179 calculated_use = False
# On cache miss, build a fresh Package from the db's metadata.
6181 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6184 pkg = Package(built=built, cpv=cpv,
6185 installed=installed, metadata=metadata,
6186 onlydeps=onlydeps, root_config=root_config,
6188 metadata = pkg.metadata
6189 if not built and ("?" in metadata["LICENSE"] or \
6190 "?" in metadata["PROVIDE"]):
6191 # This is avoided whenever possible because
6192 # it's expensive. It only needs to be done here
6193 # if it has an effect on visibility.
6194 pkgsettings.setcpv(pkg)
6195 metadata["USE"] = pkgsettings["PORTAGE_USE"]
6196 calculated_use = True
6197 self._pkg_cache[pkg] = pkg
6199 if not installed or (built and matched_packages):
6200 # Only enforce visibility on installed packages
6201 # if there is at least one other visible package
6202 # available. By filtering installed masked packages
6203 # here, packages that have been masked since they
6204 # were installed can be automatically downgraded
6205 # to an unmasked version.
6207 if not visible(pkgsettings, pkg):
6209 except portage.exception.InvalidDependString:
6213 # Enable upgrade or downgrade to a version
6214 # with visible KEYWORDS when the installed
6215 # version is masked by KEYWORDS, but never
6216 # reinstall the same exact version only due
6217 # to a KEYWORDS mask.
6218 if built and matched_packages:
6220 different_version = None
6221 for avail_pkg in matched_packages:
6222 if not portage.dep.cpvequal(
6223 pkg.cpv, avail_pkg.cpv):
6224 different_version = avail_pkg
6226 if different_version is not None:
6229 pkgsettings._getMissingKeywords(
6230 pkg.cpv, pkg.metadata):
6233 # If the ebuild no longer exists or it's
6234 # keywords have been dropped, reject built
6235 # instances (installed or binary).
6236 # If --usepkgonly is enabled, assume that
6237 # the ebuild status should be ignored.
6241 pkg.cpv, "ebuild", root_config)
6242 except portage.exception.PackageNotFound:
6245 if not visible(pkgsettings, pkg_eb):
6248 if not pkg.built and not calculated_use:
6249 # This is avoided whenever possible because
6251 pkgsettings.setcpv(pkg)
6252 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6254 if pkg.cp != atom.cp:
6255 # A cpv can be returned from dbapi.match() as an
6256 # old-style virtual match even in cases when the
6257 # package does not actually PROVIDE the virtual.
6258 # Filter out any such false matches here.
6259 if not atom_set.findAtomForPackage(pkg):
6263 if root == self.target_root:
6265 # Ebuild USE must have been calculated prior
6266 # to this point, in case atoms have USE deps.
6267 myarg = self._iter_atoms_for_pkg(pkg).next()
6268 except StopIteration:
6270 except portage.exception.InvalidDependString:
6272 # masked by corruption
6274 if not installed and myarg:
6275 found_available_arg = True
# Reject ebuild candidates whose USE settings violate the atom's USE deps.
6277 if atom.use and not pkg.built:
6278 use = pkg.use.enabled
6279 if atom.use.enabled.difference(use):
6281 if atom.use.disabled.intersection(use):
6283 if pkg.cp == atom_cp:
6284 if highest_version is None:
6285 highest_version = pkg
6286 elif pkg > highest_version:
6287 highest_version = pkg
6288 # At this point, we've found the highest visible
6289 # match from the current repo. Any lower versions
6290 # from this repo are ignored, so this so the loop
6291 # will always end with a break statement below
6293 if find_existing_node:
6294 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6297 if portage.dep.match_from_list(atom, [e_pkg]):
6298 if highest_version and \
6299 e_pkg.cp == atom_cp and \
6300 e_pkg < highest_version and \
6301 e_pkg.slot_atom != highest_version.slot_atom:
6302 # There is a higher version available in a
6303 # different slot, so this existing node is
6307 matched_packages.append(e_pkg)
6308 existing_node = e_pkg
6310 # Compare built package to current config and
6311 # reject the built package if necessary.
6312 if built and not installed and \
6313 ("--newuse" in self.myopts or \
6314 "--reinstall" in self.myopts):
6315 iuses = pkg.iuse.all
6316 old_use = pkg.use.enabled
# Compute "now" USE from the corresponding ebuild when one is
# available, otherwise from the built package itself.
6318 pkgsettings.setcpv(myeb)
6320 pkgsettings.setcpv(pkg)
6321 now_use = pkgsettings["PORTAGE_USE"].split()
6322 forced_flags = set()
6323 forced_flags.update(pkgsettings.useforce)
6324 forced_flags.update(pkgsettings.usemask)
6326 if myeb and not usepkgonly:
6327 cur_iuse = myeb.iuse.all
6328 if self._reinstall_for_flags(forced_flags,
6332 # Compare current config to installed package
6333 # and do not reinstall if possible.
6334 if not installed and \
6335 ("--newuse" in self.myopts or \
6336 "--reinstall" in self.myopts) and \
6337 cpv in vardb.match(atom):
6338 pkgsettings.setcpv(pkg)
6339 forced_flags = set()
6340 forced_flags.update(pkgsettings.useforce)
6341 forced_flags.update(pkgsettings.usemask)
6342 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6343 old_iuse = set(filter_iuse_defaults(
6344 vardb.aux_get(cpv, ["IUSE"])[0].split()))
6345 cur_use = pkgsettings["PORTAGE_USE"].split()
6346 cur_iuse = pkg.iuse.all
6347 reinstall_for_flags = \
6348 self._reinstall_for_flags(
6349 forced_flags, old_use, old_iuse,
6351 if reinstall_for_flags:
6355 matched_packages.append(pkg)
6356 if reinstall_for_flags:
6357 self._reinstall_nodes[pkg] = \
6361 if not matched_packages:
6364 if "--debug" in self.myopts:
6365 for pkg in matched_packages:
6366 portage.writemsg("%s %s\n" % \
6367 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6369 # Filter out any old-style virtual matches if they are
6370 # mixed with new-style virtual matches.
6371 cp = portage.dep_getkey(atom)
6372 if len(matched_packages) > 1 and \
6373 "virtual" == portage.catsplit(cp)[0]:
6374 for pkg in matched_packages:
6377 # Got a new-style virtual, so filter
6378 # out any old-style virtuals.
6379 matched_packages = [pkg for pkg in matched_packages \
6383 if len(matched_packages) > 1:
# Multiple candidates remain: keep only those carrying the best version.
6384 bestmatch = portage.best(
6385 [pkg.cpv for pkg in matched_packages])
6386 matched_packages = [pkg for pkg in matched_packages \
6387 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6389 # ordered by type preference ("ebuild" type is the last resort)
6390 return matched_packages[-1], existing_node
6392 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
# Graph-restricted counterpart of _select_pkg_highest_available: match only
# against the graph db, return (highest_match, existing_graph_node).
# NOTE(review): the empty-matches fallback branch is elided in this view.
6394 Select packages that have already been added to the graph or
6395 those that are installed and have not been scheduled for
6398 graph_db = self._graph_trees[root]["porttree"].dbapi
6399 matches = graph_db.match_pkgs(atom)
6402 pkg = matches[-1] # highest match
6403 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6404 return pkg, in_graph
6406 def _complete_graph(self):
# Purpose: pull deep dependencies of the required sets (args/system/world)
# into the graph so that initially-satisfied deps are not broken by the
# planned merges. Only active with --complete-graph ("complete" param).
# NOTE(review): several early returns, the args-list initialization, and
# some loop headers are elided in this view.
6408 Add any deep dependencies of required sets (args, system, world) that
6409 have not been pulled into the graph yet. This ensures that the graph
6410 is consistent such that initially satisfied deep dependencies are not
6411 broken in the new graph. Initially unsatisfied dependencies are
6412 irrelevant since we only want to avoid breaking dependencies that are
6415 Since this method can consume enough time to disturb users, it is
6416 currently only enabled by the --complete-graph option.
6418 if "--buildpkgonly" in self.myopts or \
6419 "recurse" not in self.myparams:
6422 if "complete" not in self.myparams:
6423 # Skip this to avoid consuming enough time to disturb users.
6426 # Put the depgraph into a mode that causes it to only
6427 # select packages that have already been added to the
6428 # graph or those that are installed and have not been
6429 # scheduled for replacement. Also, toggle the "deep"
6430 # parameter so that all dependencies are traversed and
6432 self._select_atoms = self._select_atoms_from_graph
6433 self._select_package = self._select_pkg_from_graph
6434 already_deep = "deep" in self.myparams
6435 if not already_deep:
6436 self.myparams.add("deep")
6438 for root in self.roots:
6439 required_set_names = self._required_set_names.copy()
6440 if root == self.target_root and \
6441 (already_deep or "empty" in self.myparams):
6442 required_set_names.difference_update(self._sets)
6443 if not required_set_names and not self._ignored_deps:
6445 root_config = self.roots[root]
6446 setconfig = root_config.setconfig
6448 # Reuse existing SetArg instances when available.
6449 for arg in self.digraph.root_nodes():
6450 if not isinstance(arg, SetArg):
6452 if arg.root_config != root_config:
6454 if arg.name in required_set_names:
6456 required_set_names.remove(arg.name)
6457 # Create new SetArg instances only when necessary.
6458 for s in required_set_names:
6459 expanded_set = InternalPackageSet(
6460 initial_atoms=setconfig.getSetAtoms(s))
6461 atom = SETPREFIX + s
6462 args.append(SetArg(arg=atom, set=expanded_set,
6463 root_config=root_config))
6464 vardb = root_config.trees["vartree"].dbapi
# Queue every atom of every set argument for (re)traversal.
6466 for atom in arg.set:
6467 self._dep_stack.append(
6468 Dependency(atom=atom, root=root, parent=arg))
6469 if self._ignored_deps:
6470 self._dep_stack.extend(self._ignored_deps)
6471 self._ignored_deps = []
6472 if not self._create_graph(allow_unsatisfied=True):
6474 # Check the unsatisfied deps to see if any initially satisfied deps
6475 # will become unsatisfied due to an upgrade. Initially unsatisfied
6476 # deps are irrelevant since we only want to avoid breaking deps
6477 # that are initially satisfied.
6478 while self._unsatisfied_deps:
6479 dep = self._unsatisfied_deps.pop()
6480 matches = vardb.match_pkgs(dep.atom)
6482 self._initially_unsatisfied_deps.append(dep)
6484 # An scheduled installation broke a deep dependency.
6485 # Add the installed package to the graph so that it
6486 # will be appropriately reported as a slot collision
6487 # (possibly solvable via backtracking).
6488 pkg = matches[-1] # highest match
6489 if not self._add_pkg(pkg, dep):
6491 if not self._create_graph(allow_unsatisfied=True):
6495 def _pkg(self, cpv, type_name, root_config, installed=False):
# Cache-aware Package factory: look up (type_name, root, cpv, operation)
# in self._pkg_cache, otherwise build a Package from the original (un-
# filtered) tree's aux_get metadata, computing USE for ebuilds.
# NOTE(review): the cache-hit early return and the try/except around
# aux_get are elided in this view.
6497 Get a package instance from the cache, or create a new
6498 one if necessary. Raises KeyError from aux_get if it
6499 failures for some reason (package does not exist or is
6504 operation = "nomerge"
6505 pkg = self._pkg_cache.get(
6506 (type_name, root_config.root, cpv, operation))
6508 tree_type = self.pkg_tree_map[type_name]
6509 db = root_config.trees[tree_type].dbapi
# Use _trees_orig so metadata keys come from the unfiltered tree.
6510 db_keys = list(self._trees_orig[root_config.root][
6511 tree_type].dbapi._aux_cache_keys)
6513 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6515 raise portage.exception.PackageNotFound(cpv)
6516 pkg = Package(cpv=cpv, metadata=metadata,
6517 root_config=root_config, installed=installed)
6518 if type_name == "ebuild":
6519 settings = self.pkgsettings[root_config.root]
6520 settings.setcpv(pkg)
6521 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6522 self._pkg_cache[pkg] = pkg
6525 def validate_blockers(self):
6526 """Remove any blockers from the digraph that do not match any of the
6527 packages within the graph. If necessary, create hard deps to ensure
6528 correct merge order such that mutually blocking packages are never
6529 installed simultaneously."""
# Phase 1: gather blocker atoms for every installed package (using and
# refreshing BlockerCache). Phase 2: for each leaf blocker, resolve it
# against the initial (vartree) and final (mydbapi) dbs, scheduling
# "uninstall" tasks with hard ordering deps where that resolves the block,
# and recording unresolvable blockers in self._unsolvable_blockers.
# NOTE(review): many lines (loop headers, continue/break, some try bodies,
# and the final success/failure returns) are elided in this view.
6531 if "--buildpkgonly" in self.myopts or \
6532 "--nodeps" in self.myopts:
6535 #if "deep" in self.myparams:
6537 # Pull in blockers from all installed packages that haven't already
6538 # been pulled into the depgraph. This is not enabled by default
6539 # due to the performance penalty that is incurred by all the
6540 # additional dep_check calls that are required.
6542 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6543 for myroot in self.trees:
6544 vardb = self.trees[myroot]["vartree"].dbapi
6545 portdb = self.trees[myroot]["porttree"].dbapi
6546 pkgsettings = self.pkgsettings[myroot]
6547 final_db = self.mydbapi[myroot]
6549 blocker_cache = BlockerCache(myroot, vardb)
# Track cache entries not revalidated this pass; purged at the end.
6550 stale_cache = set(blocker_cache)
6553 stale_cache.discard(cpv)
6554 pkg_in_graph = self.digraph.contains(pkg)
6556 # Check for masked installed packages. Only warn about
6557 # packages that are in the graph in order to avoid warning
6558 # about those that will be automatically uninstalled during
6559 # the merge process or by --depclean.
6561 if pkg_in_graph and not visible(pkgsettings, pkg):
6562 self._masked_installed.add(pkg)
6564 blocker_atoms = None
# Prefer blocker atoms already recorded in the graph for this pkg.
6570 self._blocker_parents.child_nodes(pkg))
6575 self._irrelevant_blockers.child_nodes(pkg))
6578 if blockers is not None:
6579 blockers = set(str(blocker.atom) \
6580 for blocker in blockers)
6582 # If this node has any blockers, create a "nomerge"
6583 # node for it so that they can be enforced.
6584 self.spinner.update()
6585 blocker_data = blocker_cache.get(cpv)
# Discard cache entries whose COUNTER no longer matches the vdb.
6586 if blocker_data is not None and \
6587 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6590 # If blocker data from the graph is available, use
6591 # it to validate the cache and update the cache if
6593 if blocker_data is not None and \
6594 blockers is not None:
6595 if not blockers.symmetric_difference(
6596 blocker_data.atoms):
6600 if blocker_data is None and \
6601 blockers is not None:
6602 # Re-use the blockers from the graph.
6603 blocker_atoms = sorted(blockers)
6604 counter = long(pkg.metadata["COUNTER"])
6606 blocker_cache.BlockerData(counter, blocker_atoms)
6607 blocker_cache[pkg.cpv] = blocker_data
6611 blocker_atoms = blocker_data.atoms
6613 # Use aux_get() to trigger FakeVartree global
6614 # updates on *DEPEND when appropriate.
6615 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6616 # It is crucial to pass in final_db here in order to
6617 # optimize dep_check calls by eliminating atoms via
6618 # dep_wordreduce and dep_eval calls.
6620 portage.dep._dep_check_strict = False
6622 success, atoms = portage.dep_check(depstr,
6623 final_db, pkgsettings, myuse=pkg.use.enabled,
6624 trees=self._graph_trees, myroot=myroot)
6625 except Exception, e:
6626 if isinstance(e, SystemExit):
6628 # This is helpful, for example, if a ValueError
6629 # is thrown from cpv_expand due to multiple
6630 # matches (this can happen if an atom lacks a
6632 show_invalid_depstring_notice(
6633 pkg, depstr, str(e))
# Restore global strict mode regardless of dep_check outcome.
6637 portage.dep._dep_check_strict = True
6639 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6640 if replacement_pkg and \
6641 replacement_pkg[0].operation == "merge":
6642 # This package is being replaced anyway, so
6643 # ignore invalid dependencies so as not to
6644 # annoy the user too much (otherwise they'd be
6645 # forced to manually unmerge it first).
6647 show_invalid_depstring_notice(pkg, depstr, atoms)
6649 blocker_atoms = [myatom for myatom in atoms \
6650 if myatom.startswith("!")]
6651 blocker_atoms.sort()
6652 counter = long(pkg.metadata["COUNTER"])
6653 blocker_cache[cpv] = \
6654 blocker_cache.BlockerData(counter, blocker_atoms)
6657 for atom in blocker_atoms:
6658 blocker = Blocker(atom=portage.dep.Atom(atom),
6659 eapi=pkg.metadata["EAPI"], root=myroot)
6660 self._blocker_parents.add(blocker, pkg)
6661 except portage.exception.InvalidAtom, e:
6662 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6663 show_invalid_depstring_notice(
6664 pkg, depstr, "Invalid Atom: %s" % (e,))
6666 for cpv in stale_cache:
6667 del blocker_cache[cpv]
6668 blocker_cache.flush()
6671 # Discard any "uninstall" tasks scheduled by previous calls
6672 # to this method, since those tasks may not make sense given
6673 # the current graph state.
6674 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6675 if previous_uninstall_tasks:
6676 self._blocker_uninstalls = digraph()
6677 self.digraph.difference_update(previous_uninstall_tasks)
6679 for blocker in self._blocker_parents.leaf_nodes():
6680 self.spinner.update()
6681 root_config = self.roots[blocker.root]
6682 virtuals = root_config.settings.getvirtuals()
6683 myroot = blocker.root
6684 initial_db = self.trees[myroot]["vartree"].dbapi
6685 final_db = self.mydbapi[myroot]
6687 provider_virtual = False
6688 if blocker.cp in virtuals and \
6689 not self._have_new_virt(blocker.root, blocker.cp):
6690 provider_virtual = True
# Old-style virtual blocker: expand to one atom per provider.
6692 if provider_virtual:
6694 for provider_entry in virtuals[blocker.cp]:
6696 portage.dep_getkey(provider_entry)
6697 atoms.append(blocker.atom.replace(
6698 blocker.cp, provider_cp))
6700 atoms = [blocker.atom]
6702 blocked_initial = []
6704 blocked_initial.extend(initial_db.match_pkgs(atom))
6708 blocked_final.extend(final_db.match_pkgs(atom))
6710 if not blocked_initial and not blocked_final:
6711 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6712 self._blocker_parents.remove(blocker)
6713 # Discard any parents that don't have any more blockers.
6714 for pkg in parent_pkgs:
6715 self._irrelevant_blockers.add(blocker, pkg)
6716 if not self._blocker_parents.child_nodes(pkg):
6717 self._blocker_parents.remove(pkg)
6719 for parent in self._blocker_parents.parent_nodes(blocker):
6720 unresolved_blocks = False
6721 depends_on_order = set()
6722 for pkg in blocked_initial:
6723 if pkg.slot_atom == parent.slot_atom:
6724 # TODO: Support blocks within slots in cases where it
6725 # might make sense. For example, a new version might
6726 # require that the old version be uninstalled at build
6729 if parent.installed:
6730 # Two currently installed packages conflict with
6731 # eachother. Ignore this case since the damage
6732 # is already done and this would be likely to
6733 # confuse users if displayed like a normal blocker.
6736 self._blocked_pkgs.add(pkg, blocker)
6738 if parent.operation == "merge":
6739 # Maybe the blocked package can be replaced or simply
6740 # unmerged to resolve this block.
6741 depends_on_order.add((pkg, parent))
6743 # None of the above blocker resolutions techniques apply,
6744 # so apparently this one is unresolvable.
6745 unresolved_blocks = True
6746 for pkg in blocked_final:
6747 if pkg.slot_atom == parent.slot_atom:
6748 # TODO: Support blocks within slots.
6750 if parent.operation == "nomerge" and \
6751 pkg.operation == "nomerge":
6752 # This blocker will be handled the next time that a
6753 # merge of either package is triggered.
6756 self._blocked_pkgs.add(pkg, blocker)
6758 # Maybe the blocking package can be
6759 # unmerged to resolve this block.
6760 if parent.operation == "merge" and pkg.installed:
6761 depends_on_order.add((pkg, parent))
6763 elif parent.operation == "nomerge":
6764 depends_on_order.add((parent, pkg))
6766 # None of the above blocker resolutions techniques apply,
6767 # so apparently this one is unresolvable.
6768 unresolved_blocks = True
6770 # Make sure we don't unmerge any package that have been pulled
6772 if not unresolved_blocks and depends_on_order:
6773 for inst_pkg, inst_task in depends_on_order:
6774 if self.digraph.contains(inst_pkg) and \
6775 self.digraph.parent_nodes(inst_pkg):
6776 unresolved_blocks = True
6779 if not unresolved_blocks and depends_on_order:
6780 for inst_pkg, inst_task in depends_on_order:
# Clone the installed package as an explicit "uninstall" task.
6781 uninst_task = Package(built=inst_pkg.built,
6782 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6783 metadata=inst_pkg.metadata,
6784 operation="uninstall",
6785 root_config=inst_pkg.root_config,
6786 type_name=inst_pkg.type_name)
6787 self._pkg_cache[uninst_task] = uninst_task
6788 # Enforce correct merge order with a hard dep.
6789 self.digraph.addnode(uninst_task, inst_task,
6790 priority=BlockerDepPriority.instance)
6791 # Count references to this blocker so that it can be
6792 # invalidated after nodes referencing it have been
6794 self._blocker_uninstalls.addnode(uninst_task, blocker)
6795 if not unresolved_blocks and not depends_on_order:
6796 self._irrelevant_blockers.add(blocker, parent)
6797 self._blocker_parents.remove_edge(blocker, parent)
6798 if not self._blocker_parents.parent_nodes(blocker):
6799 self._blocker_parents.remove(blocker)
6800 if not self._blocker_parents.child_nodes(parent):
6801 self._blocker_parents.remove(parent)
6802 if unresolved_blocks:
6803 self._unsolvable_blockers.add(blocker, parent)
6807 def _accept_blocker_conflicts(self):
# Returns a truthy flag when any of the listed options is active, meaning
# blocker conflicts can be tolerated (nothing is actually merged, or deps
# are being ignored). NOTE(review): the flag assignments and the return
# statement are elided in this view.
6809 for x in ("--buildpkgonly", "--fetchonly",
6810 "--fetch-all-uri", "--nodeps"):
6811 if x in self.myopts:
6816 def _merge_order_bias(self, mygraph):
# Sorts mygraph.order in place: uninstall tasks and deep system runtime
# deps are promoted, remaining nodes ordered by descending parent count.
6818 For optimal leaf node selection, promote deep system runtime deps and
6819 order nodes from highest to lowest overall reference count.
# node_info maps each node to its number of parents (reference count).
6823 for node in mygraph.order:
6824 node_info[node] = len(mygraph.parent_nodes(node))
6825 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
6827 def cmp_merge_preference(node1, node2):
# Uninstall operations sort ahead of everything else; the elided
# return statements presumably encode that ordering — TODO confirm.
6829 if node1.operation == 'uninstall':
6830 if node2.operation == 'uninstall':
6834 if node2.operation == 'uninstall':
6835 if node1.operation == 'uninstall':
6839 node1_sys = node1 in deep_system_deps
6840 node2_sys = node2 in deep_system_deps
6841 if node1_sys != node2_sys:
# Higher reference count sorts earlier (descending order).
6846 return node_info[node2] - node_info[node1]
6848 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
6850 def altlist(self, reversed=False):
6852 while self._serialized_tasks_cache is None:
6853 self._resolve_conflicts()
6855 self._serialized_tasks_cache, self._scheduler_graph = \
6856 self._serialize_tasks()
6857 except self._serialize_tasks_retry:
6860 retlist = self._serialized_tasks_cache[:]
6865 def schedulerGraph(self):
6867 The scheduler graph is identical to the normal one except that
6868 uninstall edges are reversed in specific cases that require
6869 conflicting packages to be temporarily installed simultaneously.
6870 This is intended for use by the Scheduler in it's parallelization
6871 logic. It ensures that temporary simultaneous installation of
6872 conflicting packages is avoided when appropriate (especially for
6873 !!atom blockers), but allowed in specific cases that require it.
6875 Note that this method calls break_refs() which alters the state of
6876 internal Package instances such that this depgraph instance should
6877 not be used to perform any more calculations.
6879 if self._scheduler_graph is None:
6881 self.break_refs(self._scheduler_graph.order)
6882 return self._scheduler_graph
6884 def break_refs(self, nodes):
6886 Take a mergelist like that returned from self.altlist() and
6887 break any references that lead back to the depgraph. This is
6888 useful if you want to hold references to packages without
6889 also holding the depgraph on the heap.
6892 if hasattr(node, "root_config"):
6893 # The FakeVartree references the _package_cache which
6894 # references the depgraph. So that Package instances don't
6895 # hold the depgraph and FakeVartree on the heap, replace
6896 # the RootConfig that references the FakeVartree with the
6897 # original RootConfig instance which references the actual
6899 node.root_config = \
6900 self._trees_orig[node.root_config.root]["root_config"]
6902 def _resolve_conflicts(self):
6903 if not self._complete_graph():
6904 raise self._unknown_internal_error()
6906 if not self.validate_blockers():
6907 raise self._unknown_internal_error()
6909 if self._slot_collision_info:
6910 self._process_slot_conflicts()
6912 def _serialize_tasks(self):
6914 if "--debug" in self.myopts:
6915 writemsg("\ndigraph:\n\n", noiselevel=-1)
6916 self.digraph.debug_print()
6917 writemsg("\n", noiselevel=-1)
6919 scheduler_graph = self.digraph.copy()
6920 mygraph=self.digraph.copy()
6921 # Prune "nomerge" root nodes if nothing depends on them, since
6922 # otherwise they slow down merge order calculation. Don't remove
6923 # non-root nodes since they help optimize merge order in some cases
6924 # such as revdep-rebuild.
6925 removed_nodes = set()
6927 for node in mygraph.root_nodes():
6928 if not isinstance(node, Package) or \
6929 node.installed or node.onlydeps:
6930 removed_nodes.add(node)
6932 self.spinner.update()
6933 mygraph.difference_update(removed_nodes)
6934 if not removed_nodes:
6936 removed_nodes.clear()
6937 self._merge_order_bias(mygraph)
6938 def cmp_circular_bias(n1, n2):
6940 RDEPEND is stronger than PDEPEND and this function
6941 measures such a strength bias within a circular
6942 dependency relationship.
6944 n1_n2_medium = n2 in mygraph.child_nodes(n1,
6945 ignore_priority=priority_range.ignore_medium_soft)
6946 n2_n1_medium = n1 in mygraph.child_nodes(n2,
6947 ignore_priority=priority_range.ignore_medium_soft)
6948 if n1_n2_medium == n2_n1_medium:
6953 myblocker_uninstalls = self._blocker_uninstalls.copy()
6955 # Contains uninstall tasks that have been scheduled to
6956 # occur after overlapping blockers have been installed.
6957 scheduled_uninstalls = set()
6958 # Contains any Uninstall tasks that have been ignored
6959 # in order to avoid the circular deps code path. These
6960 # correspond to blocker conflicts that could not be
6962 ignored_uninstall_tasks = set()
6963 have_uninstall_task = False
6964 complete = "complete" in self.myparams
6967 def get_nodes(**kwargs):
6969 Returns leaf nodes excluding Uninstall instances
6970 since those should be executed as late as possible.
6972 return [node for node in mygraph.leaf_nodes(**kwargs) \
6973 if isinstance(node, Package) and \
6974 (node.operation != "uninstall" or \
6975 node in scheduled_uninstalls)]
6977 # sys-apps/portage needs special treatment if ROOT="/"
6978 running_root = self._running_root.root
6979 from portage.const import PORTAGE_PACKAGE_ATOM
6980 runtime_deps = InternalPackageSet(
6981 initial_atoms=[PORTAGE_PACKAGE_ATOM])
6982 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
6983 PORTAGE_PACKAGE_ATOM)
6984 replacement_portage = self.mydbapi[running_root].match_pkgs(
6985 PORTAGE_PACKAGE_ATOM)
6988 running_portage = running_portage[0]
6990 running_portage = None
6992 if replacement_portage:
6993 replacement_portage = replacement_portage[0]
6995 replacement_portage = None
6997 if replacement_portage == running_portage:
6998 replacement_portage = None
7000 if replacement_portage is not None:
7001 # update from running_portage to replacement_portage asap
7002 asap_nodes.append(replacement_portage)
7004 if running_portage is not None:
7006 portage_rdepend = self._select_atoms_highest_available(
7007 running_root, running_portage.metadata["RDEPEND"],
7008 myuse=running_portage.use.enabled,
7009 parent=running_portage, strict=False)
7010 except portage.exception.InvalidDependString, e:
7011 portage.writemsg("!!! Invalid RDEPEND in " + \
7012 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7013 (running_root, running_portage.cpv, e), noiselevel=-1)
7015 portage_rdepend = []
7016 runtime_deps.update(atom for atom in portage_rdepend \
7017 if not atom.startswith("!"))
7019 def gather_deps(ignore_priority, mergeable_nodes,
7020 selected_nodes, node):
7022 Recursively gather a group of nodes that RDEPEND on
7023 eachother. This ensures that they are merged as a group
7024 and get their RDEPENDs satisfied as soon as possible.
7026 if node in selected_nodes:
7028 if node not in mergeable_nodes:
7030 if node == replacement_portage and \
7031 mygraph.child_nodes(node,
7032 ignore_priority=priority_range.ignore_medium_soft):
7033 # Make sure that portage always has all of it's
7034 # RDEPENDs installed first.
7036 selected_nodes.add(node)
7037 for child in mygraph.child_nodes(node,
7038 ignore_priority=ignore_priority):
7039 if not gather_deps(ignore_priority,
7040 mergeable_nodes, selected_nodes, child):
7044 def ignore_uninst_or_med(priority):
7045 if priority is BlockerDepPriority.instance:
7047 return priority_range.ignore_medium(priority)
7049 def ignore_uninst_or_med_soft(priority):
7050 if priority is BlockerDepPriority.instance:
7052 return priority_range.ignore_medium_soft(priority)
7054 tree_mode = "--tree" in self.myopts
7055 # Tracks whether or not the current iteration should prefer asap_nodes
7056 # if available. This is set to False when the previous iteration
7057 # failed to select any nodes. It is reset whenever nodes are
7058 # successfully selected.
7061 # Controls whether or not the current iteration should drop edges that
7062 # are "satisfied" by installed packages, in order to solve circular
7063 # dependencies. The deep runtime dependencies of installed packages are
7064 # not checked in this case (bug #199856), so it must be avoided
7065 # whenever possible.
7066 drop_satisfied = False
7068 # State of variables for successive iterations that loosen the
7069 # criteria for node selection.
7071 # iteration prefer_asap drop_satisfied
7076 # If no nodes are selected on the last iteration, it is due to
7077 # unresolved blockers or circular dependencies.
7079 while not mygraph.empty():
7080 self.spinner.update()
7081 selected_nodes = None
7082 ignore_priority = None
7083 if drop_satisfied or (prefer_asap and asap_nodes):
7084 priority_range = DepPrioritySatisfiedRange
7086 priority_range = DepPriorityNormalRange
7087 if prefer_asap and asap_nodes:
7088 # ASAP nodes are merged before their soft deps. Go ahead and
7089 # select root nodes here if necessary, since it's typical for
7090 # the parent to have been removed from the graph already.
7091 asap_nodes = [node for node in asap_nodes \
7092 if mygraph.contains(node)]
7093 for node in asap_nodes:
7094 if not mygraph.child_nodes(node,
7095 ignore_priority=priority_range.ignore_soft):
7096 selected_nodes = [node]
7097 asap_nodes.remove(node)
7099 if not selected_nodes and \
7100 not (prefer_asap and asap_nodes):
7101 for i in xrange(priority_range.NONE,
7102 priority_range.MEDIUM_SOFT + 1):
7103 ignore_priority = priority_range.ignore_priority[i]
7104 nodes = get_nodes(ignore_priority=ignore_priority)
7106 # If there is a mix of uninstall nodes with other
7107 # types, save the uninstall nodes for later since
7108 # sometimes a merge node will render an uninstall
7109 # node unnecessary (due to occupying the same slot),
7110 # and we want to avoid executing a separate uninstall
7111 # task in that case.
7113 good_uninstalls = []
7114 with_some_uninstalls_excluded = []
7116 if node.operation == "uninstall":
7117 slot_node = self.mydbapi[node.root
7118 ].match_pkgs(node.slot_atom)
7120 slot_node[0].operation == "merge":
7122 good_uninstalls.append(node)
7123 with_some_uninstalls_excluded.append(node)
7125 nodes = good_uninstalls
7126 elif with_some_uninstalls_excluded:
7127 nodes = with_some_uninstalls_excluded
7131 if ignore_priority is None and not tree_mode:
7132 # Greedily pop all of these nodes since no
7133 # relationship has been ignored. This optimization
7134 # destroys --tree output, so it's disabled in tree
7136 selected_nodes = nodes
7138 # For optimal merge order:
7139 # * Only pop one node.
7140 # * Removing a root node (node without a parent)
7141 # will not produce a leaf node, so avoid it.
7142 # * It's normal for a selected uninstall to be a
7143 # root node, so don't check them for parents.
7145 if node.operation == "uninstall" or \
7146 mygraph.parent_nodes(node):
7147 selected_nodes = [node]
7153 if not selected_nodes:
7154 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7156 mergeable_nodes = set(nodes)
7157 if prefer_asap and asap_nodes:
7159 for i in xrange(priority_range.SOFT,
7160 priority_range.MEDIUM_SOFT + 1):
7161 ignore_priority = priority_range.ignore_priority[i]
7163 if not mygraph.parent_nodes(node):
7165 selected_nodes = set()
7166 if gather_deps(ignore_priority,
7167 mergeable_nodes, selected_nodes, node):
7170 selected_nodes = None
7174 if prefer_asap and asap_nodes and not selected_nodes:
7175 # We failed to find any asap nodes to merge, so ignore
7176 # them for the next iteration.
7180 if selected_nodes and ignore_priority is not None:
7181 # Try to merge ignored medium_soft deps as soon as possible
7182 # if they're not satisfied by installed packages.
7183 for node in selected_nodes:
7184 children = set(mygraph.child_nodes(node))
7185 soft = children.difference(
7186 mygraph.child_nodes(node,
7187 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7188 medium_soft = children.difference(
7189 mygraph.child_nodes(node,
7191 DepPrioritySatisfiedRange.ignore_medium_soft))
7192 medium_soft.difference_update(soft)
7193 for child in medium_soft:
7194 if child in selected_nodes:
7196 if child in asap_nodes:
7198 asap_nodes.append(child)
7200 if selected_nodes and len(selected_nodes) > 1:
7201 if not isinstance(selected_nodes, list):
7202 selected_nodes = list(selected_nodes)
7203 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7205 if not selected_nodes and not myblocker_uninstalls.is_empty():
7206 # An Uninstall task needs to be executed in order to
7207 # avoid conflict if possible.
7210 priority_range = DepPrioritySatisfiedRange
7212 priority_range = DepPriorityNormalRange
7214 mergeable_nodes = get_nodes(
7215 ignore_priority=ignore_uninst_or_med)
7217 min_parent_deps = None
7219 for task in myblocker_uninstalls.leaf_nodes():
7220 # Do some sanity checks so that system or world packages
7221 # don't get uninstalled inappropriately here (only really
7222 # necessary when --complete-graph has not been enabled).
7224 if task in ignored_uninstall_tasks:
7227 if task in scheduled_uninstalls:
7228 # It's been scheduled but it hasn't
7229 # been executed yet due to dependence
7230 # on installation of blocking packages.
7233 root_config = self.roots[task.root]
7234 inst_pkg = self._pkg_cache[
7235 ("installed", task.root, task.cpv, "nomerge")]
7237 if self.digraph.contains(inst_pkg):
7240 forbid_overlap = False
7241 heuristic_overlap = False
7242 for blocker in myblocker_uninstalls.parent_nodes(task):
7243 if blocker.eapi in ("0", "1"):
7244 heuristic_overlap = True
7245 elif blocker.atom.blocker.overlap.forbid:
7246 forbid_overlap = True
7248 if forbid_overlap and running_root == task.root:
7251 if heuristic_overlap and running_root == task.root:
7252 # Never uninstall sys-apps/portage or it's essential
7253 # dependencies, except through replacement.
7255 runtime_dep_atoms = \
7256 list(runtime_deps.iterAtomsForPackage(task))
7257 except portage.exception.InvalidDependString, e:
7258 portage.writemsg("!!! Invalid PROVIDE in " + \
7259 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7260 (task.root, task.cpv, e), noiselevel=-1)
7264 # Don't uninstall a runtime dep if it appears
7265 # to be the only suitable one installed.
7267 vardb = root_config.trees["vartree"].dbapi
7268 for atom in runtime_dep_atoms:
7269 other_version = None
7270 for pkg in vardb.match_pkgs(atom):
7271 if pkg.cpv == task.cpv and \
7272 pkg.metadata["COUNTER"] == \
7273 task.metadata["COUNTER"]:
7277 if other_version is None:
7283 # For packages in the system set, don't take
7284 # any chances. If the conflict can't be resolved
7285 # by a normal replacement operation then abort.
7288 for atom in root_config.sets[
7289 "system"].iterAtomsForPackage(task):
7292 except portage.exception.InvalidDependString, e:
7293 portage.writemsg("!!! Invalid PROVIDE in " + \
7294 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7295 (task.root, task.cpv, e), noiselevel=-1)
7301 # Note that the world check isn't always
7302 # necessary since self._complete_graph() will
7303 # add all packages from the system and world sets to the
7304 # graph. This just allows unresolved conflicts to be
7305 # detected as early as possible, which makes it possible
7306 # to avoid calling self._complete_graph() when it is
7307 # unnecessary due to blockers triggering an abortion.
7309 # For packages in the world set, go ahead an uninstall
7310 # when necessary, as long as the atom will be satisfied
7311 # in the final state.
7312 graph_db = self.mydbapi[task.root]
7315 for atom in root_config.sets[
7316 "world"].iterAtomsForPackage(task):
7318 for pkg in graph_db.match_pkgs(atom):
7325 self._blocked_world_pkgs[inst_pkg] = atom
7327 except portage.exception.InvalidDependString, e:
7328 portage.writemsg("!!! Invalid PROVIDE in " + \
7329 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7330 (task.root, task.cpv, e), noiselevel=-1)
7336 # Check the deps of parent nodes to ensure that
7337 # the chosen task produces a leaf node. Maybe
7338 # this can be optimized some more to make the
7339 # best possible choice, but the current algorithm
7340 # is simple and should be near optimal for most
7342 mergeable_parent = False
7344 for parent in mygraph.parent_nodes(task):
7345 parent_deps.update(mygraph.child_nodes(parent,
7346 ignore_priority=priority_range.ignore_medium_soft))
7347 if parent in mergeable_nodes and \
7348 gather_deps(ignore_uninst_or_med_soft,
7349 mergeable_nodes, set(), parent):
7350 mergeable_parent = True
7352 if not mergeable_parent:
7355 parent_deps.remove(task)
7356 if min_parent_deps is None or \
7357 len(parent_deps) < min_parent_deps:
7358 min_parent_deps = len(parent_deps)
7361 if uninst_task is not None:
7362 # The uninstall is performed only after blocking
7363 # packages have been merged on top of it. File
7364 # collisions between blocking packages are detected
7365 # and removed from the list of files to be uninstalled.
7366 scheduled_uninstalls.add(uninst_task)
7367 parent_nodes = mygraph.parent_nodes(uninst_task)
7369 # Reverse the parent -> uninstall edges since we want
7370 # to do the uninstall after blocking packages have
7371 # been merged on top of it.
7372 mygraph.remove(uninst_task)
7373 for blocked_pkg in parent_nodes:
7374 mygraph.add(blocked_pkg, uninst_task,
7375 priority=BlockerDepPriority.instance)
7376 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7377 scheduler_graph.add(blocked_pkg, uninst_task,
7378 priority=BlockerDepPriority.instance)
7380 # Reset the state variables for leaf node selection and
7381 # continue trying to select leaf nodes.
7383 drop_satisfied = False
7386 if not selected_nodes:
7387 # Only select root nodes as a last resort. This case should
7388 # only trigger when the graph is nearly empty and the only
7389 # remaining nodes are isolated (no parents or children). Since
7390 # the nodes must be isolated, ignore_priority is not needed.
7391 selected_nodes = get_nodes()
7393 if not selected_nodes and not drop_satisfied:
7394 drop_satisfied = True
7397 if not selected_nodes and not myblocker_uninstalls.is_empty():
7398 # If possible, drop an uninstall task here in order to avoid
7399 # the circular deps code path. The corresponding blocker will
7400 # still be counted as an unresolved conflict.
7402 for node in myblocker_uninstalls.leaf_nodes():
7404 mygraph.remove(node)
7409 ignored_uninstall_tasks.add(node)
7412 if uninst_task is not None:
7413 # Reset the state variables for leaf node selection and
7414 # continue trying to select leaf nodes.
7416 drop_satisfied = False
7419 if not selected_nodes:
7420 self._circular_deps_for_display = mygraph
7421 raise self._unknown_internal_error()
7423 # At this point, we've succeeded in selecting one or more nodes, so
7424 # reset state variables for leaf node selection.
7426 drop_satisfied = False
7428 mygraph.difference_update(selected_nodes)
7430 for node in selected_nodes:
7431 if isinstance(node, Package) and \
7432 node.operation == "nomerge":
7435 # Handle interactions between blockers
7436 # and uninstallation tasks.
7437 solved_blockers = set()
7439 if isinstance(node, Package) and \
7440 "uninstall" == node.operation:
7441 have_uninstall_task = True
7444 vardb = self.trees[node.root]["vartree"].dbapi
7445 previous_cpv = vardb.match(node.slot_atom)
7447 # The package will be replaced by this one, so remove
7448 # the corresponding Uninstall task if necessary.
7449 previous_cpv = previous_cpv[0]
7451 ("installed", node.root, previous_cpv, "uninstall")
7453 mygraph.remove(uninst_task)
7457 if uninst_task is not None and \
7458 uninst_task not in ignored_uninstall_tasks and \
7459 myblocker_uninstalls.contains(uninst_task):
7460 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7461 myblocker_uninstalls.remove(uninst_task)
7462 # Discard any blockers that this Uninstall solves.
7463 for blocker in blocker_nodes:
7464 if not myblocker_uninstalls.child_nodes(blocker):
7465 myblocker_uninstalls.remove(blocker)
7466 solved_blockers.add(blocker)
7468 retlist.append(node)
7470 if (isinstance(node, Package) and \
7471 "uninstall" == node.operation) or \
7472 (uninst_task is not None and \
7473 uninst_task in scheduled_uninstalls):
7474 # Include satisfied blockers in the merge list
7475 # since the user might be interested and also
7476 # it serves as an indicator that blocking packages
7477 # will be temporarily installed simultaneously.
7478 for blocker in solved_blockers:
7479 retlist.append(Blocker(atom=blocker.atom,
7480 root=blocker.root, eapi=blocker.eapi,
7483 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7484 for node in myblocker_uninstalls.root_nodes():
7485 unsolvable_blockers.add(node)
7487 for blocker in unsolvable_blockers:
7488 retlist.append(blocker)
7490 # If any Uninstall tasks need to be executed in order
7491 # to avoid a conflict, complete the graph with any
7492 # dependencies that may have been initially
7493 # neglected (to ensure that unsafe Uninstall tasks
7494 # are properly identified and blocked from execution).
7495 if have_uninstall_task and \
7497 not unsolvable_blockers:
7498 self.myparams.add("complete")
7499 raise self._serialize_tasks_retry("")
7501 if unsolvable_blockers and \
7502 not self._accept_blocker_conflicts():
7503 self._unsatisfied_blockers_for_display = unsolvable_blockers
7504 self._serialized_tasks_cache = retlist[:]
7505 self._scheduler_graph = scheduler_graph
7506 raise self._unknown_internal_error()
7508 if self._slot_collision_info and \
7509 not self._accept_blocker_conflicts():
7510 self._serialized_tasks_cache = retlist[:]
7511 self._scheduler_graph = scheduler_graph
7512 raise self._unknown_internal_error()
7514 return retlist, scheduler_graph
7516 def _show_circular_deps(self, mygraph):
7517 # No leaf nodes are available, so we have a circular
7518 # dependency panic situation. Reduce the noise level to a
7519 # minimum via repeated elimination of root nodes since they
7520 # have no parents and thus can not be part of a cycle.
7522 root_nodes = mygraph.root_nodes(
7523 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7526 mygraph.difference_update(root_nodes)
7527 # Display the USE flags that are enabled on nodes that are part
7528 # of dependency cycles in case that helps the user decide to
7529 # disable some of them.
7531 tempgraph = mygraph.copy()
7532 while not tempgraph.empty():
7533 nodes = tempgraph.leaf_nodes()
7535 node = tempgraph.order[0]
7538 display_order.append(node)
7539 tempgraph.remove(node)
7540 display_order.reverse()
7541 self.myopts.pop("--quiet", None)
7542 self.myopts.pop("--verbose", None)
7543 self.myopts["--tree"] = True
7544 portage.writemsg("\n\n", noiselevel=-1)
7545 self.display(display_order)
7546 prefix = colorize("BAD", " * ")
7547 portage.writemsg("\n", noiselevel=-1)
7548 portage.writemsg(prefix + "Error: circular dependencies:\n",
7550 portage.writemsg("\n", noiselevel=-1)
7551 mygraph.debug_print()
7552 portage.writemsg("\n", noiselevel=-1)
7553 portage.writemsg(prefix + "Note that circular dependencies " + \
7554 "can often be avoided by temporarily\n", noiselevel=-1)
7555 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7556 "optional dependencies.\n", noiselevel=-1)
7558 def _show_merge_list(self):
7559 if self._serialized_tasks_cache is not None and \
7560 not (self._displayed_list and \
7561 (self._displayed_list == self._serialized_tasks_cache or \
7562 self._displayed_list == \
7563 list(reversed(self._serialized_tasks_cache)))):
7564 display_list = self._serialized_tasks_cache[:]
7565 if "--tree" in self.myopts:
7566 display_list.reverse()
7567 self.display(display_list)
7569 def _show_unsatisfied_blockers(self, blockers):
7570 self._show_merge_list()
7571 msg = "Error: The above package list contains " + \
7572 "packages which cannot be installed " + \
7573 "at the same time on the same system."
7574 prefix = colorize("BAD", " * ")
7575 from textwrap import wrap
7576 portage.writemsg("\n", noiselevel=-1)
7577 for line in wrap(msg, 70):
7578 portage.writemsg(prefix + line + "\n", noiselevel=-1)
7580 # Display the conflicting packages along with the packages
7581 # that pulled them in. This is helpful for troubleshooting
7582 # cases in which blockers don't solve automatically and
7583 # the reasons are not apparent from the normal merge list
7587 for blocker in blockers:
7588 for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7589 self._blocker_parents.parent_nodes(blocker)):
7590 parent_atoms = self._parent_atoms.get(pkg)
7591 if not parent_atoms:
7592 atom = self._blocked_world_pkgs.get(pkg)
7593 if atom is not None:
7594 parent_atoms = set([("@world", atom)])
7596 conflict_pkgs[pkg] = parent_atoms
7599 # Reduce noise by pruning packages that are only
7600 # pulled in by other conflict packages.
7602 for pkg, parent_atoms in conflict_pkgs.iteritems():
7603 relevant_parent = False
7604 for parent, atom in parent_atoms:
7605 if parent not in conflict_pkgs:
7606 relevant_parent = True
7608 if not relevant_parent:
7609 pruned_pkgs.add(pkg)
7610 for pkg in pruned_pkgs:
7611 del conflict_pkgs[pkg]
7617 # Max number of parents shown, to avoid flooding the display.
7619 for pkg, parent_atoms in conflict_pkgs.iteritems():
7623 # Prefer packages that are not directly involved in a conflict.
7624 for parent_atom in parent_atoms:
7625 if len(pruned_list) >= max_parents:
7627 parent, atom = parent_atom
7628 if parent not in conflict_pkgs:
7629 pruned_list.add(parent_atom)
7631 for parent_atom in parent_atoms:
7632 if len(pruned_list) >= max_parents:
7634 pruned_list.add(parent_atom)
7636 omitted_parents = len(parent_atoms) - len(pruned_list)
7637 msg.append(indent + "%s pulled in by\n" % pkg)
7639 for parent_atom in pruned_list:
7640 parent, atom = parent_atom
7641 msg.append(2*indent)
7642 if isinstance(parent,
7643 (PackageArg, AtomArg)):
7644 # For PackageArg and AtomArg types, it's
7645 # redundant to display the atom attribute.
7646 msg.append(str(parent))
7648 # Display the specific atom from SetArg or
7650 msg.append("%s required by %s" % (atom, parent))
7654 msg.append(2*indent)
7655 msg.append("(and %d more)\n" % omitted_parents)
7659 sys.stderr.write("".join(msg))
7662 if "--quiet" not in self.myopts:
7663 show_blocker_docs_link()
7665 def display(self, mylist, favorites=[], verbosity=None):
7667 # This is used to prevent display_problems() from
7668 # redundantly displaying this exact same merge list
7669 # again via _show_merge_list().
7670 self._displayed_list = mylist
7672 if verbosity is None:
7673 verbosity = ("--quiet" in self.myopts and 1 or \
7674 "--verbose" in self.myopts and 3 or 2)
7675 favorites_set = InternalPackageSet(favorites)
7676 oneshot = "--oneshot" in self.myopts or \
7677 "--onlydeps" in self.myopts
7678 columns = "--columns" in self.myopts
7683 counters = PackageCounters()
7685 if verbosity == 1 and "--verbose" not in self.myopts:
7686 def create_use_string(*args):
7689 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7691 is_new, reinst_flags,
7692 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7693 alphabetical=("--alphabetical" in self.myopts)):
7701 cur_iuse = set(cur_iuse)
7702 enabled_flags = cur_iuse.intersection(cur_use)
7703 removed_iuse = set(old_iuse).difference(cur_iuse)
7704 any_iuse = cur_iuse.union(old_iuse)
7705 any_iuse = list(any_iuse)
7707 for flag in any_iuse:
7710 reinst_flag = reinst_flags and flag in reinst_flags
7711 if flag in enabled_flags:
7713 if is_new or flag in old_use and \
7714 (all_flags or reinst_flag):
7715 flag_str = red(flag)
7716 elif flag not in old_iuse:
7717 flag_str = yellow(flag) + "%*"
7718 elif flag not in old_use:
7719 flag_str = green(flag) + "*"
7720 elif flag in removed_iuse:
7721 if all_flags or reinst_flag:
7722 flag_str = yellow("-" + flag) + "%"
7725 flag_str = "(" + flag_str + ")"
7726 removed.append(flag_str)
7729 if is_new or flag in old_iuse and \
7730 flag not in old_use and \
7731 (all_flags or reinst_flag):
7732 flag_str = blue("-" + flag)
7733 elif flag not in old_iuse:
7734 flag_str = yellow("-" + flag)
7735 if flag not in iuse_forced:
7737 elif flag in old_use:
7738 flag_str = green("-" + flag) + "*"
7740 if flag in iuse_forced:
7741 flag_str = "(" + flag_str + ")"
7743 enabled.append(flag_str)
7745 disabled.append(flag_str)
7748 ret = " ".join(enabled)
7750 ret = " ".join(enabled + disabled + removed)
7752 ret = '%s="%s" ' % (name, ret)
7755 repo_display = RepoDisplay(self.roots)
7759 mygraph = self.digraph.copy()
7761 # If there are any Uninstall instances, add the corresponding
7762 # blockers to the digraph (useful for --tree display).
7764 executed_uninstalls = set(node for node in mylist \
7765 if isinstance(node, Package) and node.operation == "unmerge")
7767 for uninstall in self._blocker_uninstalls.leaf_nodes():
7768 uninstall_parents = \
7769 self._blocker_uninstalls.parent_nodes(uninstall)
7770 if not uninstall_parents:
7773 # Remove the corresponding "nomerge" node and substitute
7774 # the Uninstall node.
7775 inst_pkg = self._pkg_cache[
7776 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7778 mygraph.remove(inst_pkg)
7783 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7785 inst_pkg_blockers = []
7787 # Break the Package -> Uninstall edges.
7788 mygraph.remove(uninstall)
7790 # Resolution of a package's blockers
7791 # depend on it's own uninstallation.
7792 for blocker in inst_pkg_blockers:
7793 mygraph.add(uninstall, blocker)
7795 # Expand Package -> Uninstall edges into
7796 # Package -> Blocker -> Uninstall edges.
7797 for blocker in uninstall_parents:
7798 mygraph.add(uninstall, blocker)
7799 for parent in self._blocker_parents.parent_nodes(blocker):
7800 if parent != inst_pkg:
7801 mygraph.add(blocker, parent)
7803 # If the uninstall task did not need to be executed because
7804 # of an upgrade, display Blocker -> Upgrade edges since the
7805 # corresponding Blocker -> Uninstall edges will not be shown.
7807 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7808 if upgrade_node is not None and \
7809 uninstall not in executed_uninstalls:
7810 for blocker in uninstall_parents:
7811 mygraph.add(upgrade_node, blocker)
7813 unsatisfied_blockers = []
7818 if isinstance(x, Blocker) and not x.satisfied:
7819 unsatisfied_blockers.append(x)
7822 if "--tree" in self.myopts:
7823 depth = len(tree_nodes)
7824 while depth and graph_key not in \
7825 mygraph.child_nodes(tree_nodes[depth-1]):
7828 tree_nodes = tree_nodes[:depth]
7829 tree_nodes.append(graph_key)
7830 display_list.append((x, depth, True))
7831 shown_edges.add((graph_key, tree_nodes[depth-1]))
7833 traversed_nodes = set() # prevent endless circles
7834 traversed_nodes.add(graph_key)
7835 def add_parents(current_node, ordered):
7837 # Do not traverse to parents if this node is an
7838 # an argument or a direct member of a set that has
7839 # been specified as an argument (system or world).
7840 if current_node not in self._set_nodes:
7841 parent_nodes = mygraph.parent_nodes(current_node)
7843 child_nodes = set(mygraph.child_nodes(current_node))
7844 selected_parent = None
7845 # First, try to avoid a direct cycle.
7846 for node in parent_nodes:
7847 if not isinstance(node, (Blocker, Package)):
7849 if node not in traversed_nodes and \
7850 node not in child_nodes:
7851 edge = (current_node, node)
7852 if edge in shown_edges:
7854 selected_parent = node
7856 if not selected_parent:
7857 # A direct cycle is unavoidable.
7858 for node in parent_nodes:
7859 if not isinstance(node, (Blocker, Package)):
7861 if node not in traversed_nodes:
7862 edge = (current_node, node)
7863 if edge in shown_edges:
7865 selected_parent = node
7868 shown_edges.add((current_node, selected_parent))
7869 traversed_nodes.add(selected_parent)
7870 add_parents(selected_parent, False)
7871 display_list.append((current_node,
7872 len(tree_nodes), ordered))
7873 tree_nodes.append(current_node)
7875 add_parents(graph_key, True)
7877 display_list.append((x, depth, True))
7878 mylist = display_list
7879 for x in unsatisfied_blockers:
7880 mylist.append((x, 0, True))
7882 last_merge_depth = 0
7883 for i in xrange(len(mylist)-1,-1,-1):
7884 graph_key, depth, ordered = mylist[i]
7885 if not ordered and depth == 0 and i > 0 \
7886 and graph_key == mylist[i-1][0] and \
7887 mylist[i-1][1] == 0:
7888 # An ordered node got a consecutive duplicate when the tree was
7892 if ordered and graph_key[-1] != "nomerge":
7893 last_merge_depth = depth
7895 if depth >= last_merge_depth or \
7896 i < len(mylist) - 1 and \
7897 depth >= mylist[i+1][1]:
7900 from portage import flatten
7901 from portage.dep import use_reduce, paren_reduce
7902 # files to fetch list - avoids counting a same file twice
7903 # in size display (verbose mode)
7906 # Use this set to detect when all the "repoadd" strings are "[0]"
7907 # and disable the entire repo display in this case.
7910 for mylist_index in xrange(len(mylist)):
7911 x, depth, ordered = mylist[mylist_index]
7915 portdb = self.trees[myroot]["porttree"].dbapi
7916 bindb = self.trees[myroot]["bintree"].dbapi
7917 vardb = self.trees[myroot]["vartree"].dbapi
7918 vartree = self.trees[myroot]["vartree"]
7919 pkgsettings = self.pkgsettings[myroot]
7922 indent = " " * depth
7924 if isinstance(x, Blocker):
7926 blocker_style = "PKG_BLOCKER_SATISFIED"
7927 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
7929 blocker_style = "PKG_BLOCKER"
7930 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
7932 counters.blocks += 1
7934 counters.blocks_satisfied += 1
7935 resolved = portage.key_expand(
7936 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7937 if "--columns" in self.myopts and "--quiet" in self.myopts:
7938 addl += " " + colorize(blocker_style, resolved)
7940 addl = "[%s %s] %s%s" % \
7941 (colorize(blocker_style, "blocks"),
7942 addl, indent, colorize(blocker_style, resolved))
7943 block_parents = self._blocker_parents.parent_nodes(x)
7944 block_parents = set([pnode[2] for pnode in block_parents])
7945 block_parents = ", ".join(block_parents)
7947 addl += colorize(blocker_style,
7948 " (\"%s\" is blocking %s)") % \
7949 (str(x.atom).lstrip("!"), block_parents)
7951 addl += colorize(blocker_style,
7952 " (is blocking %s)") % block_parents
7953 if isinstance(x, Blocker) and x.satisfied:
7958 blockers.append(addl)
7961 pkg_merge = ordered and pkg_status == "merge"
7962 if not pkg_merge and pkg_status == "merge":
7963 pkg_status = "nomerge"
7964 built = pkg_type != "ebuild"
7965 installed = pkg_type == "installed"
7967 metadata = pkg.metadata
7969 repo_name = metadata["repository"]
7970 if pkg_type == "ebuild":
7971 ebuild_path = portdb.findname(pkg_key)
7972 if not ebuild_path: # shouldn't happen
7973 raise portage.exception.PackageNotFound(pkg_key)
7974 repo_path_real = os.path.dirname(os.path.dirname(
7975 os.path.dirname(ebuild_path)))
7977 repo_path_real = portdb.getRepositoryPath(repo_name)
7978 pkg_use = list(pkg.use.enabled)
7980 restrict = flatten(use_reduce(paren_reduce(
7981 pkg.metadata["RESTRICT"]), uselist=pkg_use))
7982 except portage.exception.InvalidDependString, e:
7983 if not pkg.installed:
7984 show_invalid_depstring_notice(x,
7985 pkg.metadata["RESTRICT"], str(e))
7989 if "ebuild" == pkg_type and x[3] != "nomerge" and \
7990 "fetch" in restrict:
7993 counters.restrict_fetch += 1
7994 if portdb.fetch_check(pkg_key, pkg_use):
7997 counters.restrict_fetch_satisfied += 1
7999 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
8000 #param is used for -u, where you still *do* want to see when something is being upgraded.
8003 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8004 if vardb.cpv_exists(pkg_key):
8005 addl=" "+yellow("R")+fetch+" "
8008 counters.reinst += 1
8009 elif pkg_status == "uninstall":
8010 counters.uninst += 1
8011 # filter out old-style virtual matches
8012 elif installed_versions and \
8013 portage.cpv_getkey(installed_versions[0]) == \
8014 portage.cpv_getkey(pkg_key):
8015 myinslotlist = vardb.match(pkg.slot_atom)
8016 # If this is the first install of a new-style virtual, we
8017 # need to filter out old-style virtual matches.
8018 if myinslotlist and \
8019 portage.cpv_getkey(myinslotlist[0]) != \
8020 portage.cpv_getkey(pkg_key):
8023 myoldbest = myinslotlist[:]
8025 if not portage.dep.cpvequal(pkg_key,
8026 portage.best([pkg_key] + myoldbest)):
8028 addl += turquoise("U")+blue("D")
8030 counters.downgrades += 1
8033 addl += turquoise("U") + " "
8035 counters.upgrades += 1
8037 # New slot, mark it new.
8038 addl = " " + green("NS") + fetch + " "
8039 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8041 counters.newslot += 1
8043 if "--changelog" in self.myopts:
8044 inst_matches = vardb.match(pkg.slot_atom)
8046 changelogs.extend(self.calc_changelog(
8047 portdb.findname(pkg_key),
8048 inst_matches[0], pkg_key))
8050 addl = " " + green("N") + " " + fetch + " "
8059 forced_flags = set()
8060 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8061 forced_flags.update(pkgsettings.useforce)
8062 forced_flags.update(pkgsettings.usemask)
8064 cur_use = [flag for flag in pkg.use.enabled \
8065 if flag in pkg.iuse.all]
8066 cur_iuse = sorted(pkg.iuse.all)
8068 if myoldbest and myinslotlist:
8069 previous_cpv = myoldbest[0]
8071 previous_cpv = pkg.cpv
8072 if vardb.cpv_exists(previous_cpv):
8073 old_iuse, old_use = vardb.aux_get(
8074 previous_cpv, ["IUSE", "USE"])
8075 old_iuse = list(set(
8076 filter_iuse_defaults(old_iuse.split())))
8078 old_use = old_use.split()
8085 old_use = [flag for flag in old_use if flag in old_iuse]
8087 use_expand = pkgsettings["USE_EXPAND"].lower().split()
8089 use_expand.reverse()
8090 use_expand_hidden = \
8091 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8093 def map_to_use_expand(myvals, forcedFlags=False,
8097 for exp in use_expand:
8100 for val in myvals[:]:
8101 if val.startswith(exp.lower()+"_"):
8102 if val in forced_flags:
8103 forced[exp].add(val[len(exp)+1:])
8104 ret[exp].append(val[len(exp)+1:])
8107 forced["USE"] = [val for val in myvals \
8108 if val in forced_flags]
8110 for exp in use_expand_hidden:
8116 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8117 # are the only thing that triggered reinstallation.
8118 reinst_flags_map = {}
8119 reinstall_for_flags = self._reinstall_nodes.get(pkg)
8120 reinst_expand_map = None
8121 if reinstall_for_flags:
8122 reinst_flags_map = map_to_use_expand(
8123 list(reinstall_for_flags), removeHidden=False)
8124 for k in list(reinst_flags_map):
8125 if not reinst_flags_map[k]:
8126 del reinst_flags_map[k]
8127 if not reinst_flags_map.get("USE"):
8128 reinst_expand_map = reinst_flags_map.copy()
8129 reinst_expand_map.pop("USE", None)
8130 if reinst_expand_map and \
8131 not set(reinst_expand_map).difference(
8133 use_expand_hidden = \
8134 set(use_expand_hidden).difference(
8137 cur_iuse_map, iuse_forced = \
8138 map_to_use_expand(cur_iuse, forcedFlags=True)
8139 cur_use_map = map_to_use_expand(cur_use)
8140 old_iuse_map = map_to_use_expand(old_iuse)
8141 old_use_map = map_to_use_expand(old_use)
8144 use_expand.insert(0, "USE")
8146 for key in use_expand:
8147 if key in use_expand_hidden:
8149 verboseadd += create_use_string(key.upper(),
8150 cur_iuse_map[key], iuse_forced[key],
8151 cur_use_map[key], old_iuse_map[key],
8152 old_use_map[key], is_new,
8153 reinst_flags_map.get(key))
8158 if pkg_type == "ebuild" and pkg_merge:
8160 myfilesdict = portdb.getfetchsizes(pkg_key,
8161 useflags=pkg_use, debug=self.edebug)
8162 except portage.exception.InvalidDependString, e:
8163 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8164 show_invalid_depstring_notice(x, src_uri, str(e))
8167 if myfilesdict is None:
8168 myfilesdict="[empty/missing/bad digest]"
8170 for myfetchfile in myfilesdict:
8171 if myfetchfile not in myfetchlist:
8172 mysize+=myfilesdict[myfetchfile]
8173 myfetchlist.append(myfetchfile)
8175 counters.totalsize += mysize
8176 verboseadd += format_size(mysize)
8179 # assign index for a previous version in the same slot
8180 has_previous = False
8181 repo_name_prev = None
8182 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8184 slot_matches = vardb.match(slot_atom)
8187 repo_name_prev = vardb.aux_get(slot_matches[0],
8190 # now use the data to generate output
8191 if pkg.installed or not has_previous:
8192 repoadd = repo_display.repoStr(repo_path_real)
8194 repo_path_prev = None
8196 repo_path_prev = portdb.getRepositoryPath(
8198 if repo_path_prev == repo_path_real:
8199 repoadd = repo_display.repoStr(repo_path_real)
8201 repoadd = "%s=>%s" % (
8202 repo_display.repoStr(repo_path_prev),
8203 repo_display.repoStr(repo_path_real))
8205 repoadd_set.add(repoadd)
8207 xs = [portage.cpv_getkey(pkg_key)] + \
8208 list(portage.catpkgsplit(pkg_key)[2:])
8215 if "COLUMNWIDTH" in self.settings:
8217 mywidth = int(self.settings["COLUMNWIDTH"])
8218 except ValueError, e:
8219 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8221 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8222 self.settings["COLUMNWIDTH"], noiselevel=-1)
8224 oldlp = mywidth - 30
8227 # Convert myoldbest from a list to a string.
8231 for pos, key in enumerate(myoldbest):
8232 key = portage.catpkgsplit(key)[2] + \
8233 "-" + portage.catpkgsplit(key)[3]
8234 if key[-3:] == "-r0":
8236 myoldbest[pos] = key
8237 myoldbest = blue("["+", ".join(myoldbest)+"]")
8240 root_config = self.roots[myroot]
8241 system_set = root_config.sets["system"]
8242 world_set = root_config.sets["world"]
8247 pkg_system = system_set.findAtomForPackage(pkg)
8248 pkg_world = world_set.findAtomForPackage(pkg)
8249 if not (oneshot or pkg_world) and \
8250 myroot == self.target_root and \
8251 favorites_set.findAtomForPackage(pkg):
8252 # Maybe it will be added to world now.
8253 if create_world_atom(pkg, favorites_set, root_config):
8255 except portage.exception.InvalidDependString:
8256 # This is reported elsewhere if relevant.
8259 def pkgprint(pkg_str):
8262 return colorize("PKG_MERGE_SYSTEM", pkg_str)
8264 return colorize("PKG_MERGE_WORLD", pkg_str)
8266 return colorize("PKG_MERGE", pkg_str)
8267 elif pkg_status == "uninstall":
8268 return colorize("PKG_UNINSTALL", pkg_str)
8271 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8273 return colorize("PKG_NOMERGE_WORLD", pkg_str)
8275 return colorize("PKG_NOMERGE", pkg_str)
8278 properties = flatten(use_reduce(paren_reduce(
8279 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8280 except portage.exception.InvalidDependString, e:
8281 if not pkg.installed:
8282 show_invalid_depstring_notice(pkg,
8283 pkg.metadata["PROPERTIES"], str(e))
8287 interactive = "interactive" in properties
8288 if interactive and pkg.operation == "merge":
8289 addl = colorize("WARN", "I") + addl[1:]
8291 counters.interactive += 1
8296 if "--columns" in self.myopts:
8297 if "--quiet" in self.myopts:
8298 myprint=addl+" "+indent+pkgprint(pkg_cp)
8299 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8300 myprint=myprint+myoldbest
8301 myprint=myprint+darkgreen("to "+x[1])
8305 myprint = "[%s] %s%s" % \
8306 (pkgprint(pkg_status.ljust(13)),
8307 indent, pkgprint(pkg.cp))
8309 myprint = "[%s %s] %s%s" % \
8310 (pkgprint(pkg.type_name), addl,
8311 indent, pkgprint(pkg.cp))
8312 if (newlp-nc_len(myprint)) > 0:
8313 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8314 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8315 if (oldlp-nc_len(myprint)) > 0:
8316 myprint=myprint+" "*(oldlp-nc_len(myprint))
8317 myprint=myprint+myoldbest
8318 myprint += darkgreen("to " + pkg.root)
8321 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8323 myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8324 myprint += indent + pkgprint(pkg_key) + " " + \
8325 myoldbest + darkgreen("to " + myroot)
8327 if "--columns" in self.myopts:
8328 if "--quiet" in self.myopts:
8329 myprint=addl+" "+indent+pkgprint(pkg_cp)
8330 myprint=myprint+" "+green(xs[1]+xs[2])+" "
8331 myprint=myprint+myoldbest
8335 myprint = "[%s] %s%s" % \
8336 (pkgprint(pkg_status.ljust(13)),
8337 indent, pkgprint(pkg.cp))
8339 myprint = "[%s %s] %s%s" % \
8340 (pkgprint(pkg.type_name), addl,
8341 indent, pkgprint(pkg.cp))
8342 if (newlp-nc_len(myprint)) > 0:
8343 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8344 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8345 if (oldlp-nc_len(myprint)) > 0:
8346 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8347 myprint += myoldbest
8350 myprint = "[%s] %s%s %s" % \
8351 (pkgprint(pkg_status.ljust(13)),
8352 indent, pkgprint(pkg.cpv),
8355 myprint = "[%s %s] %s%s %s" % \
8356 (pkgprint(pkg_type), addl, indent,
8357 pkgprint(pkg.cpv), myoldbest)
8359 if columns and pkg.operation == "uninstall":
8361 p.append((myprint, verboseadd, repoadd))
8363 if "--tree" not in self.myopts and \
8364 "--quiet" not in self.myopts and \
8365 not self._opts_no_restart.intersection(self.myopts) and \
8366 pkg.root == self._running_root.root and \
8367 portage.match_from_list(
8368 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8369 not vardb.cpv_exists(pkg.cpv) and \
8370 "--quiet" not in self.myopts:
8371 if mylist_index < len(mylist) - 1:
8372 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8373 p.append(colorize("WARN", " then resume the merge."))
8376 show_repos = repoadd_set and repoadd_set != set(["0"])
8379 if isinstance(x, basestring):
8380 out.write("%s\n" % (x,))
8383 myprint, verboseadd, repoadd = x
8386 myprint += " " + verboseadd
8388 if show_repos and repoadd:
8389 myprint += " " + teal("[%s]" % repoadd)
8391 out.write("%s\n" % (myprint,))
8400 sys.stdout.write(str(repo_display))
8402 if "--changelog" in self.myopts:
8404 for revision,text in changelogs:
8405 print bold('*'+revision)
8406 sys.stdout.write(text)
	def display_problems(self):
		"""
		Display problems with the dependency graph such as slot collisions.
		This is called internally by display() to show the problems _after_
		the merge list where it is most likely to be seen, but if display()
		is not going to be called then this method should be called explicitly
		to ensure that the user is notified of problems with the graph.

		All output goes to stderr, except for unsatisfied dependencies which
		go to stdout for parsing by programs such as autounmask.
		"""

		# Note that show_masked_packages() sends its output to
		# stdout, and some programs such as autounmask parse the
		# output in cases when emerge bails out. However, when
		# show_masked_packages() is called for installed packages
		# here, the message is a warning that is more appropriate
		# to send to stderr, so temporarily redirect stdout to
		# stderr. TODO: Fix output code so there's a cleaner way
		# to redirect everything to stderr.
		# NOTE(review): the original presumably restores sys.stdout in a
		# try/finally elided from this excerpt -- confirm upstream.
		sys.stdout = sys.stderr
		self._display_problems()

		# This goes to stdout for parsing by programs like autounmask.
		for pargs, kwargs in self._unsatisfied_deps_for_display:
			self._show_unsatisfied_dep(*pargs, **kwargs)
	def _display_problems(self):
		# Report all queued graph problems to the user: circular deps,
		# blocker/slot conflicts, missing or masked arguments,
		# package.provided conflicts, and masked installed packages.
		# NOTE(review): several lines are elided from this excerpt, so the
		# control flow shown here is partial.
		if self._circular_deps_for_display is not None:
			self._show_circular_deps(
				self._circular_deps_for_display)

		# The user is only notified of a slot conflict if
		# there are no unresolvable blocker conflicts.
		if self._unsatisfied_blockers_for_display is not None:
			self._show_unsatisfied_blockers(
				self._unsatisfied_blockers_for_display)
			# NOTE(review): this call appears to belong to an alternate
			# (else) branch whose header is elided here -- confirm.
			self._show_slot_collision_notice()

		# TODO: Add generic support for "set problem" handlers so that
		# the below warnings aren't special cases for world only.

		if self._missing_args:
			world_problems = False
			if "world" in self._sets:
				# Filter out indirect members of world (from nested sets)
				# since only direct members of world are desired here.
				world_set = self.roots[self.target_root].sets["world"]
				for arg, atom in self._missing_args:
					if arg.name == "world" and atom in world_set:
						world_problems = True

			sys.stderr.write("\n!!! Problems have been " + \
				"detected with your world file\n")
			sys.stderr.write("!!! Please run " + \
				green("emaint --check world")+"\n\n")

		if self._missing_args:
			sys.stderr.write("\n" + colorize("BAD", "!!!") + \
				" Ebuilds for the following packages are either all\n")
			sys.stderr.write(colorize("BAD", "!!!") + \
				" masked or don't exist:\n")
			sys.stderr.write(" ".join(str(atom) for arg, atom in \
				self._missing_args) + "\n")

		if self._pprovided_args:
			# Group the offending (arg, atom) pairs and remember which
			# parent sets pulled each one in.
			for arg, atom in self._pprovided_args:
				if isinstance(arg, SetArg):
					arg_atom = (atom, atom)
					arg_atom = (arg.arg, atom)
				refs = arg_refs.setdefault(arg_atom, [])
				if parent not in refs:
			msg.append(bad("\nWARNING: "))
			if len(self._pprovided_args) > 1:
				msg.append("Requested packages will not be " + \
					"merged because they are listed in\n")
				msg.append("A requested package will not be " + \
					"merged because it is listed in\n")
			msg.append("package.provided:\n\n")
			problems_sets = set()
			for (arg, atom), refs in arg_refs.iteritems():
					problems_sets.update(refs)
					ref_string = ", ".join(["'%s'" % name for name in refs])
					ref_string = " pulled in by " + ref_string
				msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
			if "world" in problems_sets:
				msg.append("This problem can be solved in one of the following ways:\n\n")
				msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
				msg.append("  B) Uninstall offending packages (cleans them from world).\n")
				msg.append("  C) Remove offending entries from package.provided.\n\n")
				msg.append("The best course of action depends on the reason that an offending\n")
				msg.append("package.provided entry exists.\n\n")
			sys.stderr.write("".join(msg))

		# Warn about installed packages that have since become masked.
		masked_packages = []
		for pkg in self._masked_installed:
			root_config = pkg.root_config
			pkgsettings = self.pkgsettings[pkg.root]
			mreasons = get_masking_status(pkg, pkgsettings, root_config)
			masked_packages.append((root_config, pkgsettings,
				pkg.cpv, pkg.metadata, mreasons))
		# (an "if masked_packages:" guard appears to be elided here)
		sys.stderr.write("\n" + colorize("BAD", "!!!") + \
			" The following installed packages are masked:\n")
		show_masked_packages(masked_packages)
	def calc_changelog(self,ebuildpath,current,next):
		# Build the list of (revision, text) ChangeLog entries covering the
		# span between the installed version (current) and the version
		# about to be merged (next).
		# NOTE(review): some lines are elided from this excerpt.
		if ebuildpath == None or not os.path.exists(ebuildpath):
		# Strip the category, keep "pv[-r]" and drop a trivial -r0 suffix.
		current = '-'.join(portage.catpkgsplit(current)[1:])
		if current.endswith('-r0'):
			current = current[:-3]
		next = '-'.join(portage.catpkgsplit(next)[1:])
		if next.endswith('-r0'):
		changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
			changelog = open(changelogpath).read()
		except SystemExit, e:
			raise # Needed else can't exit
		divisions = self.find_changelog_tags(changelog)
		#print 'XX from',current,'to',next
		#for div,text in divisions: print 'XX',div
		# skip entries for all revisions above the one we are about to emerge
		for i in range(len(divisions)):
			if divisions[i][0]==next:
				divisions = divisions[i:]
		# find out how many entries we are going to display
		for i in range(len(divisions)):
			if divisions[i][0]==current:
				divisions = divisions[:i]
		# couldn't find the current revision in the list. display nothing
	def find_changelog_tags(self,changelog):
		# Split a ChangeLog body into (release, text) chunks, one chunk per
		# "*<version>" header line (matched by the regex below).
		# NOTE(review): the enclosing loop header and the accumulator
		# initialization are elided from this excerpt.
		match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
		if release is not None:
			divs.append((release,changelog))
		if release is not None:
			divs.append((release,changelog[:match.start()]))
		changelog = changelog[match.end():]
		release = match.group(1)
		# Normalize the release token: drop a ".ebuild" suffix and a
		# trivial "-r0" revision.
		if release.endswith('.ebuild'):
			release = release[:-7]
		if release.endswith('-r0'):
			release = release[:-3]
	def saveNomergeFavorites(self):
		"""Find atoms in favorites that are not in the mergelist and add them
		to the world file if necessary."""
		# NOTE(review): several lines are elided from this excerpt.
		# Nothing is recorded for pretend/fetch-only style invocations.
		for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
			"--oneshot", "--onlydeps", "--pretend"):
			if x in self.myopts:
		root_config = self.roots[self.target_root]
		world_set = root_config.sets["world"]

		world_locked = False
		if hasattr(world_set, "lock"):
		if hasattr(world_set, "load"):
			world_set.load() # maybe it's changed on disk

		args_set = self._sets["args"]
		portdb = self.trees[self.target_root]["porttree"].dbapi
		added_favorites = set()
		# Collect a world atom for each "nomerge" node that came from an
		# argument, skipping duplicates.
		for x in self._set_nodes:
			pkg_type, root, pkg_key, pkg_status = x
			if pkg_status != "nomerge":
			myfavkey = create_world_atom(x, args_set, root_config)
			if myfavkey in added_favorites:
			added_favorites.add(myfavkey)
			except portage.exception.InvalidDependString, e:
				writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
					(pkg_key, str(e)), noiselevel=-1)
				writemsg("!!! see '%s'\n\n" % os.path.join(
					root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
		# Also record any named sets given as arguments (except the
		# special "args"/"world" sets and non-world-candidate sets).
		for k in self._sets:
			if k in ("args", "world") or not root_config.sets[k].world_candidate:
			all_added.append(SETPREFIX + k)
		all_added.extend(added_favorites)
		print ">>> Recording %s in \"world\" favorites file..." % \
			colorize("INFORM", str(a))
		world_set.update(all_added)
	def loadResumeCommand(self, resume_data, skip_masked=False):
		"""
		Add a resume command to the graph and validate it in the process.  This
		will raise a PackageNotFound exception if a package is not available.
		"""
		# NOTE(review): many lines are elided from this excerpt, so the
		# control flow shown here is partial.
		if not isinstance(resume_data, dict):
		mergelist = resume_data.get("mergelist")
		if not isinstance(mergelist, list):
		fakedb = self.mydbapi
		serialized_tasks = []
		# Validate each [pkg_type, root, cpv, action] mergelist entry and
		# convert it into a Package instance.
			if not (isinstance(x, list) and len(x) == 4):
			pkg_type, myroot, pkg_key, action = x
			if pkg_type not in self.pkg_tree_map:
			if action != "merge":
			tree_type = self.pkg_tree_map[pkg_type]
			mydb = trees[myroot][tree_type].dbapi
			db_keys = list(self._trees_orig[myroot][
				tree_type].dbapi._aux_cache_keys)
			metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
			# It does not exist or it is corrupt.
			if action == "uninstall":
			raise portage.exception.PackageNotFound(pkg_key)
			installed = action == "uninstall"
			built = pkg_type != "ebuild"
			root_config = self.roots[myroot]
			pkg = Package(built=built, cpv=pkg_key,
				installed=installed, metadata=metadata,
				operation=action, root_config=root_config,
			if pkg_type == "ebuild":
				pkgsettings = self.pkgsettings[myroot]
				pkgsettings.setcpv(pkg)
				pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
			self._pkg_cache[pkg] = pkg

			# Queue a masking problem for packages that are no longer
			# visible (reported later, or shown as unsatisfied deps).
			root_config = self.roots[pkg.root]
			if "merge" == pkg.operation and \
				not visible(root_config.settings, pkg):
					masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
					self._unsatisfied_deps_for_display.append(
						((pkg.root, "="+pkg.cpv), {"myparent":None}))

			fakedb[myroot].cpv_inject(pkg)
			serialized_tasks.append(pkg)
			self.spinner.update()

		if self._unsatisfied_deps_for_display:

		if not serialized_tasks or "--nodeps" in self.myopts:
			self._serialized_tasks_cache = serialized_tasks
			self._scheduler_graph = self.digraph
			self._select_package = self._select_pkg_from_graph
			self.myparams.add("selective")
			# Always traverse deep dependencies in order to account for
			# potentially unsatisfied dependencies of installed packages.
			# This is necessary for correct --keep-going or --resume operation
			# in case a package from a group of circularly dependent packages
			# fails. In this case, a package which has recently been installed
			# may have an unsatisfied circular dependency (pulled in by
			# PDEPEND, for example). So, even though a package is already
			# installed, it may not have all of it's dependencies satisfied, so
			# it may not be usable. If such a package is in the subgraph of
			# deep dependencies of a scheduled build, that build needs to
			# be cancelled. In order for this type of situation to be
			# recognized, deep traversal of dependencies is required.
			self.myparams.add("deep")

			favorites = resume_data.get("favorites")
			args_set = self._sets["args"]
			if isinstance(favorites, list):
				args = self._load_favorites(favorites)

			for task in serialized_tasks:
				if isinstance(task, Package) and \
					task.operation == "merge":
					if not self._add_pkg(task, None):

			# Packages for argument atoms need to be explicitly
			# added via _add_pkg() so that they are included in the
			# digraph (needed at least for --tree display).
				for atom in arg.set:
					pkg, existing_node = self._select_package(
						arg.root_config.root, atom)
					if existing_node is None and \
						if not self._add_pkg(pkg, Dependency(atom=atom,
							root=pkg.root, parent=arg)):

			# Allow unsatisfied deps here to avoid showing a masking
			# message for an unsatisfied dep that isn't necessarily
			if not self._create_graph(allow_unsatisfied=True):

			unsatisfied_deps = []
			for dep in self._unsatisfied_deps:
				if not isinstance(dep.parent, Package):
				if dep.parent.operation == "merge":
					unsatisfied_deps.append(dep)

			# For unsatisfied deps of installed packages, only account for
			# them if they are in the subgraph of dependencies of a package
			# which is scheduled to be installed.
			unsatisfied_install = False
			dep_stack = self.digraph.parent_nodes(dep.parent)
				node = dep_stack.pop()
				if not isinstance(node, Package):
				if node.operation == "merge":
					unsatisfied_install = True
				if node in traversed:
				dep_stack.extend(self.digraph.parent_nodes(node))
			if unsatisfied_install:
				unsatisfied_deps.append(dep)

			if masked_tasks or unsatisfied_deps:
				# This probably means that a required package
				# was dropped via --skipfirst. It makes the
				# resume list invalid, so convert it to a
				# UnsatisfiedResumeDep exception.
				raise self.UnsatisfiedResumeDep(self,
					masked_tasks + unsatisfied_deps)
			self._serialized_tasks_cache = None
		except self._unknown_internal_error:
	def _load_favorites(self, favorites):
		"""
		Use a list of favorites to resume state from a
		previous select_files() call. This creates similar
		DependencyArg instances to those that would have
		been created by the original select_files() call.
		This allows Package instances to be matched with
		DependencyArg instances during graph creation.
		"""
		# NOTE(review): the loop header over `favorites` and the `args`
		# list initialization are elided from this excerpt.
		root_config = self.roots[self.target_root]
		getSetAtoms = root_config.setconfig.getSetAtoms
		sets = root_config.sets
			# Skip anything that is not a plain string entry.
			if not isinstance(x, basestring):
			if x in ("system", "world"):
			if x.startswith(SETPREFIX):
				s = x[len(SETPREFIX):]
				# Recursively expand sets so that containment tests in
				# self._get_parent_sets() properly match atoms in nested
				# sets (like if world contains system).
				expanded_set = InternalPackageSet(
					initial_atoms=getSetAtoms(s))
				self._sets[s] = expanded_set
				args.append(SetArg(arg=x, set=expanded_set,
					root_config=root_config))
			if not portage.isvalidatom(x):
			args.append(AtomArg(arg=x, atom=x,
				root_config=root_config))
		self._set_args(args)
	class UnsatisfiedResumeDep(portage.exception.PortageException):
		"""
		A dependency of a resume list is not installed. This
		can occur when a required package is dropped from the
		merge list via --skipfirst.
		"""
		def __init__(self, depgraph, value):
			portage.exception.PortageException.__init__(self, value)
			# Keep a reference to the depgraph so callers can inspect
			# the failed graph state.
			self.depgraph = depgraph
	class _internal_exception(portage.exception.PortageException):
		"""Base class for exceptions raised internally by the depgraph
		(see _unknown_internal_error and _serialize_tasks_retry)."""
		def __init__(self, value=""):
			portage.exception.PortageException.__init__(self, value)
	class _unknown_internal_error(_internal_exception):
		"""
		Used by the depgraph internally to terminate graph creation.
		The specific reason for the failure should have been dumped
		to stderr, unfortunately, the exact reason for the failure
		is not conveyed by the exception itself.
		"""
	class _serialize_tasks_retry(_internal_exception):
		"""
		This is raised by the _serialize_tasks() method when it needs to
		be called again for some reason. The only case that it's currently
		used for is when neglected dependencies need to be added to the
		graph in order to avoid making a potentially unsafe decision.
		"""
class _dep_check_composite_db(portage.dbapi):
	"""
	A dbapi-like interface that is optimized for use in dep_check() calls.
	This is built on top of the existing depgraph package selection logic.
	Some packages that have been added to the graph may be masked from this
	view in order to influence the atom preference selection that occurs
	"""
	def __init__(self, depgraph, root):
		portage.dbapi.__init__(self)
		self._depgraph = depgraph
		# Per-instance caches keyed by atom / cpv; see _clear_cache().
		self._match_cache = {}
		self._cpv_pkg_map = {}

	def _clear_cache(self):
		# Invalidate cached match() results and cpv->Package mappings.
		self._match_cache.clear()
		self._cpv_pkg_map.clear()

	def match(self, atom):
		# Return matching cpvs for atom via the depgraph's package
		# selection, caching the result.
		# NOTE(review): several lines are elided from this excerpt, so the
		# control flow shown here is partial.
		ret = self._match_cache.get(atom)
		atom = self._dep_expand(atom)
		pkg, existing = self._depgraph._select_package(self._root, atom)
		# Return the highest available from select_package() as well as
		# any matching slots in the graph db.
		slots.add(pkg.metadata["SLOT"])
		atom_cp = portage.dep_getkey(atom)
		if pkg.cp.startswith("virtual/"):
			# For new-style virtual lookahead that occurs inside
			# dep_check(), examine all slots. This is needed
			# so that newer slots will not unnecessarily be pulled in
			# when a satisfying lower slot is already installed. For
			# example, if virtual/jdk-1.4 is satisfied via kaffe then
			# there's no need to pull in a newer slot to satisfy a
			# virtual/jdk dependency.
			for db, pkg_type, built, installed, db_keys in \
				self._depgraph._filtered_trees[self._root]["dbs"]:
				for cpv in db.match(atom):
					if portage.cpv_getkey(cpv) != pkg.cp:
					slots.add(db.aux_get(cpv, ["SLOT"])[0])
		if self._visible(pkg):
			self._cpv_pkg_map[pkg.cpv] = pkg
		slots.remove(pkg.metadata["SLOT"])
		# Select one visible package per remaining slot.
			slot_atom = "%s:%s" % (atom_cp, slots.pop())
			pkg, existing = self._depgraph._select_package(
				self._root, slot_atom)
			if not self._visible(pkg):
			self._cpv_pkg_map[pkg.cpv] = pkg
		self._cpv_sort_ascending(ret)
		self._match_cache[orig_atom] = ret

	def _visible(self, pkg):
		# Decide whether pkg may be offered through this composite view.
		# NOTE(review): try/except headers around the calls below are
		# partially elided from this excerpt.
		if pkg.installed and "selective" not in self._depgraph.myparams:
			arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
		except (StopIteration, portage.exception.InvalidDependString):
			self._depgraph.pkgsettings[pkg.root], pkg):
		except portage.exception.InvalidDependString:
		in_graph = self._depgraph._slot_pkg_map[
			self._root].get(pkg.slot_atom)
		if in_graph is None:
			# Mask choices for packages which are not the highest visible
			# version within their slot (since they usually trigger slot
			# conflicts).
			highest_visible, in_graph = self._depgraph._select_package(
				self._root, pkg.slot_atom)
			if pkg != highest_visible:
		elif in_graph != pkg:
			# Mask choices for packages that would trigger a slot
			# conflict with a previously selected package.

	def _dep_expand(self, atom):
		"""
		This is only needed for old installed packages that may
		contain atoms that are not fully qualified with a specific
		category. Emulate the cpv_expand() function that's used by
		dbapi.match() in cases like this. If there are multiple
		matches, it's often due to a new-style virtual that has
		been added, so try to filter those out to avoid raising
		"""
		root_config = self._depgraph.roots[self._root]
		expanded_atoms = self._depgraph._dep_expand(root_config, atom)
		if len(expanded_atoms) > 1:
			# Prefer the single non-virtual expansion when one exists.
			non_virtual_atoms = []
			for x in expanded_atoms:
				if not portage.dep_getkey(x).startswith("virtual/"):
					non_virtual_atoms.append(x)
			if len(non_virtual_atoms) == 1:
				expanded_atoms = non_virtual_atoms
		if len(expanded_atoms) > 1:
			# compatible with portage.cpv_expand()
			raise portage.exception.AmbiguousPackageName(
				[portage.dep_getkey(x) for x in expanded_atoms])
		atom = expanded_atoms[0]
		null_atom = insert_category_into_atom(atom, "null")
		null_cp = portage.dep_getkey(null_atom)
		cat, atom_pn = portage.catsplit(null_cp)
		virts_p = root_config.settings.get_virts_p().get(atom_pn)
		# Allow the resolver to choose which virtual.
		atom = insert_category_into_atom(atom, "virtual")
		atom = insert_category_into_atom(atom, "null")

	def aux_get(self, cpv, wants):
		# Serve metadata for cpvs previously recorded by match().
		metadata = self._cpv_pkg_map[cpv].metadata
		return [metadata.get(x, "") for x in wants]
class RepoDisplay(object):
    """Assign short display indices to repository paths so that merge
    lists can tag each package with a compact marker like [0]."""

    def __init__(self, roots):
        self._shown_repos = {}
        self._unknown_repo = False
        # NOTE(review): initialization of the repo_paths set is elided
        # in this excerpt.
        for root_config in roots.itervalues():
            portdir = root_config.settings.get("PORTDIR")
            repo_paths.add(portdir)
            overlays = root_config.settings.get("PORTDIR_OVERLAY")
            repo_paths.update(overlays.split())
        repo_paths = list(repo_paths)
        self._repo_paths = repo_paths
        # Compare resolved paths so symlinked repo locations match.
        self._repo_paths_real = [ os.path.realpath(repo_path) \
            for repo_path in repo_paths ]

        # pre-allocate index for PORTDIR so that it always has index 0.
        for root_config in roots.itervalues():
            portdb = root_config.trees["porttree"].dbapi
            portdir = portdb.porttree_root
            self.repoStr(portdir)

    def repoStr(self, repo_path_real):
        # Return a short display token for the given resolved repo
        # path, assigning the next free index on first use.
        real_index = self._repo_paths_real.index(repo_path_real)
        if real_index == -1:
            # NOTE(review): list.index() raises ValueError and never
            # returns -1 — a membership guard appears to be elided in
            # this excerpt.
            self._unknown_repo = True
        shown_repos = self._shown_repos
        repo_paths = self._repo_paths
        repo_path = repo_paths[real_index]
        index = shown_repos.get(repo_path)
        index = len(shown_repos)
        shown_repos[repo_path] = index

    # NOTE(review): the header of the method below (presumably
    # __str__) is elided in this excerpt.
        shown_repos = self._shown_repos
        unknown_repo = self._unknown_repo
        if shown_repos or self._unknown_repo:
            output.append("Portage tree and overlays:\n")
        # Invert index->path so repos print in index order.
        show_repo_paths = list(shown_repos)
        for repo_path, repo_index in shown_repos.iteritems():
            show_repo_paths[repo_index] = repo_path
        for index, repo_path in enumerate(show_repo_paths):
            output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
        output.append(" "+teal("[?]") + \
            " indicates that the source repository could not be determined\n")
        return "".join(output)
class PackageCounters(object):
    """Tallies for the merge list (upgrades, downgrades, new slots,
    blockers, fetch restrictions) with a text summary builder."""

    # NOTE(review): the __init__ header and most counter
    # initializations are elided in this excerpt.
        self.blocks_satisfied = 0
        self.restrict_fetch = 0
        self.restrict_fetch_satisfied = 0
        self.interactive = 0

    # NOTE(review): the summary method header (presumably __str__) and
    # the myoutput/details list setup are elided in this excerpt.
        total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
        myoutput.append("Total: %s package" % total_installs)
        if total_installs != 1:
            myoutput.append("s")
        if total_installs != 0:
            myoutput.append(" (")
        if self.upgrades > 0:
            details.append("%s upgrade" % self.upgrades)
            if self.upgrades > 1:
        if self.downgrades > 0:
            details.append("%s downgrade" % self.downgrades)
            if self.downgrades > 1:
            # NOTE(review): pluralization appends and the new/reinst/
            # uninst guards are partially elided below.
            details.append("%s new" % self.new)
        if self.newslot > 0:
            details.append("%s in new slot" % self.newslot)
            if self.newslot > 1:
            details.append("%s reinstall" % self.reinst)
            details.append("%s uninstall" % self.uninst)
        if self.interactive > 0:
            details.append("%s %s" % (self.interactive,
                colorize("WARN", "interactive")))
        myoutput.append(", ".join(details))
        if total_installs != 0:
            myoutput.append(")")
        myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
        if self.restrict_fetch:
            myoutput.append("\nFetch Restriction: %s package" % \
                self.restrict_fetch)
            if self.restrict_fetch > 1:
                myoutput.append("s")
        if self.restrict_fetch_satisfied < self.restrict_fetch:
            myoutput.append(bad(" (%s unsatisfied)") % \
                (self.restrict_fetch - self.restrict_fetch_satisfied))
        # NOTE(review): the blocker-count guard is elided here.
        myoutput.append("\nConflict: %s block" % \
            myoutput.append("s")
        if self.blocks_satisfied < self.blocks:
            myoutput.append(bad(" (%s unsatisfied)") % \
                (self.blocks - self.blocks_satisfied))
        return "".join(myoutput)
class PollSelectAdapter(PollConstants):
    """
    Use select to emulate a poll object, for
    systems that don't support poll().
    """

    # NOTE(review): the __init__ header is elided in this excerpt.
        self._registered = {}
        # Cached argument list for select(); invalidated on changes.
        self._select_args = [[], [], []]

    def register(self, fd, *args):
        """
        Only POLLIN is currently supported!
        """
        # NOTE(review): the argument-count guard raising this error is
        # elided.
            "register expected at most 2 arguments, got " + \
            repr(1 + len(args)))
        eventmask = PollConstants.POLLIN | \
            PollConstants.POLLPRI | PollConstants.POLLOUT
        self._registered[fd] = eventmask
        # Force the cached select() arguments to be rebuilt.
        self._select_args = None

    def unregister(self, fd):
        self._select_args = None
        del self._registered[fd]

    def poll(self, *args):
        # NOTE(review): the argument-count guard and timeout
        # extraction are elided.
            "poll expected at most 2 arguments, got " + \
            repr(1 + len(args)))
        select_args = self._select_args
        if select_args is None:
            select_args = [self._registered.keys(), [], []]

        if timeout is not None:
            select_args = select_args[:]
            # Translate poll() timeout args to select() timeout args:
            #
            #          | units        | value(s) for indefinite block
            # ---------|--------------|------------------------------
            #   poll   | milliseconds | omitted, negative, or None
            # ---------|--------------|------------------------------
            #   select | seconds      | omitted
            # ---------|--------------|------------------------------

            if timeout is not None and timeout < 0:
            if timeout is not None:
                # NOTE(review): Python 2 integer division here would
                # truncate sub-second timeouts to 0 — confirm intent.
                select_args.append(timeout / 1000)

        select_events = select.select(*select_args)
        # NOTE(review): initialization of poll_events and the return
        # are elided.
        for fd in select_events[0]:
            poll_events.append((fd, PollConstants.POLLIN))
class SequentialTaskQueue(SlotObject):
    """FIFO queue of asynchronous tasks, running at most max_jobs
    concurrently (max_jobs is True means unlimited)."""

    __slots__ = ("max_jobs", "running_tasks") + \
        ("_dirty", "_scheduling", "_task_queue")

    def __init__(self, **kwargs):
        SlotObject.__init__(self, **kwargs)
        self._task_queue = deque()
        self.running_tasks = set()
        if self.max_jobs is None:
        # NOTE(review): the default max_jobs assignment is elided.

    def add(self, task):
        self._task_queue.append(task)

    def addFront(self, task):
        # For tasks that must run before anything already queued.
        self._task_queue.appendleft(task)

    # NOTE(review): the schedule() header is elided in this excerpt.
        if self._scheduling:
            # Ignore any recursive schedule() calls triggered via
            # self._task_exit().

        self._scheduling = True

        task_queue = self._task_queue
        running_tasks = self.running_tasks
        max_jobs = self.max_jobs
        state_changed = False

        while task_queue and \
            (max_jobs is True or len(running_tasks) < max_jobs):
            task = task_queue.popleft()
            cancelled = getattr(task, "cancelled", None)
            running_tasks.add(task)
            task.addExitListener(self._task_exit)
            # NOTE(review): the task.start() call appears to be elided.
            state_changed = True

        self._scheduling = False

        return state_changed

    def _task_exit(self, task):
        """
        Since we can always rely on exit listeners being called, the set of
        running tasks is always pruned automatically and there is never any need
        to actively prune it.
        """
        self.running_tasks.remove(task)
        if self._task_queue:

    # NOTE(review): the clear() header is elided in this excerpt.
        self._task_queue.clear()
        running_tasks = self.running_tasks
        while running_tasks:
            task = running_tasks.pop()
            task.removeExitListener(self._task_exit)

    def __nonzero__(self):
        # Python 2 truth protocol: truthy while work remains.
        return bool(self._task_queue or self.running_tasks)

    # NOTE(review): the __len__ header is elided in this excerpt.
        return len(self._task_queue) + len(self.running_tasks)
# Cached result of can_poll_device(); None means "not determined yet".
_can_poll_device = None
def can_poll_device():
    """
    Test if it's possible to use poll() on a device such as a pty. This
    is known to fail on Darwin.
    @rtype: bool
    @returns: True if poll() on a device succeeds, False otherwise.
    """
    global _can_poll_device
    if _can_poll_device is not None:
        # Return the memoized result of a previous probe.
        return _can_poll_device

    if not hasattr(select, "poll"):
        _can_poll_device = False
        return _can_poll_device

    # NOTE(review): the try/except around this open() is elided in
    # this excerpt.
    dev_null = open('/dev/null', 'rb')
        _can_poll_device = False
        return _can_poll_device

    # NOTE(review): creation of the poll object `p` is elided.
    p.register(dev_null.fileno(), PollConstants.POLLIN)

    invalid_request = False
    for f, event in p.poll():
        if event & PollConstants.POLLNVAL:
            invalid_request = True
            # NOTE(review): the break and dev_null cleanup are elided.

    _can_poll_device = not invalid_request
    return _can_poll_device
def create_poll_instance():
    """
    Create an instance of select.poll, or an instance of
    PollSelectAdapter if there is no poll() implementation or
    it is broken somehow.
    """
    native_poll_works = can_poll_device()
    return select.poll() if native_poll_works else PollSelectAdapter()
# Provide a Linux /proc based fallback when os.getloadavg is missing.
getloadavg = getattr(os, "getloadavg", None)
if getloadavg is None:
    # NOTE(review): the fallback function header is elided in this
    # excerpt; the body below emulates os.getloadavg().
        """
        Uses /proc/loadavg to emulate os.getloadavg().
        Raises OSError if the load average was unobtainable.
        """
        # NOTE(review): the try/except around this read is elided.
        loadavg_str = open('/proc/loadavg').readline()
        # getloadavg() is only supposed to raise OSError, so convert
        raise OSError('unknown')
        loadavg_split = loadavg_str.split()
        if len(loadavg_split) < 3:
            raise OSError('unknown')
        # NOTE(review): the loop header over the first three fields and
        # its float-parsing try/except are elided.
        loadavg_floats.append(float(loadavg_split[i]))
        raise OSError('unknown')
        return tuple(loadavg_floats)
class PollScheduler(object):
    """Base class that multiplexes poll() events to registered handler
    callbacks and provides job-count / load-average throttling."""

    class _sched_iface_class(SlotObject):
        # Narrow interface handed to tasks so they can register file
        # descriptors without seeing the whole scheduler.
        __slots__ = ("register", "schedule", "unregister")

    # NOTE(review): the __init__ header and some attribute assignments
    # are elided in this excerpt.
        self._max_load = None
        self._poll_event_queue = []
        self._poll_event_handlers = {}
        self._poll_event_handler_ids = {}
        # Increment id for each new handler.
        self._event_handler_id = 0
        self._poll_obj = create_poll_instance()
        self._scheduling = False

    def _schedule(self):
        """
        Calls _schedule_tasks() and automatically returns early from
        any recursive calls to this method that the _schedule_tasks()
        call might trigger. This makes _schedule() safe to call from
        inside exit listeners.
        """
        if self._scheduling:
        # NOTE(review): the early return and the try/finally wrapping
        # the call below are elided in this excerpt.
        self._scheduling = True
        return self._schedule_tasks()
        self._scheduling = False

    def _running_job_count(self):
        # NOTE(review): body elided — presumably abstract here and
        # overridden by subclasses; confirm against full source.

    def _can_add_job(self):
        # Enforce both the --jobs limit and the --load-average limit.
        max_jobs = self._max_jobs
        max_load = self._max_load

        if self._max_jobs is not True and \
            self._running_job_count() >= self._max_jobs:
        # NOTE(review): return statements in this method are elided.

        if max_load is not None and \
            (max_jobs is True or max_jobs > 1) and \
            self._running_job_count() >= 1:
            # NOTE(review): the try/except around getloadavg() is
            # elided.
            avg1, avg5, avg15 = getloadavg()
            if avg1 >= max_load:

    def _poll(self, timeout=None):
        """
        All poll() calls pass through here. The poll events
        are added directly to self._poll_event_queue.
        In order to avoid endless blocking, this raises
        StopIteration if timeout is None and there are
        no file descriptors to poll.
        """
        if not self._poll_event_handlers:
        if timeout is None and \
            not self._poll_event_handlers:
            raise StopIteration(
                "timeout is None and there are no poll() event handlers")

        # The following error is known to occur with Linux kernel versions
        # (version detail elided in this excerpt):
        #
        #   select.error: (4, 'Interrupted system call')
        #
        # This error has been observed after a SIGSTOP, followed by SIGCONT.
        # Treat it similar to EAGAIN if timeout is None, otherwise just return
        # without any events.
        # NOTE(review): the while/try wrapping the call below is elided.
        self._poll_event_queue.extend(self._poll_obj.poll(timeout))
        except select.error, e:
            writemsg_level("\n!!! select error: %s\n" % (e,),
                level=logging.ERROR, noiselevel=-1)
            if timeout is not None:

    def _next_poll_event(self, timeout=None):
        """
        Since the _schedule_wait() loop is called by event
        handlers from _poll_loop(), maintain a central event
        queue for both of them to share events from a single
        poll() call. In order to avoid endless blocking, this
        raises StopIteration if timeout is None and there are
        no file descriptors to poll.
        """
        if not self._poll_event_queue:
            # NOTE(review): the refill call (self._poll(timeout)) is
            # elided.
        return self._poll_event_queue.pop()

    def _poll_loop(self):

        event_handlers = self._poll_event_handlers
        event_handled = False

        # NOTE(review): the try wrapping this loop is elided.
        while event_handlers:
            f, event = self._next_poll_event()
            handler, reg_id = event_handlers[f]
            # NOTE(review): the handler(f, event) call is elided.
            event_handled = True
        except StopIteration:
            event_handled = True

        if not event_handled:
            raise AssertionError("tight loop")

    def _schedule_yield(self):
        """
        Schedule for a short period of time chosen by the scheduler based
        on internal state. Synchronous tasks should call this periodically
        in order to allow the scheduler to service pending poll events. The
        scheduler will call poll() exactly once, without blocking, and any
        resulting poll events will be serviced.
        """
        event_handlers = self._poll_event_handlers
        # NOTE(review): initialization of events_handled is elided.
        if not event_handlers:
            return bool(events_handled)

        if not self._poll_event_queue:
            # NOTE(review): the non-blocking poll call is elided.

        while event_handlers and self._poll_event_queue:
            f, event = self._next_poll_event()
            handler, reg_id = event_handlers[f]
            # NOTE(review): the handler call / counter increment inside
            # the (elided) try are not visible here.
        except StopIteration:

        return bool(events_handled)

    def _register(self, f, eventmask, handler):
        """
        @rtype: int
        @return: A unique registration id, for use in schedule() or
            unregister() calls.
        """
        if f in self._poll_event_handlers:
            raise AssertionError("fd %d is already registered" % f)
        self._event_handler_id += 1
        reg_id = self._event_handler_id
        self._poll_event_handler_ids[reg_id] = f
        self._poll_event_handlers[f] = (handler, reg_id)
        self._poll_obj.register(f, eventmask)
        # NOTE(review): the `return reg_id` statement is elided.

    def _unregister(self, reg_id):
        f = self._poll_event_handler_ids[reg_id]
        self._poll_obj.unregister(f)
        del self._poll_event_handlers[f]
        del self._poll_event_handler_ids[reg_id]

    def _schedule_wait(self, wait_ids):
        """
        Schedule until wait_id is no longer registered.
        @type wait_id: int
        @param wait_id: a task id to wait for
        """
        event_handlers = self._poll_event_handlers
        handler_ids = self._poll_event_handler_ids
        event_handled = False

        if isinstance(wait_ids, int):
            wait_ids = frozenset([wait_ids])

        # NOTE(review): the try wrapping this loop is elided.
        while wait_ids.intersection(handler_ids):
            f, event = self._next_poll_event()
            handler, reg_id = event_handlers[f]
            # NOTE(review): the handler(f, event) call is elided.
            event_handled = True
        except StopIteration:
            event_handled = True

        return event_handled
class QueueScheduler(PollScheduler):
    """
    Add instances of SequentialTaskQueue and then call run(). The
    run() method returns when no tasks remain.
    """

    def __init__(self, max_jobs=None, max_load=None):
        PollScheduler.__init__(self)

        if max_jobs is None:
        # NOTE(review): the default max_jobs assignment is elided.

        self._max_jobs = max_jobs
        self._max_load = max_load
        self.sched_iface = self._sched_iface_class(
            register=self._register,
            schedule=self._schedule_wait,
            unregister=self._unregister)

        # NOTE(review): initialization of self._queues is elided.
        self._schedule_listeners = []

    # NOTE(review): the add() header is elided in this excerpt.
        self._queues.append(q)

    def remove(self, q):
        self._queues.remove(q)

    # NOTE(review): the run() header is elided in this excerpt.
        # Drain the queues; blocking-wait calls inside these loops are
        # elided.
        while self._schedule():
        while self._running_job_count():

    def _schedule_tasks(self):
        """
        @rtype: bool
        @returns: True if there may be remaining tasks to schedule,
            False otherwise.
        """
        while self._can_add_job():
            n = self._max_jobs - self._running_job_count()
            # NOTE(review): the n < 1 guard is elided.
            if not self._start_next_job(n):
            # NOTE(review): return statements here are elided.

        for q in self._queues:
            # NOTE(review): the per-queue truth test and the returns
            # are elided.

    def _running_job_count(self):
        # NOTE(review): job_count initialization is elided.
        for q in self._queues:
            job_count += len(q.running_tasks)
        self._jobs = job_count
        # NOTE(review): the return statement is elided.

    def _start_next_job(self, n=1):
        # NOTE(review): started_count initialization is elided.
        for q in self._queues:
            initial_job_count = len(q.running_tasks)
            # NOTE(review): the q.schedule() call is elided.
            final_job_count = len(q.running_tasks)
            if final_job_count > initial_job_count:
                started_count += (final_job_count - initial_job_count)
            if started_count >= n:
                # NOTE(review): the break is elided.
        return started_count
class TaskScheduler(object):
    """
    A simple way to handle scheduling of asynchronous task instances.
    Simply add tasks and call run(). The run() method returns when no
    tasks remain.
    """

    def __init__(self, max_jobs=None, max_load=None):
        # Delegate all real scheduling to a QueueScheduler that owns a
        # single SequentialTaskQueue.
        task_queue = SequentialTaskQueue(max_jobs=max_jobs)
        queue_scheduler = QueueScheduler(
            max_jobs=max_jobs, max_load=max_load)
        self._queue = task_queue
        self._scheduler = queue_scheduler
        self.sched_iface = queue_scheduler.sched_iface
        self.run = queue_scheduler.run
        queue_scheduler.add(task_queue)

    def add(self, task):
        """Enqueue a task for execution during run()."""
        self._queue.add(task)
class JobStatusDisplay(object):
    """One-line terminal status display for parallel jobs, redrawn in
    place when attached to a tty."""

    # Attributes whose assignment triggers a display refresh.
    _bound_properties = ("curval", "failed", "running")
    _jobs_column_width = 48

    # Don't update the display unless at least this much
    # time has passed, in units of seconds.
    _min_display_latency = 2

    _default_term_codes = {
    # NOTE(review): the dict entries are elided in this excerpt.

    _termcap_name_map = {
        'carriage_return' : 'cr',
    # NOTE(review): remaining entries are elided in this excerpt.

    def __init__(self, out=sys.stdout, quiet=False):
        # object.__setattr__ is used because __setattr__ below
        # intercepts assignments to trigger refreshes.
        object.__setattr__(self, "out", out)
        object.__setattr__(self, "quiet", quiet)
        object.__setattr__(self, "maxval", 0)
        object.__setattr__(self, "merges", 0)
        object.__setattr__(self, "_changed", False)
        object.__setattr__(self, "_displayed", False)
        object.__setattr__(self, "_last_display_time", 0)
        object.__setattr__(self, "width", 80)

        isatty = hasattr(out, "isatty") and out.isatty()
        object.__setattr__(self, "_isatty", isatty)
        if not isatty or not self._init_term():
            # Fall back to hard-coded defaults when termcap init
            # fails or is unavailable; term_codes init is elided.
            for k, capname in self._termcap_name_map.iteritems():
                term_codes[k] = self._default_term_codes[capname]
            object.__setattr__(self, "_term_codes", term_codes)
        encoding = sys.getdefaultencoding()
        for k, v in self._term_codes.items():
            if not isinstance(v, str):
                # Python 2: decode byte capability strings.
                self._term_codes[k] = v.decode(encoding, 'replace')

    def _init_term(self):
        """
        Initialize term control codes.
        @rtype: bool
        @returns: True if term codes were successfully initialized,
            False otherwise.
        """
        term_type = os.environ.get("TERM", "vt100")
        # NOTE(review): local setup and the try wrapping setupterm are
        # elided in this excerpt.
        curses.setupterm(term_type, self.out.fileno())
        tigetstr = curses.tigetstr
        except curses.error:

        if tigetstr is None:
        # NOTE(review): the failure return is elided.

        for k, capname in self._termcap_name_map.iteritems():
            code = tigetstr(capname)
            # NOTE(review): the None-check guard is elided; fall back
            # to the default code.
            code = self._default_term_codes[capname]
            term_codes[k] = code
        object.__setattr__(self, "_term_codes", term_codes)

    def _format_msg(self, msg):
        return ">>> %s" % msg

    # NOTE(review): the _erase() header and the out.write( line are
    # elided in this excerpt.
            self._term_codes['carriage_return'] + \
            self._term_codes['clr_eol'])
        self._displayed = False

    def _display(self, line):
        self.out.write(line)
        self._displayed = True

    def _update(self, msg):
        # NOTE(review): the local `out` binding and quiet guard are
        # elided.
        if not self._isatty:
            out.write(self._format_msg(msg) + self._term_codes['newline'])
            self._displayed = True

        self._display(self._format_msg(msg))

    def displayMessage(self, msg):

        was_displayed = self._displayed

        if self._isatty and self._displayed:
            # NOTE(review): the erase call is elided.

        self.out.write(self._format_msg(msg) + self._term_codes['newline'])
        self._displayed = False

            self._changed = True
        # NOTE(review): the re-display for was_displayed is elided.

    # NOTE(review): the reset() header is elided in this excerpt.
        for name in self._bound_properties:
            object.__setattr__(self, name, 0)

        # NOTE(review): the _displayed guard is elided.
            self.out.write(self._term_codes['newline'])
            self._displayed = False

    def __setattr__(self, name, value):
        old_value = getattr(self, name)
        if value == old_value:
        # NOTE(review): the early return is elided.
        object.__setattr__(self, name, value)
        if name in self._bound_properties:
            self._property_change(name, old_value, value)

    def _property_change(self, name, old_value, new_value):
        self._changed = True

    def _load_avg_str(self):
        # NOTE(review): the getloadavg call, error handling and digit
        # selection are elided in this excerpt.
        return ", ".join(("%%.%df" % digits ) % x for x in avg)

    # NOTE(review): the display() header is elided in this excerpt.
        """
        Display status on stdout, but only if something has
        changed since the last call.
        """
        current_time = time.time()
        time_delta = current_time - self._last_display_time
        if self._displayed and \
            # NOTE(review): the remainder of this condition and the
            # early returns below are elided.
            if not self._isatty:
            if time_delta < self._min_display_latency:

        self._last_display_time = current_time
        self._changed = False
        self._display_status()

    def _display_status(self):
        # Don't use len(self._completed_tasks) here since that also
        # can include uninstall tasks.
        curval_str = str(self.curval)
        maxval_str = str(self.maxval)
        running_str = str(self.running)
        failed_str = str(self.failed)
        load_avg_str = self._load_avg_str()

        color_output = StringIO()
        plain_output = StringIO()
        style_file = portage.output.ConsoleStyleFile(color_output)
        style_file.write_listener = plain_output
        style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
        style_writer.style_listener = style_file.new_styles
        f = formatter.AbstractFormatter(style_writer)

        number_style = "INFORM"
        f.add_literal_data("Jobs: ")
        f.push_style(number_style)
        f.add_literal_data(curval_str)
        f.add_literal_data(" of ")
        f.push_style(number_style)
        f.add_literal_data(maxval_str)
        f.add_literal_data(" complete")

        # NOTE(review): pop_style calls and the running/failed guards
        # are partially elided below.
            f.add_literal_data(", ")
            f.push_style(number_style)
            f.add_literal_data(running_str)
            f.add_literal_data(" running")

            f.add_literal_data(", ")
            f.push_style(number_style)
            f.add_literal_data(failed_str)
            f.add_literal_data(" failed")

        padding = self._jobs_column_width - len(plain_output.getvalue())
        f.add_literal_data(padding * " ")

        f.add_literal_data("Load avg: ")
        f.add_literal_data(load_avg_str)

        # Truncate to fit width, to avoid making the terminal scroll if the
        # line overflows (happens when the load average is large).
        plain_output = plain_output.getvalue()
        if self._isatty and len(plain_output) > self.width:
            # Use plain_output here since it's easier to truncate
            # properly than the color output which contains console
            # escape codes (comment completion — confirm).
            self._update(plain_output[:self.width])
            # NOTE(review): the else branch separating the two updates
            # is elided.
            self._update(color_output.getvalue())

        xtermTitle(" ".join(plain_output.split()))
class Scheduler(PollScheduler):
    """Top-level build scheduler: executes the merge list with
    parallel build jobs, fetches, and serialized merges."""

    # Options under which blocker detection is skipped.
    _opts_ignore_blockers = \
        frozenset(["--buildpkgonly",
        "--fetchonly", "--fetch-all-uri",
        "--nodeps", "--pretend"])

    # Options under which background (quiet) mode is never used.
    _opts_no_background = \
        frozenset(["--pretend",
        "--fetchonly", "--fetch-all-uri"])

    # Options under which emerge will not restart itself.
    _opts_no_restart = frozenset(["--buildpkgonly",
        "--fetchonly", "--fetch-all-uri", "--pretend"])

    # Options that should not be carried into a --resume.
    _bad_resume_opts = set(["--ask", "--changelog",
        "--resume", "--skipfirst"])

    _fetch_log = "/var/log/emerge-fetch.log"
class _iface_class(SlotObject):
    # Interface object handed to merge tasks (dblink callbacks, fetch
    # scheduling, poll registration).
    __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
        "dblinkElog", "fetch", "register", "schedule",
        "scheduleSetup", "scheduleUnpack", "scheduleYield",
    # NOTE(review): the tail of this __slots__ tuple is elided.

class _fetch_iface_class(SlotObject):
    # Interface handed to fetch tasks: shared log file + scheduler.
    __slots__ = ("log_file", "schedule")

# Slot-dict holding the scheduler's task queues by name.
_task_queues_class = slot_dict_class(
    ("merge", "jobs", "fetch", "unpack"), prefix="")
class _build_opts_class(SlotObject):
    # Build-related command-line flags, stored as booleans.
    __slots__ = ("buildpkg", "buildpkgonly",
        "fetch_all_uri", "fetchonly", "pretend")

class _binpkg_opts_class(SlotObject):
    # Binary-package-related command-line flags, stored as booleans.
    __slots__ = ("fetchonly", "getbinpkg", "pretend")

class _pkg_count_class(SlotObject):
    # Merge progress counter: curval of maxval packages merged.
    __slots__ = ("curval", "maxval")
class _emerge_log_class(SlotObject):
    """Thin emergelog() wrapper that drops short messages when xterm
    titles are disabled."""

    __slots__ = ("xterm_titles",)

    def log(self, *pargs, **kwargs):
        if not self.xterm_titles and "short_msg" in kwargs:
            # Avoid interference with the scheduler's status display.
            del kwargs["short_msg"]
        emergelog(self.xterm_titles, *pargs, **kwargs)
class _failed_pkg(SlotObject):
    # Record of a failed package: build location, its log file, the
    # package object, and the failing returncode.
    __slots__ = ("build_dir", "build_log", "pkg", "returncode")
class _ConfigPool(object):
    """Interface for a task to temporarily allocate a config
    instance from a pool. This allows a task to be constructed
    long before the config instance actually becomes needed, like
    when prefetchers are constructed for the whole merge list."""
    __slots__ = ("_root", "_allocate", "_deallocate")
    def __init__(self, root, allocate, deallocate):
        # NOTE(review): the self._root assignment is elided here.
        self._allocate = allocate
        self._deallocate = deallocate
    # NOTE(review): the allocate() header is elided in this excerpt.
        return self._allocate(self._root)
    def deallocate(self, settings):
        self._deallocate(settings)
class _unknown_internal_error(portage.exception.PortageException):
    """
    Used internally to terminate scheduling. The specific reason for
    the failure should have been dumped to stderr.
    """
    def __init__(self, value=""):
        portage.exception.PortageException.__init__(self, value)
def __init__(self, settings, trees, mtimedb, myopts,
    spinner, mergelist, favorites, digraph):
    # NOTE(review): several attribute assignments and loop headers are
    # elided throughout this constructor in this excerpt.
    PollScheduler.__init__(self)
    self.settings = settings
    self.target_root = settings["ROOT"]
    self.myopts = myopts
    self._spinner = spinner
    self._mtimedb = mtimedb
    self._mergelist = mergelist
    self._favorites = favorites
    self._args_set = InternalPackageSet(favorites)
    # Translate slot names like fetch_all_uri into --fetch-all-uri
    # and record the flags present in myopts as booleans.
    self._build_opts = self._build_opts_class()
    for k in self._build_opts.__slots__:
        setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
    self._binpkg_opts = self._binpkg_opts_class()
    for k in self._binpkg_opts.__slots__:
        setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)

    self._logger = self._emerge_log_class()
    self._task_queues = self._task_queues_class()
    for k in self._task_queues.allowed_keys:
        setattr(self._task_queues, k,
            SequentialTaskQueue())

    # Holds merges that will wait to be executed when no builds are
    # executing. This is useful for system packages since dependencies
    # on system packages are frequently unspecified.
    self._merge_wait_queue = []
    # Holds merges that have been transferred from the merge_wait_queue to
    # the actual merge queue. They are removed from this list upon
    # completion. Other packages can start building only when this list is
    # empty.
    self._merge_wait_scheduled = []

    # Holds system packages and their deep runtime dependencies. Before
    # being merged, these packages go to merge_wait_queue, to be merged
    # when no other packages are building.
    self._deep_system_deps = set()

    # Holds packages to merge which will satisfy currently unsatisfied
    # deep runtime dependencies of system packages. If this is not empty
    # then no parallel builds will be spawned until it is empty. This
    # minimizes the possibility that a build will fail due to the system
    # being in a fragile state. For example, see bug #259954.
    self._unsatisfied_system_deps = set()

    self._status_display = JobStatusDisplay()
    self._max_load = myopts.get("--load-average")
    max_jobs = myopts.get("--jobs")
    if max_jobs is None:
    # NOTE(review): the default max_jobs assignment is elided.
    self._set_max_jobs(max_jobs)

    # The root where the currently running
    # portage instance is installed.
    self._running_root = trees["/"]["root_config"]

    if settings.get("PORTAGE_DEBUG", "") == "1":
    # NOTE(review): the debug-flag assignment is elided.
    self.pkgsettings = {}
    self._config_pool = {}
    self._blocker_db = {}
    # NOTE(review): the `for root in trees:` header is elided.
        self._config_pool[root] = []
        self._blocker_db[root] = BlockerDB(trees[root]["root_config"])

    fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
        schedule=self._schedule_fetch)
    self._sched_iface = self._iface_class(
        dblinkEbuildPhase=self._dblink_ebuild_phase,
        dblinkDisplayMerge=self._dblink_display_merge,
        dblinkElog=self._dblink_elog,
        fetch=fetch_iface, register=self._register,
        schedule=self._schedule_wait,
        scheduleSetup=self._schedule_setup,
        scheduleUnpack=self._schedule_unpack,
        scheduleYield=self._schedule_yield,
        unregister=self._unregister)

    self._prefetchers = weakref.WeakValueDictionary()
    self._pkg_queue = []
    self._completed_tasks = set()

    self._failed_pkgs = []
    self._failed_pkgs_all = []
    self._failed_pkgs_die_msgs = []
    self._post_mod_echo_msgs = []
    self._parallel_fetch = False
    merge_count = len([x for x in mergelist \
        if isinstance(x, Package) and x.operation == "merge"])
    self._pkg_count = self._pkg_count_class(
        curval=0, maxval=merge_count)
    self._status_display.maxval = self._pkg_count.maxval

    # The load average takes some time to respond when new
    # jobs are added, so we need to limit the rate of adding
    # new jobs.
    self._job_delay_max = 10
    self._job_delay_factor = 1.0
    self._job_delay_exp = 1.5
    self._previous_job_start_time = None

    self._set_digraph(digraph)

    # This is used to memoize the _choose_pkg() result when
    # no packages can be chosen until one of the existing
    # jobs completes.
    self._choose_pkg_return_early = False

    features = self.settings.features
    if "parallel-fetch" in features and \
        not ("--pretend" in self.myopts or \
        "--fetch-all-uri" in self.myopts or \
        "--fetchonly" in self.myopts):
        if "distlocks" not in features:
            portage.writemsg(red("!!!")+"\n", noiselevel=-1)
            portage.writemsg(red("!!!")+" parallel-fetching " + \
                "requires the distlocks feature enabled"+"\n",
            portage.writemsg(red("!!!")+" you have it disabled, " + \
                "thus parallel-fetching is being disabled"+"\n",
            portage.writemsg(red("!!!")+"\n", noiselevel=-1)
        elif len(mergelist) > 1:
            self._parallel_fetch = True

    if self._parallel_fetch:
        # clear out existing fetch log if it exists
        # NOTE(review): the try statement around this open is elided.
        open(self._fetch_log, 'w')
        except EnvironmentError:

    self._running_portage = None
    portage_match = self._running_root.trees["vartree"].dbapi.match(
        portage.const.PORTAGE_PACKAGE_ATOM)
    # NOTE(review): the guard around the pop() below is elided.
        cpv = portage_match.pop()
        self._running_portage = self._pkg(cpv, "installed",
            self._running_root, installed=True)
def _poll(self, timeout=None):
    # NOTE(review): a line (likely a self._schedule() call) is elided
    # before delegating to the base class implementation.
    PollScheduler._poll(self, timeout=timeout)
10129 def _set_max_jobs(self, max_jobs):
10130 self._max_jobs = max_jobs
10131 self._task_queues.jobs.max_jobs = max_jobs
def _background_mode(self):
    """
    Check if background mode is enabled and adjust states as necessary.
    @rtype: bool
    @returns: True if background mode is enabled, False otherwise.
    """
    background = (self._max_jobs is True or \
        self._max_jobs > 1 or "--quiet" in self.myopts) and \
        not bool(self._opts_no_background.intersection(self.myopts))

    # NOTE(review): a guard around the interactive-task handling below
    # is elided in this excerpt.
    interactive_tasks = self._get_interactive_tasks()
    if interactive_tasks:
        # Interactive packages need stdio, so parallel/quiet output
        # must be disabled for them.
        writemsg_level(">>> Sending package output to stdio due " + \
            "to interactive package(s):\n",
            level=logging.INFO, noiselevel=-1)
        # NOTE(review): initialization of `msg` is elided.
        for pkg in interactive_tasks:
            pkg_str = " " + colorize("INFORM", str(pkg.cpv))
            if pkg.root != "/":
                pkg_str += " for " + pkg.root
            msg.append(pkg_str)

        writemsg_level("".join("%s\n" % (l,) for l in msg),
            level=logging.INFO, noiselevel=-1)
        if self._max_jobs is True or self._max_jobs > 1:
            self._set_max_jobs(1)
            writemsg_level(">>> Setting --jobs=1 due " + \
                "to the above interactive package(s)\n",
                level=logging.INFO, noiselevel=-1)

    self._status_display.quiet = \
        not background or \
        ("--quiet" in self.myopts and \
        "--verbose" not in self.myopts)

    self._logger.xterm_titles = \
        "notitles" not in self.settings.features and \
        self._status_display.quiet

    # NOTE(review): the `return background` statement is elided.
def _get_interactive_tasks(self):
    # Collect merge tasks whose PROPERTIES metadata contains
    # "interactive".
    from portage import flatten
    from portage.dep import use_reduce, paren_reduce
    interactive_tasks = []
    for task in self._mergelist:
        if not (isinstance(task, Package) and \
            task.operation == "merge"):
        # NOTE(review): the continue and the try statement are elided.
            properties = flatten(use_reduce(paren_reduce(
                task.metadata["PROPERTIES"]), uselist=task.use.enabled))
        except portage.exception.InvalidDependString, e:
            show_invalid_depstring_notice(task,
                task.metadata["PROPERTIES"], str(e))
            raise self._unknown_internal_error()
        if "interactive" in properties:
            interactive_tasks.append(task)
    return interactive_tasks
def _set_digraph(self, digraph):
    # With --nodeps or fewer than 2 jobs the digraph provides no
    # benefit, so drop it.
    if "--nodeps" in self.myopts or \
        (self._max_jobs is not True and self._max_jobs < 2):
        # NOTE(review): an early return after this assignment is
        # elided.
        self._digraph = None

    self._digraph = digraph
    self._find_system_deps()
    self._prune_digraph()
    self._prevent_builddir_collisions()
def _find_system_deps(self):
    """
    Find system packages and their deep runtime dependencies. Before being
    merged, these packages go to merge_wait_queue, to be merged when no
    other packages are building.
    """
    # Keep only packages that are actually scheduled for merging.
    candidates = _find_deep_system_runtime_deps(self._digraph)
    merges = set(pkg for pkg in candidates if pkg.operation == "merge")
    self._deep_system_deps.clear()
    self._deep_system_deps.update(merges)
def _prune_digraph(self):
    """
    Prune any root nodes that are irrelevant.
    """
    graph = self._digraph
    completed_tasks = self._completed_tasks
    removed_nodes = set()
    # NOTE(review): the loop header enclosing the body below is elided
    # in this excerpt.
    for node in graph.root_nodes():
        if not isinstance(node, Package) or \
            (node.installed and node.operation == "nomerge") or \
            node in completed_tasks:
            removed_nodes.add(node)
    graph.difference_update(removed_nodes)
    if not removed_nodes:
        # NOTE(review): the break terminating the enclosing (elided)
        # loop is not visible here.
    removed_nodes.clear()
def _prevent_builddir_collisions(self):
    """
    When building stages, sometimes the same exact cpv needs to be merged
    to both $ROOTs. Add edges to the digraph in order to avoid collisions
    in the builddir. Currently, normal file locks would be inappropriate
    for this purpose since emerge holds all of its build dir locks from
    the main process.
    """
    # NOTE(review): initialization of cpv_map is elided.
    for pkg in self._mergelist:
        if not isinstance(pkg, Package):
            # a satisfied blocker
        # NOTE(review): continue statements and an installed-package
        # guard are elided here.
        if pkg.cpv not in cpv_map:
            cpv_map[pkg.cpv] = [pkg]
        for earlier_pkg in cpv_map[pkg.cpv]:
            # Serialize: this pkg builds only after the earlier one.
            self._digraph.add(earlier_pkg, pkg,
                priority=DepPriority(buildtime=True))
        cpv_map[pkg.cpv].append(pkg)
class _pkg_failure(portage.exception.PortageException):
    """
    An instance of this class is raised by unmerge() when
    an uninstallation fails.
    """
    # NOTE(review): a class-level default status and an `if pargs:`
    # guard before the status assignment appear to be elided from this
    # view.
    def __init__(self, *pargs):
        portage.exception.PortageException.__init__(self, pargs)
        # The first positional argument, when given, carries the
        # failure status code.
        self.status = pargs[0]
def _schedule_fetch(self, fetcher):
    """
    Schedule a fetcher on the fetch queue, in order to
    serialize access to the fetch log.
    """
    # addFront (rather than add) puts this task at the head of the
    # queue — presumably ahead of queued prefetchers; see
    # _add_prefetchers — TODO confirm.
    self._task_queues.fetch.addFront(fetcher)
def _schedule_setup(self, setup_phase):
    """
    Schedule a setup phase on the merge queue, in order to
    serialize unsandboxed access to the live filesystem.
    """
    self._task_queues.merge.addFront(setup_phase)
def _schedule_unpack(self, unpack_phase):
    """
    Schedule an unpack phase on the unpack queue, in order
    to serialize $DISTDIR access for live ebuilds.
    """
    self._task_queues.unpack.add(unpack_phase)
10298 def _find_blockers(self, new_pkg):
10300 Returns a callable which should be called only when
10301 the vdb lock has been acquired.
10303 def get_blockers():
10304 return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10305 return get_blockers
def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
    # Return a list of vartree dblink instances for installed packages
    # that block new_pkg.
    # NOTE(review): several control lines are elided from this view —
    # an early return when blockers are ignored, a gc.collect() call,
    # and `continue` statements inside the loop.
    if self._opts_ignore_blockers.intersection(self.myopts):
    # Call gc.collect() here to avoid heap overflow that
    # triggers 'Cannot allocate memory' errors (reported
    # with python-2.5).
    blocker_db = self._blocker_db[new_pkg.root]
    blocker_dblinks = []
    for blocking_pkg in blocker_db.findInstalledBlockers(
        new_pkg, acquire_lock=acquire_lock):
        # Same-slot or identical-cpv packages are replaced rather than
        # treated as blockers (control lines elided).
        if new_pkg.slot_atom == blocking_pkg.slot_atom:
        if new_pkg.cpv == blocking_pkg.cpv:
        # Build a vartree dblink for each real blocker so it can be
        # unmerged later.
        blocker_dblinks.append(portage.dblink(
            blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
            self.pkgsettings[blocking_pkg.root], treetype="vartree",
            vartree=self.trees[blocking_pkg.root]["vartree"]))
    return blocker_dblinks
def _dblink_pkg(self, pkg_dblink):
    """
    Translate a dblink instance into the scheduler's Package object,
    going through self._pkg() so cached instances are reused.
    """
    tree_pkg_type = RootConfig.tree_pkg_map[pkg_dblink.treetype]
    pkg_root_config = self.trees[pkg_dblink.myroot]["root_config"]
    return self._pkg(pkg_dblink.mycpv, tree_pkg_type, pkg_root_config,
        installed=(tree_pkg_type == "installed"))
def _append_to_log_path(self, log_path, msg):
    # Append msg to the build log at log_path.
    # NOTE(review): the write/close lines of this method are elided from
    # this view; only the open() call is visible.
    f = open(log_path, 'a')
def _dblink_elog(self, pkg_dblink, phase, func, msgs):
    # Route elog messages produced during a dblink merge/unmerge either
    # to the build log (background mode) or to the console.
    # NOTE(review): the out-variable setup, the message loop, and the
    # log_file close/cleanup lines are elided from this view.
    log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
    background = self._background
    if background and log_path is not None:
        log_file = open(log_path, 'a')
    func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
    if log_file is not None:
def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
    # Display a message from a dblink merge, respecting background mode:
    # without a log file the message goes to the console (unless we are
    # backgrounded and it is below WARN level); otherwise it is also
    # appended to the log.
    # NOTE(review): return/else control lines are elided from this view;
    # the last two statements presumably belong to separate branches.
    log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
    background = self._background
    if log_path is None:
        if not (background and level < logging.WARN):
            portage.util.writemsg_level(msg,
                level=level, noiselevel=noiselevel)
    portage.util.writemsg_level(msg,
        level=level, noiselevel=noiselevel)
    self._append_to_log_path(log_path, msg)
def _dblink_ebuild_phase(self,
    pkg_dblink, pkg_dbapi, ebuild_path, phase):
    """
    Using this callback for merge phases allows the scheduler
    to run while these phases execute asynchronously, and allows
    the scheduler control output handling.
    """
    scheduler = self._sched_iface
    settings = pkg_dblink.settings
    pkg = self._dblink_pkg(pkg_dblink)
    background = self._background
    # NOTE(review): log_path appears unused in the visible lines —
    # confirm against the full source.
    log_path = settings.get("PORTAGE_LOG_FILE")
    ebuild_phase = EbuildPhase(background=background,
        pkg=pkg, phase=phase, scheduler=scheduler,
        settings=settings, tree=pkg_dblink.treetype)
    ebuild_phase.start()
    # Block until the phase completes; other tasks keep being serviced
    # through the shared scheduler interface meanwhile.
    ebuild_phase.wait()
    return ebuild_phase.returncode
def _check_manifests(self):
    # Verify all the manifests now so that the user is notified of failure
    # as soon as possible.
    # NOTE(review): the early `return os.EX_OK` paths, loop `continue`
    # lines and the failure return are elided from this view.
    if "strict" not in self.settings.features or \
        "--fetchonly" in self.myopts or \
        "--fetch-all-uri" in self.myopts:
    shown_verifying_msg = False
    quiet_settings = {}
    for myroot, pkgsettings in self.pkgsettings.iteritems():
        # Clone a quiet config per root so digestcheck output stays terse.
        quiet_config = portage.config(clone=pkgsettings)
        quiet_config["PORTAGE_QUIET"] = "1"
        quiet_config.backup_changes("PORTAGE_QUIET")
        quiet_settings[myroot] = quiet_config
    for x in self._mergelist:
        if not isinstance(x, Package) or \
            x.type_name != "ebuild":
        if not shown_verifying_msg:
            shown_verifying_msg = True
            self._status_msg("Verifying ebuild manifests")
        root_config = x.root_config
        portdb = root_config.trees["porttree"].dbapi
        quiet_config = quiet_settings[root_config.root]
        quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
        if not portage.digestcheck([], quiet_config, strict=True):
def _add_prefetchers(self):
    # Spawn background fetch tasks for packages in the merge list.
    # NOTE(review): an early return (when parallel fetch is disabled)
    # appears to be elided between the two conditionals below.
    if not self._parallel_fetch:
    if self._parallel_fetch:
        self._status_msg("Starting parallel fetch")
        prefetchers = self._prefetchers
        getbinpkg = "--getbinpkg" in self.myopts
        # In order to avoid "waiting for lock" messages
        # at the beginning, which annoy users, never
        # spawn a prefetcher for the first package.
        for pkg in self._mergelist[1:]:
            prefetcher = self._create_prefetcher(pkg)
            if prefetcher is not None:
                self._task_queues.fetch.add(prefetcher)
                prefetchers[pkg] = prefetcher
def _create_prefetcher(self, pkg):
    """
    @return: a prefetcher, or None if not applicable
    """
    # NOTE(review): the initial `prefetcher = None` assignment and the
    # final `return prefetcher` appear to be elided from this view.
    if not isinstance(pkg, Package):
    elif pkg.type_name == "ebuild":
        prefetcher = EbuildFetcher(background=True,
            config_pool=self._ConfigPool(pkg.root,
                self._allocate_config, self._deallocate_config),
            fetchonly=1, logfile=self._fetch_log,
            pkg=pkg, prefetch=True, scheduler=self._sched_iface)
    elif pkg.type_name == "binary" and \
        "--getbinpkg" in self.myopts and \
        pkg.root_config.trees["bintree"].isremote(pkg.cpv):
        # Only remote binpkgs need prefetching; local ones require no
        # download.
        prefetcher = BinpkgPrefetcher(background=True,
            pkg=pkg, scheduler=self._sched_iface)
def _is_restart_scheduled(self):
    """
    Check if the merge list contains a replacement
    for the current running instance, that will result
    in restart after merge.
    @returns: True if a restart is scheduled, False otherwise.
    """
    # NOTE(review): the per-branch return statements are elided from
    # this view.
    if self._opts_no_restart.intersection(self.myopts):
    mergelist = self._mergelist
    for i, pkg in enumerate(mergelist):
        # A restart only matters when the portage replacement is not
        # the last entry, i.e. packages remain to merge after it.
        if self._is_restart_necessary(pkg) and \
            i != len(mergelist) - 1:
def _is_restart_necessary(self, pkg):
    """
    @return: True if merging the given package
    requires restart, False otherwise.
    """
    # Figure out if we need a restart.
    # NOTE(review): some return statements are elided from this view.
    # A restart is only relevant when pkg is portage itself, installed
    # into the running root.
    if pkg.root == self._running_root.root and \
        portage.match_from_list(
        portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
        if self._running_portage:
            # Same-version reinstalls of portage need no restart.
            return pkg.cpv != self._running_portage.cpv
def _restart_if_necessary(self, pkg):
    """
    Use execv() to restart emerge. This happens
    if portage upgrades itself and there are
    remaining packages in the list.
    """
    # NOTE(review): early `return` lines for the guard conditions, a
    # mtimedb.commit() after the resume-list update, and the flag-vs-
    # valued-option branch in the loop appear to be elided from this
    # view.
    if self._opts_no_restart.intersection(self.myopts):
    if not self._is_restart_necessary(pkg):
    if pkg == self._mergelist[-1]:
    self._main_loop_cleanup()
    logger = self._logger
    pkg_count = self._pkg_count
    mtimedb = self._mtimedb
    bad_resume_opts = self._bad_resume_opts
    logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
        (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
    logger.log(" *** RESTARTING " + \
        "emerge via exec() after change of " + \
        "portage version.")
    # Drop the just-merged portage from the resume list before exec.
    mtimedb["resume"]["mergelist"].remove(list(pkg))
    portage.run_exitfuncs()
    mynewargv = [sys.argv[0], "--resume"]
    resume_opts = self.myopts.copy()
    # For automatic resume, we need to prevent
    # any of bad_resume_opts from leaking in
    # via EMERGE_DEFAULT_OPTS.
    resume_opts["--ignore-default-opts"] = True
    for myopt, myarg in resume_opts.iteritems():
        if myopt not in bad_resume_opts:
            mynewargv.append(myopt)
            mynewargv.append(myopt +"="+ str(myarg))
    # priority only needs to be adjusted on the first run
    os.environ["PORTAGE_NICENESS"] = "0"
    os.execv(mynewargv[0], mynewargv)
10570 if "--resume" in self.myopts:
10572 portage.writemsg_stdout(
10573 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10574 self._logger.log(" *** Resuming merge...")
10576 self._save_resume_list()
10579 self._background = self._background_mode()
10580 except self._unknown_internal_error:
10583 for root in self.trees:
10584 root_config = self.trees[root]["root_config"]
10586 # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10587 # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10588 # for ensuring sane $PWD (bug #239560) and storing elog messages.
10589 tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10590 if not tmpdir or not os.path.isdir(tmpdir):
10591 msg = "The directory specified in your " + \
10592 "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10593 "does not exist. Please create this " + \
10594 "directory or correct your PORTAGE_TMPDIR setting."
10595 msg = textwrap.wrap(msg, 70)
10596 out = portage.output.EOutput()
10601 if self._background:
10602 root_config.settings.unlock()
10603 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10604 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10605 root_config.settings.lock()
10607 self.pkgsettings[root] = portage.config(
10608 clone=root_config.settings)
10610 rval = self._check_manifests()
10611 if rval != os.EX_OK:
10614 keep_going = "--keep-going" in self.myopts
10615 fetchonly = self._build_opts.fetchonly
10616 mtimedb = self._mtimedb
10617 failed_pkgs = self._failed_pkgs
10620 rval = self._merge()
10621 if rval == os.EX_OK or fetchonly or not keep_going:
10623 if "resume" not in mtimedb:
10625 mergelist = self._mtimedb["resume"].get("mergelist")
10629 if not failed_pkgs:
10632 for failed_pkg in failed_pkgs:
10633 mergelist.remove(list(failed_pkg.pkg))
10635 self._failed_pkgs_all.extend(failed_pkgs)
10641 if not self._calc_resume_list():
10644 clear_caches(self.trees)
10645 if not self._mergelist:
10648 self._save_resume_list()
10649 self._pkg_count.curval = 0
10650 self._pkg_count.maxval = len([x for x in self._mergelist \
10651 if isinstance(x, Package) and x.operation == "merge"])
10652 self._status_display.maxval = self._pkg_count.maxval
10654 self._logger.log(" *** Finished. Cleaning up...")
10657 self._failed_pkgs_all.extend(failed_pkgs)
10660 background = self._background
10661 failure_log_shown = False
10662 if background and len(self._failed_pkgs_all) == 1:
10663 # If only one package failed then just show it's
10664 # whole log for easy viewing.
10665 failed_pkg = self._failed_pkgs_all[-1]
10666 build_dir = failed_pkg.build_dir
10669 log_paths = [failed_pkg.build_log]
10671 log_path = self._locate_failure_log(failed_pkg)
10672 if log_path is not None:
10674 log_file = open(log_path)
10678 if log_file is not None:
10680 for line in log_file:
10681 writemsg_level(line, noiselevel=-1)
10684 failure_log_shown = True
10686 # Dump mod_echo output now since it tends to flood the terminal.
10687 # This allows us to avoid having more important output, generated
10688 # later, from being swept away by the mod_echo output.
10689 mod_echo_output = _flush_elog_mod_echo()
10691 if background and not failure_log_shown and \
10692 self._failed_pkgs_all and \
10693 self._failed_pkgs_die_msgs and \
10694 not mod_echo_output:
10696 printer = portage.output.EOutput()
10697 for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10699 if mysettings["ROOT"] != "/":
10700 root_msg = " merged to %s" % mysettings["ROOT"]
10702 printer.einfo("Error messages for package %s%s:" % \
10703 (colorize("INFORM", key), root_msg))
10705 for phase in portage.const.EBUILD_PHASES:
10706 if phase not in logentries:
10708 for msgtype, msgcontent in logentries[phase]:
10709 if isinstance(msgcontent, basestring):
10710 msgcontent = [msgcontent]
10711 for line in msgcontent:
10712 printer.eerror(line.strip("\n"))
10714 if self._post_mod_echo_msgs:
10715 for msg in self._post_mod_echo_msgs:
10718 if len(self._failed_pkgs_all) > 1 or \
10719 (self._failed_pkgs_all and "--keep-going" in self.myopts):
10720 if len(self._failed_pkgs_all) > 1:
10721 msg = "The following %d packages have " % \
10722 len(self._failed_pkgs_all) + \
10723 "failed to build or install:"
10725 msg = "The following package has " + \
10726 "failed to build or install:"
10727 prefix = bad(" * ")
10728 writemsg(prefix + "\n", noiselevel=-1)
10729 from textwrap import wrap
10730 for line in wrap(msg, 72):
10731 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10732 writemsg(prefix + "\n", noiselevel=-1)
10733 for failed_pkg in self._failed_pkgs_all:
10734 writemsg("%s\t%s\n" % (prefix,
10735 colorize("INFORM", str(failed_pkg.pkg))),
10737 writemsg(prefix + "\n", noiselevel=-1)
def _elog_listener(self, mysettings, key, logentries, fulltext):
    # Collect ERROR-level elog entries so they can be summarized after
    # the merge run finishes.
    errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
    # NOTE(review): an `if errors:` guard appears to be elided here.
    self._failed_pkgs_die_msgs.append(
        (mysettings, key, errors))
def _locate_failure_log(self, failed_pkg):
    # Locate the most useful log file for a failed package.
    # NOTE(review): much of this method (additional log candidates,
    # existence checks, and the return statements) is elided from this
    # view.
    build_dir = failed_pkg.build_dir
    log_paths = [failed_pkg.build_log]
    for log_path in log_paths:
        # Only logs with nonzero size are worth pointing the user at.
        log_size = os.stat(log_path).st_size
def _add_packages(self):
    # Seed the scheduling queue from the merge list; the Blocker branch
    # body is elided from this view.
    pkg_queue = self._pkg_queue
    for pkg in self._mergelist:
        if isinstance(pkg, Package):
            pkg_queue.append(pkg)
        elif isinstance(pkg, Blocker):
def _system_merge_started(self, merge):
    """
    Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
    """
    # NOTE(review): the dep_stack/traversed initialization, the traversal
    # loop header, and the return lines inside the two priority filters
    # are elided from this view; visible tokens are preserved.
    graph = self._digraph
    pkg = merge.merge.pkg
    completed_tasks = self._completed_tasks
    unsatisfied = self._unsatisfied_system_deps
    def ignore_non_runtime(priority):
        """
        Ignore non-runtime priorities
        """
        if isinstance(priority, DepPriority) and \
            (priority.runtime or priority.runtime_post):
    def ignore_satisfied_runtime(priority):
        """
        Ignore non-runtime and satisfied runtime priorities.
        """
        if isinstance(priority, DepPriority) and \
            not priority.satisfied and \
            (priority.runtime or priority.runtime_post):
    node = dep_stack.pop()
    if node in traversed:
    traversed.add(node)
    unsatisfied_runtime = set(graph.child_nodes(node,
        ignore_priority=ignore_satisfied_runtime))
    for child in graph.child_nodes(node,
        ignore_priority=ignore_non_runtime):
        if not isinstance(child, Package) or \
            child.operation == 'uninstall':
        if child.operation == 'merge' and \
            child in completed_tasks:
            # When traversing children, only traverse completed
            # 'merge' nodes since those are the only ones that need
            # to be checked for unsatisfied runtime deps, and it's
            # normal for nodes that aren't yet complete to have
            # unsatisfied runtime deps.
            dep_stack.append(child)
        if child.operation == 'merge' and \
            child not in completed_tasks and \
            child in unsatisfied_runtime:
            unsatisfied.add(child)
def _merge_wait_exit_handler(self, task):
    # Remove the finished merge from the scheduled-wait list before
    # running the common merge exit handling.
    self._merge_wait_scheduled.remove(task)
    self._merge_exit(task)
def _merge_exit(self, merge):
    # Common exit handling for PackageMerge tasks: account for the
    # result, recycle the config instance, and update the display.
    # NOTE(review): the tail of this method is elided from this view.
    self._do_merge_exit(merge)
    self._deallocate_config(merge.merge.settings)
    # Installed no-op packages do not advance the progress counter.
    if merge.returncode == os.EX_OK and \
        not merge.merge.pkg.installed:
        self._status_display.curval += 1
    self._status_display.merges = len(self._task_queues.merge)
def _do_merge_exit(self, merge):
    # Handle success/failure bookkeeping for a completed merge.
    # NOTE(review): several lines are elided from this view — part of
    # the _failed_pkg constructor arguments, an early return after the
    # failure branch, and a mtimedb.commit() at the end.
    pkg = merge.merge.pkg
    if merge.returncode != os.EX_OK:
        settings = merge.merge.settings
        build_dir = settings.get("PORTAGE_BUILDDIR")
        build_log = settings.get("PORTAGE_LOG_FILE")
        self._failed_pkgs.append(self._failed_pkg(
            build_dir=build_dir, build_log=build_log,
            returncode=merge.returncode))
        self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
        self._status_display.failed = len(self._failed_pkgs)
    self._task_complete(pkg)
    pkg_to_replace = merge.merge.pkg_to_replace
    if pkg_to_replace is not None:
        # When a package is replaced, mark it's uninstall
        # task complete (if any).
        uninst_hash_key = \
            ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
        self._task_complete(uninst_hash_key)
    self._restart_if_necessary(pkg)
    # Call mtimedb.commit() after each merge so that
    # --resume still works after being interrupted
    # by reboot, sigkill or similar.
    mtimedb = self._mtimedb
    mtimedb["resume"]["mergelist"].remove(list(pkg))
    if not mtimedb["resume"]["mergelist"]:
        del mtimedb["resume"]
def _build_exit(self, build):
    # On success, queue the built package for merging; deep system deps
    # are held back until no other jobs run.  On failure, record it.
    # NOTE(review): an `else:` between the wait-queue branch and the
    # direct-add branch, the failure-branch `else:`, part of the
    # _failed_pkg arguments, and the job counter decrement are elided
    # from this view.
    if build.returncode == os.EX_OK:
        merge = PackageMerge(merge=build)
        if not build.build_opts.buildpkgonly and \
            build.pkg in self._deep_system_deps:
            # Since dependencies on system packages are frequently
            # unspecified, merge them only when no builds are executing.
            self._merge_wait_queue.append(merge)
            merge.addStartListener(self._system_merge_started)
        merge.addExitListener(self._merge_exit)
        self._task_queues.merge.add(merge)
        self._status_display.merges = len(self._task_queues.merge)
    settings = build.settings
    build_dir = settings.get("PORTAGE_BUILDDIR")
    build_log = settings.get("PORTAGE_LOG_FILE")
    self._failed_pkgs.append(self._failed_pkg(
        build_dir=build_dir, build_log=build_log,
        returncode=build.returncode))
    self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
    self._status_display.failed = len(self._failed_pkgs)
    self._deallocate_config(build.settings)
    self._status_display.running = self._jobs
def _extract_exit(self, build):
    # Binary package extraction finishes the same way a build does.
    self._build_exit(build)
10924 def _task_complete(self, pkg):
10925 self._completed_tasks.add(pkg)
10926 self._unsatisfied_system_deps.discard(pkg)
10927 self._choose_pkg_return_early = False
# NOTE(review): this span is the body of the scheduler's internal
# _merge() helper; its `def` line, the try/finally scaffolding around
# the main loop, and the success/failure return logic are elided from
# this view.  Visible code tokens are preserved exactly as seen.
self._add_prefetchers()
self._add_packages()
pkg_queue = self._pkg_queue
failed_pkgs = self._failed_pkgs
# Silence lock messages and capture elog output while backgrounded.
portage.locks._quiet = self._background
portage.elog._emerge_elog_listener = self._elog_listener
self._main_loop_cleanup()
portage.locks._quiet = False
portage.elog._emerge_elog_listener = None
rval = failed_pkgs[-1].returncode
10950 def _main_loop_cleanup(self):
10951 del self._pkg_queue[:]
10952 self._completed_tasks.clear()
10953 self._deep_system_deps.clear()
10954 self._unsatisfied_system_deps.clear()
10955 self._choose_pkg_return_early = False
10956 self._status_display.reset()
10957 self._digraph = None
10958 self._task_queues.fetch.clear()
def _choose_pkg(self):
    """
    Choose a task that has all it's dependencies satisfied.
    """
    # NOTE(review): several lines are elided from this view — early
    # `return None` paths, the chosen_pkg initialization/assignment in
    # the selection loop, and the final return.
    if self._choose_pkg_return_early:
    if self._digraph is None:
        # Without a digraph, scheduling is FIFO, but only one merge can
        # be active at a time unless --nodeps allows parallelism.
        if (self._jobs or self._task_queues.merge) and \
            not ("--nodeps" in self.myopts and \
            (self._max_jobs is True or self._max_jobs > 1)):
            self._choose_pkg_return_early = True
        return self._pkg_queue.pop(0)
    if not (self._jobs or self._task_queues.merge):
        return self._pkg_queue.pop(0)
    self._prune_digraph()
    # Pick the first queued package that is not dependent on any
    # scheduled merge; packages queued after it count as "later".
    later = set(self._pkg_queue)
    for pkg in self._pkg_queue:
        if not self._dependent_on_scheduled_merges(pkg, later):
    if chosen_pkg is not None:
        self._pkg_queue.remove(chosen_pkg)
    if chosen_pkg is None:
        # There's no point in searching for a package to
        # choose until at least one of the existing jobs
        self._choose_pkg_return_early = True
def _dependent_on_scheduled_merges(self, pkg, later):
    """
    Traverse the subgraph of the given packages deep dependencies
    to see if it contains any scheduled merges.
    @param pkg: a package to check dependencies for
    @param later: packages for which dependence should be ignored
        since they will be merged later than pkg anyway and therefore
        delaying the merge of pkg will not result in a more optimal
    @returns: True if the package is dependent, False otherwise.
    """
    # NOTE(review): the traversal while-loop header, the tail of the
    # compound condition (and its dependent-branch), and the final
    # return are elided from this view; visible tokens are preserved.
    graph = self._digraph
    completed_tasks = self._completed_tasks
    traversed_nodes = set([pkg])
    direct_deps = graph.child_nodes(pkg)
    node_stack = direct_deps
    direct_deps = frozenset(direct_deps)
    node = node_stack.pop()
    if node in traversed_nodes:
    traversed_nodes.add(node)
    if not ((node.installed and node.operation == "nomerge") or \
        (node.operation == "uninstall" and \
        node not in direct_deps) or \
        node in completed_tasks or \
    node_stack.extend(graph.child_nodes(node))
def _allocate_config(self, root):
    """
    Allocate a unique config instance for a task in order
    to prevent interference between parallel tasks.
    """
    # NOTE(review): the `else:` separating pool reuse from fresh
    # cloning is elided from this view.
    if self._config_pool[root]:
        temp_settings = self._config_pool[root].pop()
    temp_settings = portage.config(clone=self.pkgsettings[root])
    # Since config.setcpv() isn't guaranteed to call config.reset() due to
    # performance reasons, call it here to make sure all settings from the
    # previous package get flushed out (such as PORTAGE_LOG_FILE).
    temp_settings.reload()
    temp_settings.reset()
    return temp_settings
def _deallocate_config(self, settings):
    # Return a config instance to the per-root pool so that
    # _allocate_config() can reuse it.
    self._config_pool[settings["ROOT"]].append(settings)
def _main_loop(self):
    # Drive the scheduler: keep scheduling tasks and servicing poll
    # events until nothing is left to do.
    # NOTE(review): the loop bodies (poll calls and the final event
    # drain) are partially elided from this view.
    # Only allow 1 job max if a restart is scheduled
    # due to portage update.
    if self._is_restart_scheduled() or \
        self._opts_no_background.intersection(self.myopts):
        self._set_max_jobs(1)
    merge_queue = self._task_queues.merge
    while self._schedule():
        if self._poll_event_handlers:
    if not (self._jobs or merge_queue):
    if self._poll_event_handlers:
11079 def _keep_scheduling(self):
11080 return bool(self._pkg_queue and \
11081 not (self._failed_pkgs and not self._build_opts.fetchonly))
def _schedule_tasks(self):
    # NOTE(review): parts of this method (a loop over the task queues
    # and some return/scheduling scaffolding) are elided from this
    # view; visible tokens are preserved.
    # When the number of jobs drops to zero, process all waiting merges.
    if not self._jobs and self._merge_wait_queue:
        for task in self._merge_wait_queue:
            task.addExitListener(self._merge_wait_exit_handler)
            self._task_queues.merge.add(task)
        self._status_display.merges = len(self._task_queues.merge)
        self._merge_wait_scheduled.extend(self._merge_wait_queue)
        del self._merge_wait_queue[:]
    self._schedule_tasks_imp()
    self._status_display.display()
    for q in self._task_queues.values():
    # Cancel prefetchers if they're the only reason
    # the main poll loop is still running.
    if self._failed_pkgs and not self._build_opts.fetchonly and \
        not (self._jobs or self._task_queues.merge) and \
        self._task_queues.fetch:
        self._task_queues.fetch.clear()
    self._schedule_tasks_imp()
    self._status_display.display()
    return self._keep_scheduling()
def _job_delay(self):
    """
    @returns: True if job scheduling should be delayed, False otherwise.
    """
    # Throttle job spawning with an exponential backoff based on the
    # number of running jobs, when a max load is configured.
    # NOTE(review): the return statements (and any load-average check)
    # are elided from this view.
    if self._jobs and self._max_load is not None:
        current_time = time.time()
        delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
        if delay > self._job_delay_max:
            delay = self._job_delay_max
        if (current_time - self._previous_job_start_time) < delay:
def _schedule_tasks_imp(self):
    """
    @returns: True if state changed, False otherwise.
    """
    # NOTE(review): substantial scaffolding is elided from this view —
    # the enclosing while-loop, state_change increments, the condition
    # tail before the second return, and the uninstall-vs-build dispatch
    # lines around the task-queue additions.  Visible tokens preserved.
    if not self._keep_scheduling():
        return bool(state_change)
    if self._choose_pkg_return_early or \
        self._merge_wait_scheduled or \
        (self._jobs and self._unsatisfied_system_deps) or \
        not self._can_add_job() or \
        return bool(state_change)
    pkg = self._choose_pkg()
    return bool(state_change)
    if not pkg.installed:
        self._pkg_count.curval += 1
    task = self._task(pkg)
    # Uninstalls merge directly; builds/binaries go to the job queue.
    merge = PackageMerge(merge=task)
    merge.addExitListener(self._merge_exit)
    self._task_queues.merge.add(merge)
    self._previous_job_start_time = time.time()
    self._status_display.running = self._jobs
    task.addExitListener(self._extract_exit)
    self._task_queues.jobs.add(task)
    self._previous_job_start_time = time.time()
    self._status_display.running = self._jobs
    task.addExitListener(self._build_exit)
    self._task_queues.jobs.add(task)
    return bool(state_change)
def _task(self, pkg):
    # Build a MergeListItem task for pkg, resolving the installed
    # package (same slot) that it will replace, when one exists.
    # NOTE(review): an `if previous_cpv:` guard and the final
    # `return task` appear to be elided from this view.
    pkg_to_replace = None
    if pkg.operation != "uninstall":
        vardb = pkg.root_config.trees["vartree"].dbapi
        previous_cpv = vardb.match(pkg.slot_atom)
        previous_cpv = previous_cpv.pop()
        pkg_to_replace = self._pkg(previous_cpv,
            "installed", pkg.root_config, installed=True)
    task = MergeListItem(args_set=self._args_set,
        background=self._background, binpkg_opts=self._binpkg_opts,
        build_opts=self._build_opts,
        config_pool=self._ConfigPool(pkg.root,
            self._allocate_config, self._deallocate_config),
        emerge_opts=self.myopts,
        find_blockers=self._find_blockers(pkg), logger=self._logger,
        mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
        pkg_to_replace=pkg_to_replace,
        prefetcher=self._prefetchers.get(pkg),
        scheduler=self._sched_iface,
        settings=self._allocate_config(pkg.root),
        statusMessage=self._status_msg,
        world_atom=self._world_atom)
def _failed_pkg_msg(self, failed_pkg, action, preposition):
    """
    Emit a short status message describing a failed package, followed
    by the location of its log file when one can be found.
    """
    pkg = failed_pkg.pkg
    log_path = self._locate_failure_log(failed_pkg)
    msg = "%s to %s %s" % \
        (bad("Failed"), action, colorize("INFORM", pkg.cpv))
    if pkg.root != "/":
        # Mention the target root when it is not the default one.
        msg = "%s %s %s" % (msg, preposition, pkg.root)
    if log_path is not None:
        msg += ", Log file:"
    self._status_msg(msg)
    if log_path is not None:
        self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
def _status_msg(self, msg):
    """
    Display a brief status message (no newlines) in the status display.
    This is called by tasks to provide feedback to the user. This
    delegates the responsibility of generating \r and \n control
    characters, to guarantee that lines are created or erased when
    necessary.

    @param msg: a brief status message (no newlines allowed)
    """
    # In foreground mode, emit a newline first so the message does not
    # land on the same line as the live status display.
    if not self._background:
        writemsg_level("\n")
    self._status_display.displayMessage(msg)
def _save_resume_list(self):
    """
    Do this before verifying the ebuild Manifests since it might
    be possible for the user to use --resume --skipfirst get past
    a non-essential package with a broken digest.
    """
    # Persist the remaining merge list so an interrupted run can be
    # resumed.  NOTE(review): a trailing mtimedb.commit() appears to be
    # elided from this view.
    mtimedb = self._mtimedb
    mtimedb["resume"]["mergelist"] = [list(x) \
        for x in self._mergelist \
        if isinstance(x, Package) and x.operation == "merge"]
def _calc_resume_list(self):
    """
    Use the current resume list to calculate a new one,
    dropping any packages with unsatisfied deps.
    @returns: True if successful, False otherwise.
    """
    # NOTE(review): numerous control lines (else branches, returns,
    # spinner handling, try scaffolding, and parts of the error path)
    # are elided from this view; visible tokens are preserved, including
    # Python 2 `print` statements and except-clause syntax.
    print colorize("GOOD", "*** Resuming merge...")
    if self._show_list():
        if "--tree" in self.myopts:
            portage.writemsg_stdout("\n" + \
                darkgreen("These are the packages that " + \
                "would be merged, in reverse order:\n\n"))
        portage.writemsg_stdout("\n" + \
            darkgreen("These are the packages that " + \
            "would be merged, in order:\n\n"))
    show_spinner = "--quiet" not in self.myopts and \
        "--nodeps" not in self.myopts
    print "Calculating dependencies ",
    myparams = create_depgraph_params(self.myopts, None)
    success, mydepgraph, dropped_tasks = resume_depgraph(
        self.settings, self.trees, self._mtimedb, self.myopts,
        myparams, self._spinner)
    except depgraph.UnsatisfiedResumeDep, exc:
        # rename variable to avoid python-3.0 error:
        # SyntaxError: can not delete variable 'e' referenced in nested
        mydepgraph = e.depgraph
        dropped_tasks = set()
    print "\b\b... done!"
    def unsatisfied_resume_dep_msg():
        # Explain which resume-list entries are masked or lack deps.
        mydepgraph.display_problems()
        out = portage.output.EOutput()
        out.eerror("One or more packages are either masked or " + \
            "have missing dependencies:")
        show_parents = set()
        for dep in e.value:
            if dep.parent in show_parents:
            show_parents.add(dep.parent)
            if dep.atom is None:
                out.eerror(indent + "Masked package:")
                out.eerror(2 * indent + str(dep.parent))
            out.eerror(indent + str(dep.atom) + " pulled in by:")
            out.eerror(2 * indent + str(dep.parent))
        msg = "The resume list contains packages " + \
            "that are either masked or have " + \
            "unsatisfied dependencies. " + \
            "Please restart/continue " + \
            "the operation manually, or use --skipfirst " + \
            "to skip the first package in the list and " + \
            "any other packages that may be " + \
            "masked or have missing dependencies."
        for line in textwrap.wrap(msg, 72):
    self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
    if success and self._show_list():
        mylist = mydepgraph.altlist()
        if "--tree" in self.myopts:
            mydepgraph.display(mylist, favorites=self._favorites)
    self._post_mod_echo_msgs.append(mydepgraph.display_problems)
    mydepgraph.display_problems()
    mylist = mydepgraph.altlist()
    # break_refs allows garbage collection of the old depgraph.
    mydepgraph.break_refs(mylist)
    mydepgraph.break_refs(dropped_tasks)
    self._mergelist = mylist
    self._set_digraph(mydepgraph.schedulerGraph())
    # Report --keep-going drops via elog so they reach the summaries.
    for task in dropped_tasks:
        if not (isinstance(task, Package) and task.operation == "merge"):
        msg = "emerge --keep-going:" + \
        if pkg.root != "/":
            msg += " for %s" % (pkg.root,)
        msg += " dropped due to unsatisfied dependency."
        for line in textwrap.wrap(msg, msg_width):
            eerror(line, phase="other", key=pkg.cpv)
        settings = self.pkgsettings[pkg.root]
        # Ensure that log collection from $T is disabled inside
        # elog_process(), since any logs that might exist are
        settings.pop("T", None)
        portage.elog.elog_process(pkg.cpv, settings)
        self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
def _show_list(self):
    # Whether the merge list should be printed (the return lines are
    # elided from this view): it is shown unless --quiet, and only when
    # --ask, --tree or --verbose requests it.
    myopts = self.myopts
    if "--quiet" not in myopts and \
        ("--ask" in myopts or "--tree" in myopts or \
        "--verbose" in myopts):
def _world_atom(self, pkg):
    """
    Add the package to the world file, but only if
    it's supposed to be added. Otherwise, do nothing.
    """
    # NOTE(review): the early `return` lines for the guard conditions,
    # the try/finally scaffolding around the world set update, and the
    # final world_set.unlock() appear to be elided from this view.
    if set(("--buildpkgonly", "--fetchonly",
        "--oneshot", "--onlydeps",
        "--pretend")).intersection(self.myopts):
    if pkg.root != self.target_root:
    args_set = self._args_set
    if not args_set.findAtomForPackage(pkg):
    logger = self._logger
    pkg_count = self._pkg_count
    root_config = pkg.root_config
    world_set = root_config.sets["world"]
    world_locked = False
    if hasattr(world_set, "lock"):
        world_locked = True
    if hasattr(world_set, "load"):
        world_set.load() # maybe it's changed on disk
    atom = create_world_atom(pkg, args_set, root_config)
    if hasattr(world_set, "add"):
        self._status_msg(('Recording %s in "world" ' + \
            'favorites file...') % atom)
        logger.log(" === (%s of %s) Updating world file (%s)" % \
            (pkg_count.curval, pkg_count.maxval, pkg.cpv))
        world_set.add(atom)
    writemsg_level('\n!!! Unable to record %s in "world"\n' % \
        (atom,), level=logging.WARN, noiselevel=-1)
    def _pkg(self, cpv, type_name, root_config, installed=False):
        """
        Get a package instance from the cache, or create a new
        one if necessary. Raises KeyError from aux_get if it
        fails for some reason (package does not exist or is
        corrupt).
        """
        # Installed packages use the "nomerge" operation key in the
        # digraph; everything else is a "merge" candidate.
        # NOTE(review): the `if installed:` guard line between the two
        # assignments is outside this excerpt.
        operation = "merge"
        operation = "nomerge"

        if self._digraph is not None:
            # Reuse existing instance when available.
            pkg = self._digraph.get(
                (type_name, root_config.root, cpv, operation))
            if pkg is not None:

        # Cache miss: build a fresh Package from the appropriate dbapi.
        tree_type = depgraph.pkg_tree_map[type_name]
        db = root_config.trees[tree_type].dbapi
        db_keys = list(self.trees[root_config.root][
            tree_type].dbapi._aux_cache_keys)
        metadata = izip(db_keys, db.aux_get(cpv, db_keys))
        pkg = Package(cpv=cpv, metadata=metadata,
            root_config=root_config, installed=installed)
        if type_name == "ebuild":
            # For ebuilds, compute USE now so the instance carries the
            # effective flags.
            settings = self.pkgsettings[root_config.root]
            settings.setcpv(pkg)
            pkg.metadata["USE"] = settings["PORTAGE_USE"]
class MetadataRegen(PollScheduler):
    """
    Scheduler that regenerates the ebuild metadata cache by spawning
    one metadata process per ebuild, up to max_jobs in parallel.

    NOTE(review): this excerpt is sampled; several interior lines
    (defaults, loop headers, try/except bodies) are not visible.
    """

    def __init__(self, portdb, max_jobs=None, max_load=None):
        # portdb: the portdbapi whose auxdb cache is being regenerated.
        PollScheduler.__init__(self)
        self._portdb = portdb
        if max_jobs is None:
        self._max_jobs = max_jobs
        self._max_load = max_load
        # Scheduler interface handed to each child metadata process.
        self._sched_iface = self._sched_iface_class(
            register=self._register,
            schedule=self._schedule_wait,
            unregister=self._unregister)
        # cpvs for which a metadata process was started successfully.
        self._valid_pkgs = set()
        self._process_iter = self._iter_metadata_processes()
        self.returncode = os.EX_OK
        self._error_count = 0

    def _iter_metadata_processes(self):
        """
        Generator yielding one metadata process per ebuild, iterating
        categories/packages in sorted order (reverse-sorted list popped
        from the end).
        """
        portdb = self._portdb
        valid_pkgs = self._valid_pkgs
        every_cp = portdb.cp_all()
        # Reverse sort so pop() walks cp names in ascending order.
        every_cp.sort(reverse=True)
        cp = every_cp.pop()
        portage.writemsg_stdout("Processing %s\n" % cp)
        cpv_list = portdb.cp_list(cp)
        for cpv in cpv_list:
            valid_pkgs.add(cpv)
            ebuild_path, repo_path = portdb.findname2(cpv)
            metadata_process = portdb._metadata_process(
                cpv, ebuild_path, repo_path)
            if metadata_process is None:
            yield metadata_process

    # NOTE(review): the following lines belong to a method (upstream
    # `run`) whose `def` line falls outside this excerpt.
        portdb = self._portdb
        from portage.cache.cache_errors import CacheError
        # Snapshot existing cache keys so stale entries can be pruned
        # after regeneration.
        for mytree in portdb.porttrees:
            dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
            except CacheError, e:
                portage.writemsg("Error listing cache entries for " + \
                    "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)

        # Main scheduling loop: keep starting jobs until exhausted.
        while self._schedule():

        # Any cpv still present in some tree is not dead.
        for y in self._valid_pkgs:
            for mytree in portdb.porttrees:
                if portdb.findname2(y, mytree=mytree)[0]:
                    dead_nodes[mytree].discard(y)

        # Prune cache entries for ebuilds that no longer exist.
        for mytree, nodes in dead_nodes.iteritems():
            auxdb = portdb.auxdb[mytree]
            except (KeyError, CacheError):

    def _schedule_tasks(self):
        """
        @rtype: bool
        @returns: True if there may be remaining tasks to schedule,
            False otherwise.
        """
        while self._can_add_job():
            metadata_process = self._process_iter.next()
            except StopIteration:

            metadata_process.scheduler = self._sched_iface
            metadata_process.addExitListener(self._metadata_exit)
            metadata_process.start()

    def _metadata_exit(self, metadata_process):
        # Exit listener for a finished metadata process; records errors
        # and drops the cpv from the valid set on failure.
        if metadata_process.returncode != os.EX_OK:
            self.returncode = 1
            self._error_count += 1
            self._valid_pkgs.discard(metadata_process.cpv)
            portage.writemsg("Error processing %s, continuing...\n" % \
                (metadata_process.cpv,))
class UninstallFailure(portage.exception.PortageException):
    """
    An instance of this class is raised by unmerge() when
    an uninstallation fails.
    """

    def __init__(self, *pargs):
        # pargs[0], when given, is the failing portage.unmerge() return
        # code; it is exposed as self.status.
        portage.exception.PortageException.__init__(self, pargs)
        # NOTE(review): a guard line (only set status when pargs is
        # non-empty) is omitted from this excerpt.
        self.status = pargs[0]
def unmerge(root_config, myopts, unmerge_action,
    unmerge_files, ldpath_mtimes, autoclean=0,
    clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
    scheduler=None, writemsg_level=portage.util.writemsg_level):
    """
    Uninstall packages for the "unmerge", "prune" and "clean" actions.

    @param root_config: RootConfig for the ROOT being operated on
    @param myopts: emerge command-line options
    @param unmerge_action: one of "unmerge", "prune" or "clean"
    @param unmerge_files: package arguments from the command line
    @param ldpath_mtimes: passed through to portage.unmerge()
    @param raise_on_error: raise UninstallFailure on a failed unmerge
        instead of exiting

    NOTE(review): this excerpt is a sampled view of the function; many
    interleaved lines (try/except/else/return statements, some loop
    headers) are omitted, so indentation below is reconstructed.
    """
    quiet = "--quiet" in myopts
    settings = root_config.settings
    sets = root_config.sets
    vartree = root_config.trees["vartree"]
    candidate_catpkgs=[]
    xterm_titles = "notitles" not in settings.features
    out = portage.output.EOutput()
    db_keys = list(vartree.dbapi._aux_cache_keys)

    # Nested helper (its `def _pkg(cpv):` line is outside this
    # excerpt): return a cached installed-Package instance for cpv,
    # constructing and caching one on first use.
        pkg = pkg_cache.get(cpv)
            pkg = Package(cpv=cpv, installed=True,
                metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
                root_config=root_config,
                type_name="installed")
            pkg_cache[cpv] = pkg

    vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
        # At least the parent needs to exist for the lock file.
        portage.util.ensure_dirs(vdb_path)
    except portage.exception.PortageException:

    # NOTE(review): from here down to the vdb unlock below, upstream
    # wraps the selection phase in a try/finally whose try: line is
    # outside this excerpt.
        if os.access(vdb_path, os.W_OK):
            vdb_lock = portage.locks.lockdir(vdb_path)

        # Resolve the system set into concrete package names, expanding
        # virtuals that have exactly one installed provider.
        realsyslist = sets["system"].getAtoms()
        for x in realsyslist:
            mycp = portage.dep_getkey(x)
            if mycp in settings.getvirtuals():
                for provider in settings.getvirtuals()[mycp]:
                    if vartree.dbapi.match(provider):
                        providers.append(provider)
                if len(providers) == 1:
                    syslist.extend(providers)
                syslist.append(mycp)

        mysettings = portage.config(clone=settings)

        if not unmerge_files:
            if unmerge_action == "unmerge":
                print bold("emerge unmerge") + " can only be used with specific package names"

        localtree = vartree
        # process all arguments and add all
        # valid db entries to candidate_catpkgs
            if not unmerge_files:
                candidate_catpkgs.extend(vartree.dbapi.cp_all())
            #we've got command-line arguments
            if not unmerge_files:
                print "\nNo packages to unmerge have been provided.\n"
            for x in unmerge_files:
                arg_parts = x.split('/')
                if x[0] not in [".","/"] and \
                    arg_parts[-1][-7:] != ".ebuild":
                    #possible cat/pkg or dep; treat as such
                    candidate_catpkgs.append(x)
                elif unmerge_action in ["prune","clean"]:
                    print "\n!!! Prune and clean do not accept individual" + \
                        " ebuilds as arguments;\n    skipping.\n"
                    # it appears that the user is specifying an installed
                    # ebuild and we're in "unmerge" mode, so it's ok.
                    if not os.path.exists(x):
                        print "\n!!! The path '"+x+"' doesn't exist.\n"

                    absx   = os.path.abspath(x)
                    sp_absx = absx.split("/")
                    if sp_absx[-1][-7:] == ".ebuild":
                        absx = "/".join(sp_absx)

                    sp_absx_len = len(sp_absx)

                    vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
                    vdb_len  = len(vdb_path)

                    sp_vdb     = vdb_path.split("/")
                    sp_vdb_len = len(sp_vdb)

                    if not os.path.exists(absx+"/CONTENTS"):
                        print "!!! Not a valid db dir: "+str(absx)

                    if sp_absx_len <= sp_vdb_len:
                        # The Path is shorter... so it can't be inside the vdb.
                        print "\n!!!",x,"cannot be inside "+ \
                            vdb_path+"; aborting.\n"

                    # Verify the path really lies under the vdb.
                    for idx in range(0,sp_vdb_len):
                        if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
                            print "\n!!!", x, "is not inside "+\
                                vdb_path+"; aborting.\n"

                    print "="+"/".join(sp_absx[sp_vdb_len:])
                    candidate_catpkgs.append(
                        "="+"/".join(sp_absx[sp_vdb_len:]))

        if (not "--quiet" in myopts):
        if settings["ROOT"] != "/":
            writemsg_level(darkgreen(newline+ \
                ">>> Using system located in ROOT tree %s\n" % \
        if (("--pretend" in myopts) or ("--ask" in myopts)) and \
            not ("--quiet" in myopts):
            writemsg_level(darkgreen(newline+\
                ">>> These are the packages that would be unmerged:\n"))

        # Preservation of order is required for --depclean and --prune so
        # that dependencies are respected. Use all_selected to eliminate
        # duplicate packages since the same package may be selected by
        all_selected = set()
        for x in candidate_catpkgs:
            # cycle through all our candidate deps and determine
            # what will and will not get unmerged
                mymatch = vartree.dbapi.match(x)
            except portage.exception.AmbiguousPackageName, errpkgs:
                print "\n\n!!! The short ebuild name \"" + \
                    x + "\" is ambiguous. Please specify"
                print "!!! one of the following fully-qualified " + \
                    "ebuild names instead:\n"
                for i in errpkgs[0]:
                    print "    " + green(i)

            if not mymatch and x[0] not in "<>=~":
                mymatch = localtree.dep_match(x)
                portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
                    (x, unmerge_action), noiselevel=-1)

            # One protected/selected/omitted entry per argument.
                {"protected": set(), "selected": set(), "omitted": set()})
            mykey = len(pkgmap) - 1
            if unmerge_action=="unmerge":
                    if y not in all_selected:
                        pkgmap[mykey]["selected"].add(y)
                        all_selected.add(y)
            elif unmerge_action == "prune":
                if len(mymatch) == 1:
                # Keep only the best version per slot; select the rest.
                best_version = mymatch[0]
                best_slot = vartree.getslot(best_version)
                best_counter = vartree.dbapi.cpv_counter(best_version)
                for mypkg in mymatch[1:]:
                    myslot = vartree.getslot(mypkg)
                    mycounter = vartree.dbapi.cpv_counter(mypkg)
                    if (myslot == best_slot and mycounter > best_counter) or \
                        mypkg == portage.best([mypkg, best_version]):
                        if myslot == best_slot:
                            if mycounter < best_counter:
                                # On slot collision, keep the one with the
                                # highest counter since it is the most
                                # recently installed.
                        best_version = mypkg
                        best_counter = mycounter
                pkgmap[mykey]["protected"].add(best_version)
                pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
                    if mypkg != best_version and mypkg not in all_selected)
                all_selected.update(pkgmap[mykey]["selected"])
                # unmerge_action == "clean"
                # Group matches by slot, keyed by install counter.
                for mypkg in mymatch:
                    if unmerge_action == "clean":
                        myslot = localtree.getslot(mypkg)
                        # since we're pruning, we don't care about slots
                        # and put all the pkgs in together
                    if myslot not in slotmap:
                        slotmap[myslot] = {}
                    slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg

                    for mypkg in vartree.dbapi.cp_list(
                        portage.dep_getkey(mymatch[0])):
                        myslot = vartree.getslot(mypkg)
                        if myslot not in slotmap:
                            slotmap[myslot] = {}
                        slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg

                for myslot in slotmap:
                    counterkeys = slotmap[myslot].keys()
                    if not counterkeys:
                    # Highest counter == most recently merged: protect it.
                    pkgmap[mykey]["protected"].add(
                        slotmap[myslot][counterkeys[-1]])
                    del counterkeys[-1]

                    # Protect anything in the slot that was not matched.
                    for counter in counterkeys[:]:
                        mypkg = slotmap[myslot][counter]
                        if mypkg not in mymatch:
                            counterkeys.remove(counter)
                            pkgmap[mykey]["protected"].add(
                                slotmap[myslot][counter])

                    #be pretty and get them in order of merge:
                    for ckey in counterkeys:
                        mypkg = slotmap[myslot][ckey]
                        if mypkg not in all_selected:
                            pkgmap[mykey]["selected"].add(mypkg)
                            all_selected.add(mypkg)
                    # ok, now the last-merged package
                    # is protected, and the rest are selected
        numselected = len(all_selected)
        if global_unmerge and not numselected:
            portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")

        if not numselected:
            portage.writemsg_stdout(
                "\n>>> No packages selected for removal by " + \
                unmerge_action + "\n")
    # finally-clause body: flush and release the vdb lock when held
    # (the `finally:` / `if vdb_lock:` lines are outside this excerpt).
            vartree.dbapi.flush_cache()
            portage.locks.unlockdir(vdb_lock)

    from portage.sets.base import EditablePackageSet

    # generate a list of package sets that are directly or indirectly listed in "world",
    # as there is no persistent list of "installed" sets
    installed_sets = ["world"]
        pos = len(installed_sets)
        for s in installed_sets[pos - 1:]:
            candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
                installed_sets += candidates
    installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]

    # we don't want to unmerge packages that are still listed in user-editable package sets
    # listed in "world" as they would be remerged on the next update of "world" or the
    # relevant package sets.
    unknown_sets = set()
    for cp in xrange(len(pkgmap)):
        for cpv in pkgmap[cp]["selected"].copy():
                # It could have been uninstalled
                # by a concurrent process.

            # Refuse to let portage unmerge itself on the live root.
            if unmerge_action != "clean" and \
                root_config.root == "/" and \
                portage.match_from_list(
                portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
                msg = ("Not unmerging package %s since there is no valid " + \
                    "reason for portage to unmerge itself.") % (pkg.cpv,)
                for line in textwrap.wrap(msg, 75):
                # adjust pkgmap so the display output is correct
                pkgmap[cp]["selected"].remove(cpv)
                all_selected.remove(cpv)
                pkgmap[cp]["protected"].add(cpv)

            for s in installed_sets:
                # skip sets that the user requested to unmerge, and skip world
                # unless we're unmerging a package set (as the package would be
                # removed from "world" later on)
                if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):

                    # Warn only once per unknown set name.
                    if s in unknown_sets:
                    unknown_sets.add(s)
                    out = portage.output.EOutput()
                    out.eerror(("Unknown set '@%s' in " + \
                        "%svar/lib/portage/world_sets") % \
                        (s, root_config.root))

                # only check instances of EditablePackageSet as other classes are generally used for
                # special purposes and can be ignored here (and are usually generated dynamically, so the
                # user can't do much about them anyway)
                if isinstance(sets[s], EditablePackageSet):

                    # This is derived from a snippet of code in the
                    # depgraph._iter_atoms_for_pkg() method.
                    for atom in sets[s].iterAtomsForPackage(pkg):
                        inst_matches = vartree.dbapi.match(atom)
                        inst_matches.reverse() # descending order
                        for inst_cpv in inst_matches:
                                inst_pkg = _pkg(inst_cpv)
                                # It could have been uninstalled
                                # by a concurrent process.

                            if inst_pkg.cp != atom.cp:
                            if pkg >= inst_pkg:
                                # This is descending order, and we're not
                                # interested in any versions <= pkg given.
                            if pkg.slot_atom != inst_pkg.slot_atom:
                                higher_slot = inst_pkg
                        if higher_slot is None:

            # Packages still referenced by a set are kept (protected).
                #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
                #print colorize("WARN", "but still listed in the following package sets:")
                #print "    %s\n" % ", ".join(parents)
                print colorize("WARN", "Not unmerging package %s as it is" % cpv)
                print colorize("WARN", "still referenced by the following package sets:")
                print "    %s\n" % ", ".join(parents)
                # adjust pkgmap so the display output is correct
                pkgmap[cp]["selected"].remove(cpv)
                all_selected.remove(cpv)
                pkgmap[cp]["protected"].add(cpv)

    numselected = len(all_selected)
    if not numselected:
            "\n>>> No packages selected for removal by " + \
            unmerge_action + "\n")

    # Unmerge order only matters in some cases
        # Collapse per-argument entries into one entry per cp.
            selected = d["selected"]
            cp = portage.cpv_getkey(iter(selected).next())
            cp_dict = unordered.get(cp)
            if cp_dict is None:
                unordered[cp] = cp_dict
            for k, v in d.iteritems():
                cp_dict[k].update(v)
        pkgmap = [unordered[cp] for cp in sorted(unordered)]

    # Preview/display phase.
    for x in xrange(len(pkgmap)):
        selected = pkgmap[x]["selected"]
        for mytype, mylist in pkgmap[x].iteritems():
            if mytype == "selected":
            mylist.difference_update(all_selected)
        cp = portage.cpv_getkey(iter(selected).next())
        # Everything else installed under the same cp is "omitted".
        for y in localtree.dep_match(cp):
            if y not in pkgmap[x]["omitted"] and \
                y not in pkgmap[x]["selected"] and \
                y not in pkgmap[x]["protected"] and \
                y not in all_selected:
                pkgmap[x]["omitted"].add(y)
        if global_unmerge and not pkgmap[x]["selected"]:
            #avoid cluttering the preview printout with stuff that isn't getting unmerged
        # Warn loudly before removing a system-profile package.
        if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
            writemsg_level(colorize("BAD","\a\n\n!!! " + \
                "'%s' is part of your system profile.\n" % cp),
                level=logging.WARNING, noiselevel=-1)
            writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
                "be damaging to your system.\n\n"),
                level=logging.WARNING, noiselevel=-1)
            if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
                countdown(int(settings["EMERGE_WARNING_DELAY"]),
                    colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
            writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
            writemsg_level(bold(cp) + ": ", noiselevel=-1)
        for mytype in ["selected","protected","omitted"]:
                writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
            if pkgmap[x][mytype]:
                sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
                sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
                for pn, ver, rev in sorted_pkgs:
                        myversion = ver + "-" + rev
                    if mytype == "selected":
                            colorize("UNMERGE_WARN", myversion + " "),
                            colorize("GOOD", myversion + " "), noiselevel=-1)
                writemsg_level("none ", noiselevel=-1)
                writemsg_level("\n", noiselevel=-1)
            writemsg_level("\n", noiselevel=-1)

    writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
        " packages are slated for removal.\n")
    writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
            " and " + colorize("GOOD", "'omitted'") + \
            " packages will not be removed.\n\n")

    if "--pretend" in myopts:
        #we're done... return
    if "--ask" in myopts:
        if userquery("Would you like to unmerge these packages?")=="No":
            # enter pretend mode for correct formatting of results
            myopts["--pretend"] = True

    #the real unmerging begins, after a short delay....
    if clean_delay and not autoclean:
        countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")

    for x in xrange(len(pkgmap)):
        for y in pkgmap[x]["selected"]:
            writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
            emergelog(xterm_titles, "=== Unmerging... ("+y+")")
            mysplit = y.split("/")
            retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
                mysettings, unmerge_action not in ["clean","prune"],
                vartree=vartree, ldpath_mtimes=ldpath_mtimes,
                scheduler=scheduler)

            if retval != os.EX_OK:
                emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
                    raise UninstallFailure(retval)
                if clean_world and hasattr(sets["world"], "cleanPackage"):
                    sets["world"].cleanPackage(vartree.dbapi, y)
                emergelog(xterm_titles, " >>> unmerge success: "+y)

    # Drop any unmerged package sets from the world file as well.
    if clean_world and hasattr(sets["world"], "remove"):
        for s in root_config.setconfig.active:
            sets["world"].remove(SETPREFIX+s)
def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
    """
    Regenerate the GNU info directory index ("dir" file) for any info
    directory whose mtime changed since the last emerge invocation,
    using /usr/bin/install-info. prev_mtimes is updated in place so
    unchanged directories are skipped next time.

    NOTE(review): sampled excerpt — several interleaved lines (loop
    headers, try:/else:/continue statements) are omitted.
    """
    if os.path.exists("/usr/bin/install-info"):
        out = portage.output.EOutput()
        # Collect info dirs whose mtime changed since last run.
            inforoot=normpath(root+z)
            if os.path.isdir(inforoot):
                infomtime = long(os.stat(inforoot).st_mtime)
                if inforoot not in prev_mtimes or \
                    prev_mtimes[inforoot] != infomtime:
                    regen_infodirs.append(inforoot)

        if not regen_infodirs:
            portage.writemsg_stdout("\n")
            out.einfo("GNU info directory index is up-to-date.")
            portage.writemsg_stdout("\n")
            out.einfo("Regenerating GNU info directory index...")

            dir_extensions = ("", ".gz", ".bz2")
            for inforoot in regen_infodirs:
                # Skip unwritable or missing directories.
                if not os.path.isdir(inforoot) or \
                    not os.access(inforoot, os.W_OK):

                file_list = os.listdir(inforoot)
                dir_file = os.path.join(inforoot, "dir")
                moved_old_dir = False
                processed_count = 0
                for x in file_list:
                    if x.startswith(".") or \
                        os.path.isdir(os.path.join(inforoot, x)):
                    # Skip the index files themselves ("dir", "dir.gz",
                    # "dir.bz2" and their ".old" backups).
                    if x.startswith("dir"):
                        for ext in dir_extensions:
                            if x == "dir" + ext or \
                                x == "dir" + ext + ".old":
                    # Before the first file is processed, move any old
                    # index files out of the way so a fresh one is built.
                    if processed_count == 0:
                        for ext in dir_extensions:
                                os.rename(dir_file + ext, dir_file + ext + ".old")
                                moved_old_dir = True
                            except EnvironmentError, e:
                                if e.errno != errno.ENOENT:
                    processed_count += 1
                    myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
                    existsstr="already exists, for file `"
                        if re.search(existsstr,myso):
                            # Already exists... Don't increment the count for this.
                        elif myso[:44]=="install-info: warning: no info dir entry in ":
                            # This info file doesn't contain a DIR-header: install-info produces this
                            # (harmless) warning (the --quiet switch doesn't seem to work).
                            # Don't increment the count for this.
                            badcount=badcount+1
                            errmsg += myso + "\n"

                if moved_old_dir and not os.path.exists(dir_file):
                    # We didn't generate a new dir file, so put the old file
                    # back where it was originally found.
                    for ext in dir_extensions:
                            os.rename(dir_file + ext + ".old", dir_file + ext)
                        except EnvironmentError, e:
                            if e.errno != errno.ENOENT:

                # Clean dir.old cruft so that they don't prevent
                # unmerge of otherwise empty directories.
                for ext in dir_extensions:
                        os.unlink(dir_file + ext + ".old")
                    except EnvironmentError, e:
                        if e.errno != errno.ENOENT:

                #update mtime so we can potentially avoid regenerating.
                prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)

            # Summarize: errors go to the log at ERROR level.
                out.eerror("Processed %d info files; %d errors." % \
                    (icount, badcount))
                writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
                out.einfo("Processed %d info files." % (icount,))
def display_news_notification(root_config, myopts):
    """
    Print a notice for each repository that has unread news items,
    and a final hint to run `eselect news` when any were found.
    Unread counts are updated on disk unless --pretend is in effect.
    """
    target_root = root_config.root
    trees = root_config.trees
    settings = trees["vartree"].settings
    portdb = trees["porttree"].dbapi
    vardb = trees["vartree"].dbapi
    NEWS_PATH = os.path.join("metadata", "news")
    UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
    newsReaderDisplay = False
    # --pretend must not modify the unread-items state on disk.
    update = "--pretend" not in myopts

    for repo in portdb.getRepositories():
        unreadItems = checkUpdatedNewsItems(
            portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
            # Print a blank separator line before the first notice only.
            if not newsReaderDisplay:
                newsReaderDisplay = True
            print colorize("WARN", " * IMPORTANT:"),
            print "%s news items need reading for repository '%s'." % (unreadItems, repo)

    if newsReaderDisplay:
        print colorize("WARN", " *"),
        print "Use " + colorize("GOOD", "eselect news") + " to read news items."
def display_preserved_libs(vardbapi):
    """
    Show the preserved-libraries registry: for each package, the
    preserved files and (up to MAX_DISPLAY) consumers still linked
    against them, ending with a hint to run
    `emerge @preserved-rebuild`.

    NOTE(review): sampled excerpt — some lines (MAX_DISPLAY constant,
    try:/else: statements, a few assignments) are omitted.
    """

    # Ensure the registry is consistent with existing files.
    vardbapi.plib_registry.pruneNonExisting()

    if vardbapi.plib_registry.hasEntries():
        print colorize("WARN", "!!!") + " existing preserved libs:"
        plibdata = vardbapi.plib_registry.getPreservedLibs()
        linkmap = vardbapi.linkmap
        linkmap_broken = False
        # Rebuilding the link map shells out to scanelf; when that tool
        # is missing we degrade to listing files without consumers.
        except portage.exception.CommandNotFound, e:
            writemsg_level("!!! Command Not Found: %s\n" % (e,),
                level=logging.ERROR, noiselevel=-1)
            linkmap_broken = True

        search_for_owners = set()
        for cpv in plibdata:
            internal_plib_keys = set(linkmap._obj_key(f) \
                for f in plibdata[cpv])
            for f in plibdata[cpv]:
                if f in consumer_map:
                for c in linkmap.findConsumers(f):
                    # Filter out any consumers that are also preserved libs
                    # belonging to the same package as the provider.
                    if linkmap._obj_key(c) not in internal_plib_keys:
                        consumers.append(c)
                consumer_map[f] = consumers
                # Only look up owners for the consumers we may display.
                search_for_owners.update(consumers[:MAX_DISPLAY+1])

        owners = vardbapi._owners.getFileOwnerMap(search_for_owners)

        for cpv in plibdata:
            print colorize("WARN", ">>>") + " package: %s" % cpv
            # Group alternative paths that refer to the same object.
            for f in plibdata[cpv]:
                obj_key = linkmap._obj_key(f)
                alt_paths = samefile_map.get(obj_key)
                if alt_paths is None:
                    samefile_map[obj_key] = alt_paths

            for alt_paths in samefile_map.itervalues():
                alt_paths = sorted(alt_paths)
                for p in alt_paths:
                    print colorize("WARN", " * ") + " - %s" % (p,)
                consumers = consumer_map.get(f, [])
                for c in consumers[:MAX_DISPLAY]:
                    print colorize("WARN", " * ") + "     used by %s (%s)" % \
                        (c, ", ".join(x.mycpv for x in owners.get(c, [])))
                # Exactly one extra consumer: just show it instead of a
                # "+1 other files" summary line.
                if len(consumers) == MAX_DISPLAY + 1:
                    print colorize("WARN", " * ") + "     used by %s (%s)" % \
                        (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
                        for x in owners.get(consumers[MAX_DISPLAY], [])))
                elif len(consumers) > MAX_DISPLAY:
                    print colorize("WARN", " * ") + "     used by %d other files" % (len(consumers) - MAX_DISPLAY)
        print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
def _flush_elog_mod_echo():
    """
    Dump the mod_echo output now so that our other
    notifications are shown last.

    @rtype: bool
    @returns: True if messages were shown, False otherwise.
    """
    messages_shown = False
        from portage.elog import mod_echo
    except ImportError:
        pass # happens during downgrade to a version without the module
        # NOTE(review): the try:/else: lines around this import are
        # outside this excerpt; the two lines below run only when the
        # import succeeded.
        messages_shown = bool(mod_echo._items)
        mod_echo.finalize()
    return messages_shown
def post_emerge(root_config, myopts, mtimedb, retval):
    """
    Misc. things to run at the end of a merge session.

    Update Config Files
    Display preserved libs warnings

    @param trees: A dictionary mapping each ROOT to it's package databases
    @param mtimedb: The mtimeDB to store data needed across merge invocations
    @type mtimedb: MtimeDB class instance
    @param retval: Emerge's return value

    1. Calls sys.exit(retval)
    """
    target_root = root_config.root
    trees = { target_root : root_config.trees }
    vardbapi = trees[target_root]["vartree"].dbapi
    settings = vardbapi.settings
    info_mtimes = mtimedb["info"]

    # Load the most current variables from ${ROOT}/etc/profile.env
    settings.regenerate()

    config_protect = settings.get("CONFIG_PROTECT","").split()
    infodirs = settings.get("INFOPATH","").split(":") + \
        settings.get("INFODIR","").split(":")

    if retval == os.EX_OK:
        exit_msg = " *** exiting successfully."
        exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
    emergelog("notitles" not in settings.features, exit_msg)

    _flush_elog_mod_echo()

    # An unchanged counter hash means the vdb was not modified, so the
    # info/config/preserved-libs checks below can be skipped.
    counter_hash = settings.get("PORTAGE_COUNTER_HASH")
    if "--pretend" in myopts or (counter_hash is not None and \
        counter_hash == vardbapi._counter_hash()):
        display_news_notification(root_config, myopts)
        # If vdb state has not changed then there's nothing else to do.

    vdb_path = os.path.join(target_root, portage.VDB_PATH)
    portage.util.ensure_dirs(vdb_path)
    if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
        vdb_lock = portage.locks.lockdir(vdb_path)

    if "noinfo" not in settings.features:
        chk_updated_info_files(target_root,
            infodirs, info_mtimes, retval)
            # Release the vdb lock (runs in the finally clause upstream).
            portage.locks.unlockdir(vdb_lock)

    chk_updated_cfg_files(target_root, config_protect)

    display_news_notification(root_config, myopts)
    if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
        display_preserved_libs(vardbapi)
def chk_updated_cfg_files(target_root, config_protect):
    """
    Scan CONFIG_PROTECT paths for pending ._cfg????_* update files and
    print a summary telling the user which config files need updating.

    NOTE(review): sampled excerpt — counters, try: lines and several
    else:/continue statements are omitted.
    """
    #number of directories with some protect files in them
    for x in config_protect:
        x = os.path.join(target_root, x.lstrip(os.path.sep))
        if not os.access(x, os.W_OK):
            # Avoid Permission denied errors generated
            mymode = os.lstat(x).st_mode
        if stat.S_ISLNK(mymode):
            # We want to treat it like a directory if it
            # is a symlink to an existing directory.
            real_mode = os.stat(x).st_mode
            if stat.S_ISDIR(real_mode):
        if stat.S_ISDIR(mymode):
            mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
            # Single protected file: search only its own directory.
            mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
                os.path.split(x.rstrip(os.path.sep))
        mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
        a = commands.getstatusoutput(mycommand)
            sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
            # Show the error message alone, sending stdout to /dev/null.
            os.system(mycommand + " 1>/dev/null")
            files = a[1].split('\0')
            # split always produces an empty string as the last element
            if files and not files[-1]:
                print "\n"+colorize("WARN", " * IMPORTANT:"),
                if stat.S_ISDIR(mymode):
                    print "%d config files in '%s' need updating." % \
                    print "config file '%s' needs updating." % x

        print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
            " section of the " + bold("emerge")
        print " "+yellow("*")+" man page to learn how to update config files."
def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
    update=False):
    """
    Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
    Returns the number of unread (yet relevent) items.

    @param portdb: a portage tree database
    @type portdb: pordbapi
    @param vardb: an installed package database
    @type vardb: vardbapi
    @param NEWS_PATH: path, relative to the repository, of the news items
    @param UNREAD_PATH: path of the unread-news state file

    @rtype: Integer
    @returns:
    1. The number of unread but relevant news items.
    """
    # Delegate to NewsManager; `update` controls whether the on-disk
    # unread list is refreshed before counting.
    from portage.news import NewsManager
    manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
    return manager.getUnreadItems( repo_id, update=update )
def insert_category_into_atom(atom, category):
    """
    Insert "<category>/" into an atom just before its first word
    character (i.e. after any leading operator such as >=, ~, =).

    NOTE(review): the `if alphanum:` guard, the no-match branch and the
    final return fall outside this excerpt.
    """
    alphanum = re.search(r'\w', atom)
        ret = atom[:alphanum.start()] + "%s/" % category + \
            atom[alphanum.start():]
def is_valid_package_atom(x):
    """
    Return whether x is a valid package atom, allowing the category to
    be omitted: a placeholder "cat/" prefix is inserted before
    validation so bare package names pass too.

    NOTE(review): the guard lines around the insertion (no '/' present,
    regex matched) fall outside this excerpt.
    """
        alphanum = re.search(r'\w', x)
            x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
    return portage.isvalidatom(x)
def show_blocker_docs_link():
    # Point the user at the handbook section explaining blocked
    # packages (architecture-independent).
    print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
    print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"

    print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12484 def show_mask_docs():
12485 print "For more information, see the MASKED PACKAGES section in the emerge"
12486 print "man page or refer to the Gentoo Handbook."
12488 def action_sync(settings, trees, mtimedb, myopts, myaction):
# Synchronize the local Portage tree (PORTDIR) with the configured SYNC
# source.  Supported transports visible here: an existing git checkout,
# rsync://, cvs://, plus the "metadata" pseudo-action which skips the
# network sync entirely.  Afterwards the emerge configuration is reloaded,
# the metadata cache is optionally transferred, and post-sync hooks run.
# NOTE(review): this extraction is missing interleaved lines (the embedded
# original numbering is not contiguous), so the control flow shown below is
# partial; comments describe only the visible code.
12489 xterm_titles = "notitles" not in settings.features
12490 emergelog(xterm_titles, " === sync")
12491 myportdir = settings.get("PORTDIR", None)
12492 out = portage.output.EOutput()
# PORTDIR must be defined; complain loudly otherwise.
12494 sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
# Strip a trailing slash so path composition below stays clean.
12496 if myportdir[-1]=="/":
12497 myportdir=myportdir[:-1]
12499 st = os.stat(myportdir)
# PORTDIR does not exist yet: create it (mode 0755) and re-stat.
12503 print ">>>",myportdir,"not found, creating it."
12504 os.makedirs(myportdir,0755)
12505 st = os.stat(myportdir)
# Environment handed to every child sync process.
12508 spawn_kwargs["env"] = settings.environ()
# With FEATURES=usersync and sufficient privileges, drop to the uid/gid
# that currently owns PORTDIR so synced files keep their ownership.
12509 if 'usersync' in settings.features and \
12510 portage.data.secpass >= 2 and \
12511 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12512 st.st_gid != os.getgid() and st.st_mode & 0070):
12514 homedir = pwd.getpwuid(st.st_uid).pw_dir
12518 # Drop privileges when syncing, in order to match
12519 # existing uid/gid settings.
12520 spawn_kwargs["uid"] = st.st_uid
12521 spawn_kwargs["gid"] = st.st_gid
12522 spawn_kwargs["groups"] = [st.st_gid]
12523 spawn_kwargs["env"]["HOME"] = homedir
# Mask out group write in the child's umask when the tree itself is not
# group-writable.
12525 if not st.st_mode & 0020:
12526 umask = umask | 0020
12527 spawn_kwargs["umask"] = umask
# The SYNC URI selects which transport branch runs below.
12529 syncuri = settings.get("SYNC", "").strip()
12531 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12532 noiselevel=-1, level=logging.ERROR)
# Detect version-control checkouts living directly inside PORTDIR.
12535 vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12536 vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12539 dosyncuri = syncuri
12540 updatecache_flg = False
# "emerge --metadata": no network sync, only the cache transfer later on.
12541 if myaction == "metadata":
12542 print "skipping sync"
12543 updatecache_flg = True
# --- git checkout: just "git pull" in place; syncuri is ignored.
12544 elif ".git" in vcs_dirs:
12545 # Update existing git repository, and ignore the syncuri. We are
12546 # going to trust the user and assume that the user is in the branch
12547 # that he/she wants updated. We'll let the user manage branches with
12549 if portage.process.find_binary("git") is None:
12550 msg = ["Command not found: git",
12551 "Type \"emerge dev-util/git\" to enable git support."]
12553 writemsg_level("!!! %s\n" % l,
12554 level=logging.ERROR, noiselevel=-1)
12556 msg = ">>> Starting git pull in %s..." % myportdir
12557 emergelog(xterm_titles, msg )
12558 writemsg_level(msg + "\n")
12559 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12560 (portage._shell_quote(myportdir),), **spawn_kwargs)
12561 if exitcode != os.EX_OK:
12562 msg = "!!! git pull error in %s." % myportdir
12563 emergelog(xterm_titles, msg)
12564 writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12566 msg = ">>> Git pull in %s successful" % myportdir
12567 emergelog(xterm_titles, msg)
12568 writemsg_level(msg + "\n")
# git does not preserve mtimes; repair them from the metadata cache so the
# cache transfer below is usable.
12569 exitcode = git_sync_timestamps(settings, myportdir)
12570 if exitcode == os.EX_OK:
12571 updatecache_flg = True
# --- rsync:// transport.
12572 elif syncuri[:8]=="rsync://":
# Refuse to rsync over a tree that is under revision control.
12573 for vcs_dir in vcs_dirs:
12574 writemsg_level(("!!! %s appears to be under revision " + \
12575 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12576 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12578 if not os.path.exists("/usr/bin/rsync"):
12579 print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12580 print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
# Hardcoded default rsync options when PORTAGE_RSYNC_OPTS is unset.
12585 if settings["PORTAGE_RSYNC_OPTS"] == "":
12586 portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12587 rsync_opts.extend([
12588 "--recursive", # Recurse directories
12589 "--links", # Consider symlinks
12590 "--safe-links", # Ignore links outside of tree
12591 "--perms", # Preserve permissions
12592 "--times", # Preserive mod times
12593 "--compress", # Compress the data transmitted
12594 "--force", # Force deletion on non-empty dirs
12595 "--whole-file", # Don't do block transfers, only entire files
12596 "--delete", # Delete files that aren't in the master tree
12597 "--stats", # Show final statistics about what was transfered
12598 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12599 "--exclude=/distfiles", # Exclude distfiles from consideration
12600 "--exclude=/local", # Exclude local from consideration
12601 "--exclude=/packages", # Exclude packages from consideration
12605 # The below validation is not needed when using the above hardcoded
# User supplied PORTAGE_RSYNC_OPTS: take them, but force in the options
# that a correct sync cannot do without.
12608 portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12610 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12611 for opt in ("--recursive", "--times"):
12612 if opt not in rsync_opts:
12613 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12614 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12615 rsync_opts.append(opt)
12617 for exclude in ("distfiles", "local", "packages"):
12618 opt = "--exclude=/%s" % exclude
12619 if opt not in rsync_opts:
12620 portage.writemsg(yellow("WARNING:") + \
12621 " adding required option %s not included in " % opt + \
12622 "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12623 rsync_opts.append(opt)
# Extra safety net when syncing from the official gentoo.org rotations.
12625 if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12626 def rsync_opt_startswith(opt_prefix):
12627 for x in rsync_opts:
12628 if x.startswith(opt_prefix):
12632 if not rsync_opt_startswith("--timeout="):
12633 rsync_opts.append("--timeout=%d" % mytimeout)
12635 for opt in ("--compress", "--whole-file"):
12636 if opt not in rsync_opts:
12637 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12638 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12639 rsync_opts.append(opt)
# Map emerge verbosity options onto rsync verbosity options.
12641 if "--quiet" in myopts:
12642 rsync_opts.append("--quiet") # Shut up a lot
12644 rsync_opts.append("--verbose") # Print filelist
12646 if "--verbose" in myopts:
12647 rsync_opts.append("--progress") # Progress meter for each file
12649 if "--debug" in myopts:
12650 rsync_opts.append("--checksum") # Force checksum on all files
12652 # Real local timestamp file.
12653 servertimestampfile = os.path.join(
12654 myportdir, "metadata", "timestamp.chk")
# Parse the local timestamp.chk; parse failures are tolerated.
12656 content = portage.util.grabfile(servertimestampfile)
12660 mytimestamp = time.mktime(time.strptime(content[0],
12661 "%a, %d %b %Y %H:%M:%S +0000"))
12662 except (OverflowError, ValueError):
# PORTAGE_RSYNC_INITIAL_TIMEOUT guards the very first connection attempt
# (default 15 seconds); PORTAGE_RSYNC_RETRIES bounds retries (default 3).
12667 rsync_initial_timeout = \
12668 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12670 rsync_initial_timeout = 15
12673 maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12674 except SystemExit, e:
12675 raise # Needed else can't exit
12677 maxretries=3 #default number of retries
# Split the URI into optional user@, hostname, and :port components.
12680 user_name, hostname, port = re.split(
12681 "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12684 if user_name is None:
12686 updatecache_flg=True
12687 all_rsync_opts = set(rsync_opts)
12688 extra_rsync_opts = shlex.split(
12689 settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12690 all_rsync_opts.update(extra_rsync_opts)
# Honor explicit -4/--ipv4 and -6/--ipv6 requests when resolving.
12691 family = socket.AF_INET
12692 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12693 family = socket.AF_INET
12694 elif socket.has_ipv6 and \
12695 ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12696 family = socket.AF_INET6
# Sentinel exit codes used by the retry loop below.
12698 SERVER_OUT_OF_DATE = -1
12699 EXCEEDED_MAX_RETRIES = -2
# Resolve the hostname up front; shuffle (imported below) presumably
# spreads load across a mirror rotation — resolution failures are
# reported but non-fatal.
12705 for addrinfo in socket.getaddrinfo(
12706 hostname, None, family, socket.SOCK_STREAM):
12707 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12708 # IPv6 addresses need to be enclosed in square brackets
12709 ips.append("[%s]" % addrinfo[4][0])
12711 ips.append(addrinfo[4][0])
12712 from random import shuffle
12714 except SystemExit, e:
12715 raise # Needed else can't exit
12716 except Exception, e:
12717 print "Notice:",str(e)
# Substitute the chosen IP back into the sync URI.
12722 dosyncuri = syncuri.replace(
12723 "//" + user_name + hostname + port + "/",
12724 "//" + user_name + ips[0] + port + "/", 1)
12725 except SystemExit, e:
12726 raise # Needed else can't exit
12727 except Exception, e:
12728 print "Notice:",str(e)
# Interactive confirmation under --ask.
12732 if "--ask" in myopts:
12733 if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12738 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12739 if "--quiet" not in myopts:
12740 print ">>> Starting rsync with "+dosyncuri+"..."
12742 emergelog(xterm_titles,
12743 ">>> Starting retry %d of %d with %s" % \
12744 (retries,maxretries,dosyncuri))
12745 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12747 if mytimestamp != 0 and "--quiet" not in myopts:
12748 print ">>> Checking server timestamp ..."
12750 rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12752 if "--debug" in myopts:
12755 exitcode = os.EX_OK
12756 servertimestamp = 0
12757 # Even if there's no timestamp available locally, fetch the
12758 # timestamp anyway as an initial probe to verify that the server is
12759 # responsive. This protects us from hanging indefinitely on a
12760 # connection attempt to an unresponsive server which rsync's
12761 # --timeout option does not prevent.
12763 # Temporary file for remote server timestamp comparison.
12764 from tempfile import mkstemp
12765 fd, tmpservertimestampfile = mkstemp()
12767 mycommand = rsynccommand[:]
12768 mycommand.append(dosyncuri.rstrip("/") + \
12769 "/metadata/timestamp.chk")
12770 mycommand.append(tmpservertimestampfile)
# SIGALRM watchdog around the initial timestamp fetch, since rsync's own
# --timeout does not cover the initial connection attempt.
12774 def timeout_handler(signum, frame):
12775 raise portage.exception.PortageException("timed out")
12776 signal.signal(signal.SIGALRM, timeout_handler)
12777 # Timeout here in case the server is unresponsive. The
12778 # --timeout rsync option doesn't apply to the initial
12779 # connection attempt.
12780 if rsync_initial_timeout:
12781 signal.alarm(rsync_initial_timeout)
12783 mypids.extend(portage.process.spawn(
12784 mycommand, env=settings.environ(), returnpid=True))
12785 exitcode = os.waitpid(mypids[0], 0)[1]
12786 content = portage.grabfile(tmpservertimestampfile)
12788 if rsync_initial_timeout:
12791 os.unlink(tmpservertimestampfile)
# Watchdog fired: kill the probe rsync if it is still running.
12794 except portage.exception.PortageException, e:
12798 if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
12799 os.kill(mypids[0], signal.SIGTERM)
12800 os.waitpid(mypids[0], 0)
12801 # This is the same code rsync uses for timeout.
# Convert the raw waitpid() status into an rsync-style exit code:
# a signal death keeps the low byte (shifted up), a normal exit is
# shifted down to the plain exit status.
12804 if exitcode != os.EX_OK:
12805 if exitcode & 0xff:
12806 exitcode = (exitcode & 0xff) << 8
12808 exitcode = exitcode >> 8
12810 portage.process.spawned_pids.remove(mypids[0])
# Parse the freshly fetched server-side timestamp.chk.
12813 servertimestamp = time.mktime(time.strptime(
12814 content[0], "%a, %d %b %Y %H:%M:%S +0000"))
12815 except (OverflowError, ValueError):
12817 del mycommand, mypids, content
# Compare server vs local timestamps to decide whether to sync at all.
12818 if exitcode == os.EX_OK:
12819 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
12820 emergelog(xterm_titles,
12821 ">>> Cancelling sync -- Already current.")
12824 print ">>> Timestamps on the server and in the local repository are the same."
12825 print ">>> Cancelling all further sync action. You are already up to date."
12827 print ">>> In order to force sync, remove '%s'." % servertimestampfile
12831 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
12832 emergelog(xterm_titles,
12833 ">>> Server out of date: %s" % dosyncuri)
12836 print ">>> SERVER OUT OF DATE: %s" % dosyncuri
12838 print ">>> In order to force sync, remove '%s'." % servertimestampfile
12841 exitcode = SERVER_OUT_OF_DATE
# Server is newer (or a timestamp was unusable): run the real sync.
12842 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
12844 mycommand = rsynccommand + [dosyncuri+"/", myportdir]
12845 exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
# NOTE(review): the elif below repeats most of the codes in the if above;
# this only makes sense if lines missing from this extraction (e.g. a
# break/exit after the first test) change the flow — verify upstream.
12846 if exitcode in [0,1,3,4,11,14,20,21]:
12848 elif exitcode in [1,3,4,11,14,20,21]:
12851 # Code 2 indicates protocol incompatibility, which is expected
12852 # for servers with protocol < 29 that don't support
12853 # --prune-empty-directories. Retry for a server that supports
12854 # at least rsync protocol version 29 (>=rsync-2.6.4).
# Retry-loop bookkeeping: keep retrying until maxretries is exceeded.
12859 if retries<=maxretries:
12860 print ">>> Retrying..."
12865 updatecache_flg=False
12866 exitcode = EXCEEDED_MAX_RETRIES
# Report the final rsync outcome.
12870 emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
12871 elif exitcode == SERVER_OUT_OF_DATE:
12873 elif exitcode == EXCEEDED_MAX_RETRIES:
12875 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
# Human-readable diagnostics for specific rsync exit codes; the
# elif conditions selecting each message are missing from this extraction.
12880 msg.append("Rsync has reported that there is a syntax error. Please ensure")
12881 msg.append("that your SYNC statement is proper.")
12882 msg.append("SYNC=" + settings["SYNC"])
12884 msg.append("Rsync has reported that there is a File IO error. Normally")
12885 msg.append("this means your disk is full, but can be caused by corruption")
12886 msg.append("on the filesystem that contains PORTDIR. Please investigate")
12887 msg.append("and try again after the problem has been fixed.")
12888 msg.append("PORTDIR=" + settings["PORTDIR"])
12890 msg.append("Rsync was killed before it finished.")
12892 msg.append("Rsync has not successfully finished. It is recommended that you keep")
12893 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
12894 msg.append("to use rsync due to firewall or other restrictions. This should be a")
12895 msg.append("temporary problem unless complications exist with your network")
12896 msg.append("(and possibly your system's filesystem) configuration.")
# --- cvs:// transport.
12900 elif syncuri[:6]=="cvs://":
12901 if not os.path.exists("/usr/bin/cvs"):
12902 print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
12903 print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
12905 cvsroot=syncuri[6:]
12906 cvsdir=os.path.dirname(myportdir)
# Initial checkout when PORTDIR is not yet a CVS working copy.
12907 if not os.path.exists(myportdir+"/CVS"):
12909 print ">>> Starting initial cvs checkout with "+syncuri+"..."
12910 if os.path.exists(cvsdir+"/gentoo-x86"):
12911 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
# Remove an empty PORTDIR so the checkout can be renamed into place;
# ENOENT is fine, anything else means a non-empty/blocking directory.
12914 os.rmdir(myportdir)
12916 if e.errno != errno.ENOENT:
12918 "!!! existing '%s' directory; exiting.\n" % myportdir)
12921 if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
12922 print "!!! cvs checkout error; exiting."
12924 os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
# Existing working copy: plain cvs update.
12927 print ">>> Starting cvs update with "+syncuri+"..."
12928 retval = portage.process.spawn_bash(
12929 "cd %s; cvs -z0 -q update -dP" % \
12930 (portage._shell_quote(myportdir),), **spawn_kwargs)
12931 if retval != os.EX_OK:
12933 dosyncuri = syncuri
# Unknown protocol in SYNC.
12935 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
12936 noiselevel=-1, level=logging.ERROR)
# The cache transfer below is gated on FEATURES=metadata-transfer.
12939 if updatecache_flg and \
12940 myaction != "metadata" and \
12941 "metadata-transfer" not in settings.features:
12942 updatecache_flg = False
12944 # Reload the whole config from scratch.
12945 settings, trees, mtimedb = load_emerge_config(trees=trees)
12946 root_config = trees[settings["ROOT"]]["root_config"]
12947 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12949 if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
12950 action_metadata(settings, portdb, myopts)
# _global_updates may move packages around; reload config again if so.
12952 if portage._global_updates(trees, mtimedb["updates"]):
12954 # Reload the whole config from scratch.
12955 settings, trees, mtimedb = load_emerge_config(trees=trees)
12956 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12957 root_config = trees[settings["ROOT"]]["root_config"]
# Compare the best visible portage version against the installed one so
# we can nag the user to update portage first.
12959 mybestpv = portdb.xmatch("bestmatch-visible",
12960 portage.const.PORTAGE_PACKAGE_ATOM)
12961 mypvs = portage.best(
12962 trees[settings["ROOT"]]["vartree"].dbapi.match(
12963 portage.const.PORTAGE_PACKAGE_ATOM))
12965 chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
# Run the user's executable post_sync hook, if present.
12967 if myaction != "metadata":
12968 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
12969 retval = portage.process.spawn(
12970 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
12971 dosyncuri], env=settings.environ())
12972 if retval != os.EX_OK:
12973 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
# Nag about an available portage update.
12975 if(mybestpv != mypvs) and not "--quiet" in myopts:
12977 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
12978 print red(" * ")+"that you update portage now, before any other packages are updated."
12980 print red(" * ")+"To update portage, run 'emerge portage' now."
# Finally, show any unread news items.
12983 display_news_notification(root_config, myopts)
12986 def git_sync_timestamps(settings, portdir):
"""
12988 Since git doesn't preserve timestamps, synchronize timestamps between
12989 entries and ebuilds/eclasses. Assume the cache has the correct timestamp
12990 for a given file as long as the file in the working tree is not modified
12991 (relative to HEAD).
"""
# NOTE(review): this extraction is missing interleaved lines (original
# numbering is not contiguous); comments describe only the visible code.
# Nothing to do without a metadata cache to take timestamps from.
12993 cache_dir = os.path.join(portdir, "metadata", "cache")
12994 if not os.path.isdir(cache_dir):
12996 writemsg_level(">>> Synchronizing timestamps...\n")
# Open the metadata cache read-only via the configured metadb module.
12998 from portage.cache.cache_errors import CacheError
13000 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
13001 portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13002 except CacheError, e:
13003 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
13004 level=logging.ERROR, noiselevel=-1)
# Collect the names of all eclasses present in the tree.
13007 ec_dir = os.path.join(portdir, "eclass")
13009 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13010 if f.endswith(".eclass"))
13012 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13013 level=logging.ERROR, noiselevel=-1)
# Ask git which working-tree files are modified relative to HEAD; cached
# timestamps cannot be trusted for those.
13016 args = [portage.const.BASH_BINARY, "-c",
13017 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13018 portage._shell_quote(portdir)]
13020 proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13021 modified_files = set(l.rstrip("\n") for l in proc.stdout)
13023 if rval != os.EX_OK:
# Locally modified eclasses taint every cache entry that inherits them.
13026 modified_eclasses = set(ec for ec in ec_names \
13027 if os.path.join("eclass", ec + ".eclass") in modified_files)
13029 updated_ec_mtimes = {}
# Walk every cache entry, validating it and applying the cached mtimes.
13031 for cpv in cache_db:
13032 cpv_split = portage.catpkgsplit(cpv)
13033 if cpv_split is None:
13034 writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13035 level=logging.ERROR, noiselevel=-1)
13038 cat, pn, ver, rev = cpv_split
13039 cat, pf = portage.catsplit(cpv)
13040 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
# Skip entries whose ebuild is locally modified.
13041 if relative_eb_path in modified_files:
13045 cache_entry = cache_db[cpv]
13046 eb_mtime = cache_entry.get("_mtime_")
13047 ec_mtimes = cache_entry.get("_eclasses_")
13049 writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13050 level=logging.ERROR, noiselevel=-1)
13052 except CacheError, e:
13053 writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13054 (cpv, e), level=logging.ERROR, noiselevel=-1)
# Both _mtime_ and _eclasses_ must be present and sane.
13057 if eb_mtime is None:
13058 writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13059 level=logging.ERROR, noiselevel=-1)
13063 eb_mtime = long(eb_mtime)
13065 writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13066 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13069 if ec_mtimes is None:
13070 writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13071 level=logging.ERROR, noiselevel=-1)
# Any inherited eclass that is locally modified invalidates this entry.
13074 if modified_eclasses.intersection(ec_mtimes):
13077 missing_eclasses = set(ec_mtimes).difference(ec_names)
13078 if missing_eclasses:
13079 writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13080 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13084 eb_path = os.path.join(portdir, relative_eb_path)
# NOTE(review): this assigns the full stat result, yet it is compared to
# a long mtime below — possibly a truncated line in this extraction
# (e.g. missing .st_mtime); verify against upstream before relying on it.
13086 current_eb_mtime = os.stat(eb_path)
13088 writemsg_level("!!! Missing ebuild: %s\n" % \
13089 (cpv,), level=logging.ERROR, noiselevel=-1)
# An eclass mtime that disagrees with one applied earlier means the cache
# is internally inconsistent for that eclass; skip the entry.
13092 inconsistent = False
13093 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13094 updated_mtime = updated_ec_mtimes.get(ec)
13095 if updated_mtime is not None and updated_mtime != ec_mtime:
13096 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13097 (cpv, ec), level=logging.ERROR, noiselevel=-1)
13098 inconsistent = True
# Apply the cached mtimes to the ebuild and each inherited eclass.
13104 if current_eb_mtime != eb_mtime:
13105 os.utime(eb_path, (eb_mtime, eb_mtime))
13107 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13108 if ec in updated_ec_mtimes:
13110 ec_path = os.path.join(ec_dir, ec + ".eclass")
13111 current_mtime = long(os.stat(ec_path).st_mtime)
13112 if current_mtime != ec_mtime:
13113 os.utime(ec_path, (ec_mtime, ec_mtime))
13114 updated_ec_mtimes[ec] = ec_mtime
13118 def action_metadata(settings, portdb, myopts):
# Transfer the tree's pregenerated metadata ($PORTDIR/metadata/cache)
# into the local depcache (settings.depcachedir), with optional progress
# output.  NOTE(review): this extraction is missing interleaved lines;
# comments describe only the visible code.
13119 portage.writemsg_stdout("\n>>> Updating Portage cache: ")
# Group-writable cache files: relax the umask for the duration.
13120 old_umask = os.umask(0002)
13121 cachedir = os.path.normpath(settings.depcachedir)
# Safety check: refuse to operate when PORTAGE_DEPCACHEDIR points at a
# primary system directory (cache maintenance could be destructive there).
13122 if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
13123 "/lib", "/opt", "/proc", "/root", "/sbin",
13124 "/sys", "/tmp", "/usr", "/var"]:
13125 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13126 "ROOT DIRECTORY ON YOUR SYSTEM."
13127 print >> sys.stderr, \
13128 "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13130 if not os.path.exists(cachedir):
# Source (tree cache, read-only) and eclass cache for validation.
13133 ec = portage.eclass_cache.cache(portdb.porttree_root)
13134 myportdir = os.path.realpath(settings["PORTDIR"])
13135 cm = settings.load_best_module("portdbapi.metadbmodule")(
13136 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13138 from portage.cache import util
# Progress reporter: wraps util.quiet_mirroring and prints a percentage
# while also acting as the cpv source iterable.
13140 class percentage_noise_maker(util.quiet_mirroring):
13141 def __init__(self, dbapi):
13143 self.cp_all = dbapi.cp_all()
13144 l = len(self.cp_all)
13145 self.call_update_min = 100000000
13146 self.min_cp_all = l/100.0
13150 def __iter__(self):
# Yield every cpv while tracking how far through cp_all we are.
13151 for x in self.cp_all:
13153 if self.count > self.min_cp_all:
13154 self.call_update_min = 0
13156 for y in self.dbapi.cp_list(x):
13158 self.call_update_mine = 0
13160 def update(self, *arg):
# Overwrite the previously printed percentage in place.
13161 try: self.pstr = int(self.pstr) + 1
13162 except ValueError: self.pstr = 1
13163 sys.stdout.write("%s%i%%" % \
13164 ("\b" * (len(str(self.pstr))+1), self.pstr))
13166 self.call_update_min = 10000000
13168 def finish(self, *arg):
13169 sys.stdout.write("\b\b\b\b100%\n")
# Quiet mode: plain generator + silent noise maker; otherwise the
# percentage reporter serves as both source and reporter.
13172 if "--quiet" in myopts:
13173 def quicky_cpv_generator(cp_all_list):
13174 for x in cp_all_list:
13175 for y in portdb.cp_list(x):
13177 source = quicky_cpv_generator(portdb.cp_all())
13178 noise_maker = portage.cache.util.quiet_mirroring()
13180 noise_maker = source = percentage_noise_maker(portdb)
# mirror_cache performs the actual tree-cache -> depcache transfer.
13181 portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
13182 eclass_cache=ec, verbose_instance=noise_maker)
# Restore the caller's umask.
13185 os.umask(old_umask)
13187 def action_regen(settings, portdb, max_jobs, max_load):
# Regenerate metadata cache entries for the tree (emerge --regen),
# delegating the parallel work to MetadataRegen and returning its
# exit status.  NOTE(review): this extraction is missing interleaved
# lines; comments describe only the visible code.
13188 xterm_titles = "notitles" not in settings.features
13189 emergelog(xterm_titles, " === regen")
13190 #regenerate cache entries
13191 portage.writemsg_stdout("Regenerating cache entries...\n")
# Close stdin, presumably so spawned ebuild processes cannot block
# waiting for terminal input — failure to close is tolerated.
13193 os.close(sys.stdin.fileno())
13194 except SystemExit, e:
13195 raise # Needed else can't exit
# Parallel regeneration honoring --jobs / --load-average limits.
13200 regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13203 portage.writemsg_stdout("done!\n")
13204 return regen.returncode
13206 def action_config(settings, trees, myopts, myfiles):
# Run the pkg_config() phase for a single installed package (emerge
# --config), letting the user pick among multiple matches with --ask.
# NOTE(review): this extraction is missing interleaved lines; comments
# describe only the visible code.
13207 if len(myfiles) != 1:
13208 print red("!!! config can only take a single package atom at this time\n")
# Validate the user-supplied atom before touching the vardb.
13210 if not is_valid_package_atom(myfiles[0]):
13211 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13213 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13214 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
# Resolve the atom against installed packages.
13218 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13219 except portage.exception.AmbiguousPackageName, e:
13220 # Multiple matches thrown from cpv_expand
13223 print "No packages found.\n"
# Multiple matches: interactively pick one under --ask, otherwise demand
# a more specific atom.
13225 elif len(pkgs) > 1:
13226 if "--ask" in myopts:
13228 print "Please select a package to configure:"
13232 options.append(str(idx))
13233 print options[-1]+") "+pkg
13235 options.append("X")
13236 idx = userquery("Selection?", options)
13239 pkg = pkgs[int(idx)-1]
13241 print "The following packages available:"
13244 print "\nPlease use a specific atom or the --ask option."
# Final confirmation before running the config phase.
13250 if "--ask" in myopts:
13251 if userquery("Ready to configure "+pkg+"?") == "No":
13254 print "Configuring pkg..."
# Run the ebuild's "config" phase against the installed package.
13256 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13257 mysettings = portage.config(clone=settings)
13258 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13259 debug = mysettings.get("PORTAGE_DEBUG") == "1"
# NOTE(review): settings.get() returns a string, so comparing to int 1
# below is always False — debug is effectively disabled for this call.
# Likely meant == "1" (i.e. the `debug` variable computed above, which
# this call does not use).
13260 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13262 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
13263 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
# On success, run the "clean" phase to tidy up the temp build dir.
13264 if retval == os.EX_OK:
13265 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13266 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
13269 def action_info(settings, trees, myopts, myfiles):
# Print system information (emerge --info): portage version, uname, tree
# timestamp, toolchain package versions, configuration variables, and —
# when package atoms are given — the per-package build-time settings.
# NOTE(review): this extraction is missing interleaved lines; comments
# describe only the visible code.
13270 print getportageversion(settings["PORTDIR"], settings["ROOT"],
13271 settings.profile_path, settings["CHOST"],
13272 trees[settings["ROOT"]]["vartree"].dbapi)
# Banner for the "System Settings" section.
13274 header_title = "System Settings"
13276 print header_width * "="
13277 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13278 print header_width * "="
13279 print "System uname: "+platform.platform(aliased=1)
# Last sync time as recorded in the tree's timestamp.chk.
13281 lastSync = portage.grabfile(os.path.join(
13282 settings["PORTDIR"], "metadata", "timestamp.chk"))
13283 print "Timestamp of tree:",
# Report distcc / ccache versions and whether each FEATURE is enabled.
13289 output=commands.getstatusoutput("distcc --version")
13291 print str(output[1].split("\n",1)[0]),
13292 if "distcc" in settings.features:
13297 output=commands.getstatusoutput("ccache -V")
13299 print str(output[1].split("\n",1)[0]),
13300 if "ccache" in settings.features:
# Versions of key toolchain packages, extended by profiles/info_pkgs.
13305 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13306 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
13307 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13308 myvars = portage.util.unique_array(myvars)
13312 if portage.isvalidatom(x):
13313 pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13314 pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13315 pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13317 for pn, ver, rev in pkg_matches:
13319 pkgs.append(ver + "-" + rev)
13323 pkgs = ", ".join(pkgs)
13324 print "%-20s %s" % (x+":", pkgs)
13326 print "%-20s %s" % (x+":", "[NOT VALID]")
13328 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
# Variables to dump: everything under --verbose, otherwise a curated list
# extended by profiles/info_vars.
13330 if "--verbose" in myopts:
13331 myvars=settings.keys()
13333 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13334 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13335 'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13336 'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13338 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13340 myvars = portage.util.unique_array(myvars)
13346 print '%s="%s"' % (x, settings[x])
# USE is printed specially: USE_EXPAND-derived flags are pulled out of
# USE and reported under their own variable names.
13348 use = set(settings["USE"].split())
13349 use_expand = settings["USE_EXPAND"].split()
13351 for varname in use_expand:
13352 flag_prefix = varname.lower() + "_"
13353 for f in list(use):
13354 if f.startswith(flag_prefix):
13358 print 'USE="%s"' % " ".join(use),
13359 for varname in use_expand:
13360 myval = settings.get(varname)
13362 print '%s="%s"' % (varname, myval),
13365 unset_vars.append(x)
13367 print "Unset: "+", ".join(unset_vars)
# With --debug, dump CVS id strings of portage submodules.
13370 if "--debug" in myopts:
13371 for x in dir(portage):
13372 module = getattr(portage, x)
13373 if "cvs_id_string" in dir(module):
13374 print "%s: %s" % (str(x), str(module.cvs_id_string))
13376 # See if we can find any packages installed matching the strings
13377 # passed on the command line
13379 vardb = trees[settings["ROOT"]]["vartree"].dbapi
13380 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13382 mypkgs.extend(vardb.match(x))
13384 # If some packages were found...
13386 # Get our global settings (we only print stuff if it varies from
13387 # the current config)
13388 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13389 auxkeys = mydesiredvars + [ "USE", "IUSE"]
13391 pkgsettings = portage.config(clone=settings)
13393 for myvar in mydesiredvars:
13394 global_vals[myvar] = set(settings.get(myvar, "").split())
13396 # Loop through each package
13397 # Only print settings if they differ from global settings
13398 header_title = "Package Settings"
13399 print header_width * "="
13400 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13401 print header_width * "="
13402 from portage.output import EOutput
# Fetch each package's recorded build-time variables from the vardb.
13405 # Get all package specific variables
13406 auxvalues = vardb.aux_get(pkg, auxkeys)
13408 for i in xrange(len(auxkeys)):
13409 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
13411 for myvar in mydesiredvars:
13412 # If the package variable doesn't match the
13413 # current global variable, something has changed
13414 # so set diff_found so we know to print
13415 if valuesmap[myvar] != global_vals[myvar]:
13416 diff_values[myvar] = valuesmap[myvar]
# USE is compared only against flags the package actually declares (IUSE).
13417 valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
13418 valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
13419 pkgsettings.reset()
13420 # If a matching ebuild is no longer available in the tree, maybe it
13421 # would make sense to compare against the flags for the best
13422 # available version with the same slot?
13424 if portdb.cpv_exists(pkg):
13426 pkgsettings.setcpv(pkg, mydb=mydb)
13427 if valuesmap["IUSE"].intersection(
13428 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
13429 diff_values["USE"] = valuesmap["USE"]
13430 # If a difference was found, print the info for
13433 # Print package info
13434 print "%s was built with the following:" % pkg
13435 for myvar in mydesiredvars + ["USE"]:
13436 if myvar in diff_values:
13437 mylist = list(diff_values[myvar])
13439 print "%s=\"%s\"" % (myvar, " ".join(mylist))
# Run the installed ebuild's pkg_info() phase for each package.
13441 print ">>> Attempting to run pkg_info() for '%s'" % pkg
13442 ebuildpath = vardb.findname(pkg)
13443 if not ebuildpath or not os.path.exists(ebuildpath):
13444 out.ewarn("No ebuild found for '%s'" % pkg)
# NOTE(review): settings.get() returns a string, so `== 1` below is
# always False — debug is effectively disabled here; likely meant "1"
# (same pattern as in action_config).
13446 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13447 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
13448 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
13451 def action_search(root_config, myopts, myfiles, spinner):
# Search the tree for each term in myfiles (emerge --search /
# --searchdesc) and print the results.  NOTE(review): this extraction is
# missing interleaved lines; comments describe only the visible code.
13453 print "emerge: no search terms provided."
# One search instance configured from the relevant emerge options.
13455 searchinstance = search(root_config,
13456 spinner, "--searchdesc" in myopts,
13457 "--quiet" not in myopts, "--usepkg" in myopts,
13458 "--usepkgonly" in myopts)
# Execute each term; terms are regexes, so report syntax errors per term.
13459 for mysearch in myfiles:
13461 searchinstance.execute(mysearch)
13462 except re.error, comment:
13463 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13465 searchinstance.output()
13467 def action_depclean(settings, trees, ldpath_mtimes,
13468 myopts, action, myfiles, spinner):
13469 # Kill packages that aren't explicitly merged or are required as a
13470 # dependency of another package. World file is explicit.
13472 # Global depclean or prune operations are not very safe when there are
13473 # missing dependencies since it's unknown how badly incomplete
13474 # the dependency graph is, and we might accidentally remove packages
13475 # that should have been pulled into the graph. On the other hand, it's
13476 # relatively safe to ignore missing deps when only asked to remove
13477 # specific packages.
13478 allow_missing_deps = len(myfiles) > 0
13481 msg.append("Always study the list of packages to be cleaned for any obvious\n")
13482 msg.append("mistakes. Packages that are part of the world set will always\n")
13483 msg.append("be kept. They can be manually added to this set with\n")
13484 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
13485 msg.append("package.provided (see portage(5)) will be removed by\n")
13486 msg.append("depclean, even if they are part of the world set.\n")
# NOTE(review): this span is the tail of the depclean/prune action handler;
# the enclosing "def" lies above this chunk and a number of original lines
# are elided, so the comments below describe only what the visible lines show.
# Explain depclean's safety policy before doing anything destructive.
13488 msg.append("As a safety measure, depclean will not remove any packages\n")
13489 msg.append("unless *all* required dependencies have been resolved. As a\n")
13490 msg.append("consequence, it is often necessary to run %s\n" % \
13491 good("`emerge --update"))
13492 msg.append(good("--newuse --deep @system @world`") + \
13493 " prior to depclean.\n")
# Show the warning unless --quiet was given or specific atoms were supplied.
13495 if action == "depclean" and "--quiet" not in myopts and not myfiles:
13496 portage.writemsg_stdout("\n")
13498 portage.writemsg_stdout(colorize("WARN", " * ") + x)
13500 xterm_titles = "notitles" not in settings.features
13501 myroot = settings["ROOT"]
13502 root_config = trees[myroot]["root_config"]
13503 getSetAtoms = root_config.setconfig.getSetAtoms
# vardb: database of installed packages for the target root.
13504 vardb = trees[myroot]["vartree"].dbapi
13506 required_set_names = ("system", "world")
# Snapshot the system and world sets; these anchor what must be kept.
13510 for s in required_set_names:
13511 required_sets[s] = InternalPackageSet(
13512 initial_atoms=getSetAtoms(s))
13515 # When removing packages, use a temporary version of world
13516 # which excludes packages that are intended to be eligible for
13518 world_temp_set = required_sets["world"]
13519 system_set = required_sets["system"]
# Warn loudly when either required set is empty -- proceeding could
# remove packages the rest of the system depends on.
13521 if not system_set or not world_temp_set:
13524 writemsg_level("!!! You have no system list.\n",
13525 level=logging.ERROR, noiselevel=-1)
13527 if not world_temp_set:
13528 writemsg_level("!!! You have no world file.\n",
13529 level=logging.WARNING, noiselevel=-1)
13531 writemsg_level("!!! Proceeding is likely to " + \
13532 "break your installation.\n",
13533 level=logging.WARNING, noiselevel=-1)
13534 if "--pretend" not in myopts:
# Give the user a countdown window to abort a risky run.
13535 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13537 if action == "depclean":
13538 emergelog(xterm_titles, " >>> depclean")
13541 args_set = InternalPackageSet()
# Validate every command-line atom before it is used.
13544 if not is_valid_package_atom(x):
13545 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13546 level=logging.ERROR, noiselevel=-1)
13547 writemsg_level("!!! Please check ebuild(5) for full details.\n")
# Expand short names against installed packages; an ambiguous short
# name is reported together with all fully-qualified candidates.
13550 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13551 except portage.exception.AmbiguousPackageName, e:
13552 msg = "The short ebuild name \"" + x + \
13553 "\" is ambiguous. Please specify " + \
13554 "one of the following " + \
13555 "fully-qualified ebuild names instead:"
13556 for line in textwrap.wrap(msg, 70):
13557 writemsg_level("!!! %s\n" % (line,),
13558 level=logging.ERROR, noiselevel=-1)
13560 writemsg_level(" %s\n" % colorize("INFORM", i),
13561 level=logging.ERROR, noiselevel=-1)
13562 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13565 matched_packages = False
13568 matched_packages = True
13570 if not matched_packages:
13571 writemsg_level(">>> No packages selected for removal by %s\n" % \
# Build a resolver over installed packages only ("remove" params).
13575 writemsg_level("\nCalculating dependencies ")
13576 resolver_params = create_depgraph_params(myopts, "remove")
13577 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13578 vardb = resolver.trees[myroot]["vartree"].dbapi
13580 if action == "depclean":
13583 # Pull in everything that's installed but not matched
13584 # by an argument atom since we don't want to clean any
13585 # package if something depends on it.
13587 world_temp_set.clear()
13592 if args_set.findAtomForPackage(pkg) is None:
13593 world_temp_set.add("=" + pkg.cpv)
13595 except portage.exception.InvalidDependString, e:
13596 show_invalid_depstring_notice(pkg,
13597 pkg.metadata["PROVIDE"], str(e))
# Keep the package anyway when its PROVIDE string is unparseable.
13599 world_temp_set.add("=" + pkg.cpv)
13602 elif action == "prune":
13604 # Pull in everything that's installed since we don't
13605 # want to prune a package if something depends on it.
13606 world_temp_set.clear()
13607 world_temp_set.update(vardb.cp_all())
13611 # Try to prune everything that's slotted.
13612 for cp in vardb.cp_all():
13613 if len(vardb.cp_list(cp)) > 1:
13616 # Remove atoms from world that match installed packages
13617 # that are also matched by argument atoms, but do not remove
13618 # them if they match the highest installed version.
13621 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13622 if not pkgs_for_cp or pkg not in pkgs_for_cp:
13623 raise AssertionError("package expected in matches: " + \
13624 "cp = %s, cpv = %s matches = %s" % \
13625 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
# The last match is treated as the highest installed version.
13627 highest_version = pkgs_for_cp[-1]
13628 if pkg == highest_version:
13629 # pkg is the highest version
13630 world_temp_set.add("=" + pkg.cpv)
13633 if len(pkgs_for_cp) <= 1:
13634 raise AssertionError("more packages expected: " + \
13635 "cp = %s, cpv = %s matches = %s" % \
13636 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13639 if args_set.findAtomForPackage(pkg) is None:
13640 world_temp_set.add("=" + pkg.cpv)
13642 except portage.exception.InvalidDependString, e:
13643 show_invalid_depstring_notice(pkg,
13644 pkg.metadata["PROVIDE"], str(e))
13646 world_temp_set.add("=" + pkg.cpv)
# Feed each required set into the resolver as a SetArg dependency root.
13650 for s, package_set in required_sets.iteritems():
13651 set_atom = SETPREFIX + s
13652 set_arg = SetArg(arg=set_atom, set=package_set,
13653 root_config=resolver.roots[myroot])
13654 set_args[s] = set_arg
13655 for atom in set_arg.set:
13656 resolver._dep_stack.append(
13657 Dependency(atom=atom, root=myroot, parent=set_arg))
13658 resolver.digraph.add(set_arg, None)
13660 success = resolver._complete_graph()
13661 writemsg_level("\b\b... done!\n")
13663 resolver.display_problems()
# Report deps that could not be satisfied from installed packages;
# a truthy result means the operation must abort (unless deps are
# allowed to be missing).
13668 def unresolved_deps():
13670 unresolvable = set()
13671 for dep in resolver._initially_unsatisfied_deps:
13672 if isinstance(dep.parent, Package) and \
13673 (dep.priority > UnmergeDepPriority.SOFT):
13674 unresolvable.add((dep.atom, dep.parent.cpv))
13676 if not unresolvable:
13679 if unresolvable and not allow_missing_deps:
13680 prefix = bad(" * ")
13682 msg.append("Dependencies could not be completely resolved due to")
13683 msg.append("the following required packages not being installed:")
13685 for atom, parent in unresolvable:
13686 msg.append("  %s pulled in by:" % (atom,))
13687 msg.append("    %s" % (parent,))
13689 msg.append("Have you forgotten to run " + \
13690 good("`emerge --update --newuse --deep @system @world`") + " prior")
13691 msg.append(("to %s? It may be necessary to manually " + \
13692 "uninstall packages that no longer") % action)
13693 msg.append("exist in the portage tree since " + \
13694 "it may not be possible to satisfy their")
13695 msg.append("dependencies. Also, be aware of " + \
13696 "the --with-bdeps option that is documented")
13697 msg.append("in " + good("`man emerge`") + ".")
13698 if action == "prune":
13700 msg.append("If you would like to ignore " + \
13701 "dependencies then use %s." % good("--nodeps"))
13702 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13703 level=logging.ERROR, noiselevel=-1)
13707 if unresolved_deps():
# Work on a copy so later graph surgery doesn't disturb the resolver.
13710 graph = resolver.digraph.copy()
13711 required_pkgs_total = 0
13713 if isinstance(node, Package):
13714 required_pkgs_total += 1
# Print the reverse dependencies that keep child_node installed.
13716 def show_parents(child_node):
13717 parent_nodes = graph.parent_nodes(child_node)
13718 if not parent_nodes:
13719 # With --prune, the highest version can be pulled in without any
13720 # real parent since all installed packages are pulled in. In that
13721 # case there's nothing to show here.
13724 for node in parent_nodes:
13725 parent_strs.append(str(getattr(node, "cpv", node)))
13728 msg.append("  %s pulled in by:\n" % (child_node.cpv,))
13729 for parent_str in parent_strs:
13730 msg.append("    %s\n" % (parent_str,))
13732 portage.writemsg_stdout("".join(msg), noiselevel=-1)
13734 def cmp_pkg_cpv(pkg1, pkg2):
13735 """Sort Package instances by cpv."""
13736 if pkg1.cpv > pkg2.cpv:
13738 elif pkg1.cpv == pkg2.cpv:
# Select installed packages that are absent from the dependency graph;
# anything not reachable from the required sets is removable.
13743 def create_cleanlist():
13744 pkgs_to_remove = []
13746 if action == "depclean":
13749 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13752 arg_atom = args_set.findAtomForPackage(pkg)
13753 except portage.exception.InvalidDependString:
13754 # this error has already been displayed by now
13758 if pkg not in graph:
13759 pkgs_to_remove.append(pkg)
13760 elif "--verbose" in myopts:
13764 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13765 if pkg not in graph:
13766 pkgs_to_remove.append(pkg)
13767 elif "--verbose" in myopts:
13770 elif action == "prune":
13771 # Prune really uses all installed instead of world. It's not
13772 # a real reverse dependency so don't display it as such.
13773 graph.remove(set_args["world"])
13775 for atom in args_set:
13776 for pkg in vardb.match_pkgs(atom):
13777 if pkg not in graph:
13778 pkgs_to_remove.append(pkg)
13779 elif "--verbose" in myopts:
13782 if not pkgs_to_remove:
13784 ">>> No packages selected for removal by %s\n" % action)
13785 if "--verbose" not in myopts:
13787 ">>> To see reverse dependencies, use %s\n" % \
13789 if action == "prune":
13791 ">>> To ignore dependencies, use %s\n" % \
13794 return pkgs_to_remove
13796 cleanlist = create_cleanlist()
13799 clean_set = set(cleanlist)
13801 # Check if any of these package are the sole providers of libraries
13802 # with consumers that have not been selected for removal. If so, these
13803 # packages and any dependencies need to be added to the graph.
13804 real_vardb = trees[myroot]["vartree"].dbapi
13805 linkmap = real_vardb.linkmap
13806 liblist = linkmap.listLibraryObjects()
# Caches avoid repeated linkmap scans across packages.
13807 consumer_cache = {}
13808 provider_cache = {}
13812 writemsg_level(">>> Checking for lib consumers...\n")
13814 for pkg in cleanlist:
13815 pkg_dblink = real_vardb._dblink(pkg.cpv)
13816 provided_libs = set()
# Collect the shared-library objects owned by this package.
13818 for lib in liblist:
13819 if pkg_dblink.isowner(lib, myroot):
13820 provided_libs.add(lib)
13822 if not provided_libs:
13826 for lib in provided_libs:
13827 lib_consumers = consumer_cache.get(lib)
13828 if lib_consumers is None:
13829 lib_consumers = linkmap.findConsumers(lib)
13830 consumer_cache[lib] = lib_consumers
13832 consumers[lib] = lib_consumers
# Files owned by the package itself don't count as external consumers.
13837 for lib, lib_consumers in consumers.items():
13838 for consumer_file in list(lib_consumers):
13839 if pkg_dblink.isowner(consumer_file, myroot):
13840 lib_consumers.remove(consumer_file)
13841 if not lib_consumers:
13847 for lib, lib_consumers in consumers.iteritems():
13849 soname = soname_cache.get(lib)
13851 soname = linkmap.getSoname(lib)
13852 soname_cache[lib] = soname
13854 consumer_providers = []
13855 for lib_consumer in lib_consumers:
# NOTE(review): the cache is read with key `lib` but written with key
# `lib_consumer` below -- these keys look inconsistent; verify against
# the intended semantics of provider_cache before relying on hits here.
13856 providers = provider_cache.get(lib)
13857 if providers is None:
13858 providers = linkmap.findProviders(lib_consumer)
13859 provider_cache[lib_consumer] = providers
13860 if soname not in providers:
13861 # Why does this happen?
13863 consumer_providers.append(
13864 (lib_consumer, providers[soname]))
13866 consumers[lib] = consumer_providers
13868 consumer_map[pkg] = consumers
# Gather every file involved so ownership can be resolved in one pass.
13872 search_files = set()
13873 for consumers in consumer_map.itervalues():
13874 for lib, consumer_providers in consumers.iteritems():
13875 for lib_consumer, providers in consumer_providers:
13876 search_files.add(lib_consumer)
13877 search_files.update(providers)
13879 writemsg_level(">>> Assigning files to packages...\n")
13880 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
13882 for pkg, consumers in consumer_map.items():
13883 for lib, consumer_providers in consumers.items():
13884 lib_consumers = set()
13886 for lib_consumer, providers in consumer_providers:
13887 owner_set = file_owners.get(lib_consumer)
13888 provider_dblinks = set()
13889 provider_pkgs = set()
13891 if len(providers) > 1:
13892 for provider in providers:
13893 provider_set = file_owners.get(provider)
13894 if provider_set is not None:
13895 provider_dblinks.update(provider_set)
# A provider package that is NOT being removed can satisfy the consumer.
13897 if len(provider_dblinks) > 1:
13898 for provider_dblink in provider_dblinks:
13899 pkg_key = ("installed", myroot,
13900 provider_dblink.mycpv, "nomerge")
13901 if pkg_key not in clean_set:
13902 provider_pkgs.add(vardb.get(pkg_key))
13907 if owner_set is not None:
13908 lib_consumers.update(owner_set)
# Consumers that are themselves being removed don't matter.
13910 for consumer_dblink in list(lib_consumers):
13911 if ("installed", myroot, consumer_dblink.mycpv,
13912 "nomerge") in clean_set:
13913 lib_consumers.remove(consumer_dblink)
13917 consumers[lib] = lib_consumers
13921 del consumer_map[pkg]
13924 # TODO: Implement a package set for rebuilding consumer packages.
13926 msg = "In order to avoid breakage of link level " + \
13927 "dependencies, one or more packages will not be removed. " + \
13928 "This can be solved by rebuilding " + \
13929 "the packages that pulled them in."
13931 prefix = bad(" * ")
13932 from textwrap import wrap
13933 writemsg_level("".join(prefix + "%s\n" % line for \
13934 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
# List each retained package with its surviving library consumers.
13937 for pkg, consumers in consumer_map.iteritems():
13938 unique_consumers = set(chain(*consumers.values()))
13939 unique_consumers = sorted(consumer.mycpv \
13940 for consumer in unique_consumers)
13942 msg.append("  %s pulled in by:" % (pkg.cpv,))
13943 for consumer in unique_consumers:
13944 msg.append("    %s" % (consumer,))
13946 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
13947 level=logging.WARNING, noiselevel=-1)
13949 # Add lib providers to the graph as children of lib consumers,
13950 # and also add any dependencies pulled in by the provider.
13951 writemsg_level(">>> Adding lib providers to graph...\n")
13953 for pkg, consumers in consumer_map.iteritems():
13954 for consumer_dblink in set(chain(*consumers.values())):
13955 consumer_pkg = vardb.get(("installed", myroot,
13956 consumer_dblink.mycpv, "nomerge"))
13957 if not resolver._add_pkg(pkg,
13958 Dependency(parent=consumer_pkg,
13959 priority=UnmergeDepPriority(runtime=True),
13961 resolver.display_problems()
# The graph changed, so re-resolve and rebuild the clean list.
13964 writemsg_level("\nCalculating dependencies ")
13965 success = resolver._complete_graph()
13966 writemsg_level("\b\b... done!\n")
13967 resolver.display_problems()
13970 if unresolved_deps():
13973 graph = resolver.digraph.copy()
13974 required_pkgs_total = 0
13976 if isinstance(node, Package):
13977 required_pkgs_total += 1
13978 cleanlist = create_cleanlist()
13981 clean_set = set(cleanlist)
13983 # Use a topological sort to create an unmerge order such that
13984 # each package is unmerged before its dependencies. This is
13985 # necessary to avoid breaking things that may need to run
13986 # during pkg_prerm or pkg_postrm phases.
13988 # Create a new graph to account for dependencies between the
13989 # packages being unmerged.
13993 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
13994 runtime = UnmergeDepPriority(runtime=True)
13995 runtime_post = UnmergeDepPriority(runtime_post=True)
13996 buildtime = UnmergeDepPriority(buildtime=True)
13998 "RDEPEND": runtime,
13999 "PDEPEND": runtime_post,
14000 "DEPEND": buildtime,
14003 for node in clean_set:
14004 graph.add(node, None)
14006 node_use = node.metadata["USE"].split()
14007 for dep_type in dep_keys:
14008 depstr = node.metadata[dep_type]
# Temporarily relax strict dep checking; installed packages can carry
# dep strings that no longer parse strictly. Restored right after.
14012 portage.dep._dep_check_strict = False
14013 success, atoms = portage.dep_check(depstr, None, settings,
14014 myuse=node_use, trees=resolver._graph_trees,
14017 portage.dep._dep_check_strict = True
14019 # Ignore invalid deps of packages that will
14020 # be uninstalled anyway.
14023 priority = priority_map[dep_type]
14025 if not isinstance(atom, portage.dep.Atom):
14026 # Ignore invalid atoms returned from dep_check().
14030 matches = vardb.match_pkgs(atom)
14033 for child_node in matches:
14034 if child_node in clean_set:
14035 graph.add(child_node, node, priority=priority)
14038 if len(graph.order) == len(graph.root_nodes()):
14039 # If there are no dependencies between packages
14040 # let unmerge() group them by cat/pn.
14042 cleanlist = [pkg.cpv for pkg in graph.order]
14044 # Order nodes from lowest to highest overall reference count for
14045 # optimal root node selection.
14046 node_refcounts = {}
14047 for node in graph.order:
14048 node_refcounts[node] = len(graph.parent_nodes(node))
14049 def cmp_reference_count(node1, node2):
14050 return node_refcounts[node1] - node_refcounts[node2]
14051 graph.order.sort(key=cmp_sort_key(cmp_reference_count))
14053 ignore_priority_range = [None]
14054 ignore_priority_range.extend(
14055 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
# Repeatedly peel off root nodes (packages nothing else in the clean
# set depends on), progressively ignoring weaker dep priorities in
# order to break dependency cycles.
14056 while not graph.empty():
14057 for ignore_priority in ignore_priority_range:
14058 nodes = graph.root_nodes(ignore_priority=ignore_priority)
14062 raise AssertionError("no root nodes")
14063 if ignore_priority is not None:
14064 # Some deps have been dropped due to circular dependencies,
14065 # so only pop one node in order to minimize the number that
14070 cleanlist.append(node.cpv)
14072 unmerge(root_config, myopts, "unmerge", cleanlist,
14073 ldpath_mtimes, ordered=ordered)
14075 if action == "prune":
14078 if not cleanlist and "--quiet" in myopts:
# Summary statistics for the user.
14081 print "Packages installed: "+str(len(vardb.cpv_all()))
14082 print "Packages in world: " + \
14083 str(len(root_config.sets["world"].getAtoms()))
14084 print "Packages in system: " + \
14085 str(len(root_config.sets["system"].getAtoms()))
14086 print "Required packages: "+str(required_pkgs_total)
14087 if "--pretend" in myopts:
14088 print "Number to remove: "+str(len(cleanlist))
14090 print "Number removed: "+str(len(cleanlist))
14092 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14094 Construct a depgraph for the given resume list. This will raise
14095 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14097 @returns: (success, depgraph, dropped_tasks)
14100 skip_unsatisfied = True
14101 mergelist = mtimedb["resume"]["mergelist"]
14102 dropped_tasks = set()
# Retry loop (loop header elided): build a fresh depgraph and try to
# load the resume command; on UnsatisfiedResumeDep, prune the offending
# tasks from the mergelist and try again.
14104 mydepgraph = depgraph(settings, trees,
14105 myopts, myparams, spinner)
# NOTE(review): skip_masked is defined in elided lines above -- verify.
14107 success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14108 skip_masked=skip_masked)
14109 except depgraph.UnsatisfiedResumeDep, e:
14110 if not skip_unsatisfied:
14113 graph = mydepgraph.digraph
# Seed with every package whose dependency failed, then walk up the
# graph: dropping a package can invalidate its parents as well.
14114 unsatisfied_parents = dict((dep.parent, dep.parent) \
14115 for dep in e.value)
14116 traversed_nodes = set()
14117 unsatisfied_stack = list(unsatisfied_parents)
14118 while unsatisfied_stack:
14119 pkg = unsatisfied_stack.pop()
14120 if pkg in traversed_nodes:
14122 traversed_nodes.add(pkg)
14124 # If this package was pulled in by a parent
14125 # package scheduled for merge, removing this
14126 # package may cause the parent package's
14127 # dependency to become unsatisfied.
14128 for parent_node in graph.parent_nodes(pkg):
14129 if not isinstance(parent_node, Package) \
14130 or parent_node.operation not in ("merge", "nomerge"):
14133 graph.child_nodes(parent_node,
14134 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14135 if pkg in unsatisfied:
14136 unsatisfied_parents[parent_node] = parent_node
14137 unsatisfied_stack.append(parent_node)
# Drop the unsatisfied entries from the resume mergelist.
14139 pruned_mergelist = []
14140 for x in mergelist:
14141 if isinstance(x, list) and \
14142 tuple(x) not in unsatisfied_parents:
14143 pruned_mergelist.append(x)
14145 # If the mergelist doesn't shrink then this loop is infinite.
14146 if len(pruned_mergelist) == len(mergelist):
14147 # This happens if a package can't be dropped because
14148 # it's already installed, but it has unsatisfied PDEPEND.
14150 mergelist[:] = pruned_mergelist
14152 # Exclude installed packages that have been removed from the graph due
14153 # to failure to build/install runtime dependencies after the dependent
14154 # package has already been installed.
14155 dropped_tasks.update(pkg for pkg in \
14156 unsatisfied_parents if pkg.operation != "nomerge")
# Break references so the discarded depgraph state can be collected.
14157 mydepgraph.break_refs(unsatisfied_parents)
14159 del e, graph, traversed_nodes, \
14160 unsatisfied_parents, unsatisfied_stack
14164 return (success, mydepgraph, dropped_tasks)
# Main handler for build/merge actions: validates any saved resume data,
# resolves dependencies, optionally displays/prompts, then runs the
# Scheduler to merge. (Body is elided in places in this view.)
14166 def action_build(settings, trees, mtimedb,
14167 myopts, myaction, myfiles, spinner):
14169 # validate the state of the resume data
14170 # so that we can make assumptions later.
# Discard resume entries that are malformed or stale.
14171 for k in ("resume", "resume_backup"):
14172 if k not in mtimedb:
14174 resume_data = mtimedb[k]
14175 if not isinstance(resume_data, dict):
14178 mergelist = resume_data.get("mergelist")
14179 if not isinstance(mergelist, list):
# Each mergelist task must be a 4-tuple-shaped list.
14182 for x in mergelist:
14183 if not (isinstance(x, list) and len(x) == 4):
14185 pkg_type, pkg_root, pkg_key, pkg_action = x
14186 if pkg_root not in trees:
14187 # Current $ROOT setting differs,
14188 # so the list must be stale.
14194 resume_opts = resume_data.get("myopts")
14195 if not isinstance(resume_opts, (dict, list)):
14198 favorites = resume_data.get("favorites")
14199 if not isinstance(favorites, list):
# Resuming: fall back to the backup resume entry when needed, and merge
# the saved options into myopts (minus interactive/display flags).
14204 if "--resume" in myopts and \
14205 ("resume" in mtimedb or
14206 "resume_backup" in mtimedb):
14208 if "resume" not in mtimedb:
14209 mtimedb["resume"] = mtimedb["resume_backup"]
14210 del mtimedb["resume_backup"]
14212 # "myopts" is a list for backward compatibility.
14213 resume_opts = mtimedb["resume"].get("myopts", [])
14214 if isinstance(resume_opts, list):
14215 resume_opts = dict((k,True) for k in resume_opts)
14216 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14217 resume_opts.pop(opt, None)
14218 myopts.update(resume_opts)
14220 if "--debug" in myopts:
14221 writemsg_level("myopts %s\n" % (myopts,))
14223 # Adjust config according to options of the command being resumed.
14224 for myroot in trees:
14225 mysettings = trees[myroot]["vartree"].settings
14226 mysettings.unlock()
14227 adjust_config(myopts, mysettings)
14229 del myroot, mysettings
14231 ldpath_mtimes = mtimedb["ldpath"]
# Extract commonly tested option flags once.
14234 buildpkgonly = "--buildpkgonly" in myopts
14235 pretend = "--pretend" in myopts
14236 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14237 ask = "--ask" in myopts
14238 nodeps = "--nodeps" in myopts
14239 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14240 tree = "--tree" in myopts
14241 if nodeps and tree:
14243 del myopts["--tree"]
14244 portage.writemsg(colorize("WARN", " * ") + \
14245 "--tree is broken with --nodeps. Disabling...\n")
14246 debug = "--debug" in myopts
14247 verbose = "--verbose" in myopts
14248 quiet = "--quiet" in myopts
14249 if pretend or fetchonly:
14250 # make the mtimedb readonly
14251 mtimedb.filename = None
# Warn that --digest can mask corruption; repoman is preferred.
14252 if "--digest" in myopts:
14253 msg = "The --digest option can prevent corruption from being" + \
14254 " noticed. The `repoman manifest` command is the preferred" + \
14255 " way to generate manifests and it is capable of doing an" + \
14256 " entire repository or category at once."
14257 prefix = bad(" * ")
14258 writemsg(prefix + "\n")
14259 from textwrap import wrap
14260 for line in wrap(msg, 72):
14261 writemsg("%s%s\n" % (prefix, line))
14262 writemsg(prefix + "\n")
# Print a heading explaining what the upcoming display list means.
14264 if "--quiet" not in myopts and \
14265 ("--pretend" in myopts or "--ask" in myopts or \
14266 "--tree" in myopts or "--verbose" in myopts):
14268 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14270 elif "--buildpkgonly" in myopts:
14274 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14276 print darkgreen("These are the packages that would be %s, in reverse order:") % action
14280 print darkgreen("These are the packages that would be %s, in order:") % action
14283 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14284 if not show_spinner:
14285 spinner.update = spinner.update_quiet
14288 favorites = mtimedb["resume"].get("favorites")
14289 if not isinstance(favorites, list):
14293 print "Calculating dependencies ",
14294 myparams = create_depgraph_params(myopts, myaction)
# --skipfirst: drop the first pending merge task from the resume list.
14296 resume_data = mtimedb["resume"]
14297 mergelist = resume_data["mergelist"]
14298 if mergelist and "--skipfirst" in myopts:
14299 for i, task in enumerate(mergelist):
14300 if isinstance(task, list) and \
14301 task and task[-1] == "merge":
# Build the resume depgraph; on failure fall through to the detailed
# error reporting below.
14308 success, mydepgraph, dropped_tasks = resume_depgraph(
14309 settings, trees, mtimedb, myopts, myparams, spinner)
14310 except (portage.exception.PackageNotFound,
14311 depgraph.UnsatisfiedResumeDep), e:
14312 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14313 mydepgraph = e.depgraph
14316 from textwrap import wrap
14317 from portage.output import EOutput
14320 resume_data = mtimedb["resume"]
14321 mergelist = resume_data.get("mergelist")
14322 if not isinstance(mergelist, list):
14324 if mergelist and debug or (verbose and not quiet):
14325 out.eerror("Invalid resume list:")
14328 for task in mergelist:
14329 if isinstance(task, list):
14330 out.eerror(indent + str(tuple(task)))
14333 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14334 out.eerror("One or more packages are either masked or " + \
14335 "have missing dependencies:")
14338 for dep in e.value:
14339 if dep.atom is None:
14340 out.eerror(indent + "Masked package:")
14341 out.eerror(2 * indent + str(dep.parent))
14344 out.eerror(indent + str(dep.atom) + " pulled in by:")
14345 out.eerror(2 * indent + str(dep.parent))
14347 msg = "The resume list contains packages " + \
14348 "that are either masked or have " + \
14349 "unsatisfied dependencies. " + \
14350 "Please restart/continue " + \
14351 "the operation manually, or use --skipfirst " + \
14352 "to skip the first package in the list and " + \
14353 "any other packages that may be " + \
14354 "masked or have missing dependencies."
14355 for line in wrap(msg, 72):
14357 elif isinstance(e, portage.exception.PackageNotFound):
14358 out.eerror("An expected package is " + \
14359 "not available: %s" % str(e))
14361 msg = "The resume list contains one or more " + \
14362 "packages that are no longer " + \
14363 "available. Please restart/continue " + \
14364 "the operation manually."
14365 for line in wrap(msg, 72):
14369 print "\b\b... done!"
# Report tasks that resume_depgraph() had to drop from the list.
14373 portage.writemsg("!!! One or more packages have been " + \
14374 "dropped due to\n" + \
14375 "!!! masking or unsatisfied dependencies:\n\n",
14377 for task in dropped_tasks:
14378 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
14379 portage.writemsg("\n", noiselevel=-1)
14382 if mydepgraph is not None:
14383 mydepgraph.display_problems()
14384 if not (ask or pretend):
14385 # delete the current list and also the backup
14386 # since it's probably stale too.
14387 for k in ("resume", "resume_backup"):
14388 mtimedb.pop(k, None)
14393 if ("--resume" in myopts):
14394 print darkgreen("emerge: It seems we have nothing to resume...")
# Normal (non-resume) path: build a fresh depgraph from the arguments.
14397 myparams = create_depgraph_params(myopts, myaction)
14398 if "--quiet" not in myopts and "--nodeps" not in myopts:
14399 print "Calculating dependencies ",
14401 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14403 retval, favorites = mydepgraph.select_files(myfiles)
14404 except portage.exception.PackageNotFound, e:
14405 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14407 except portage.exception.PackageSetNotFound, e:
14408 root_config = trees[settings["ROOT"]]["root_config"]
14409 display_missing_pkg_set(root_config, e.value)
14412 print "\b\b... done!"
14414 mydepgraph.display_problems()
# Interactive display path (--ask/--tree/--verbose, not --pretend):
# show the merge list, then prompt before proceeding.
14417 if "--pretend" not in myopts and \
14418 ("--ask" in myopts or "--tree" in myopts or \
14419 "--verbose" in myopts) and \
14420 not ("--quiet" in myopts and "--ask" not in myopts):
14421 if "--resume" in myopts:
14422 mymergelist = mydepgraph.altlist()
14423 if len(mymergelist) == 0:
14424 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14426 favorites = mtimedb["resume"]["favorites"]
14427 retval = mydepgraph.display(
14428 mydepgraph.altlist(reversed=tree),
14429 favorites=favorites)
14430 mydepgraph.display_problems()
14431 if retval != os.EX_OK:
14433 prompt="Would you like to resume merging these packages?"
14435 retval = mydepgraph.display(
14436 mydepgraph.altlist(reversed=("--tree" in myopts)),
14437 favorites=favorites)
14438 mydepgraph.display_problems()
14439 if retval != os.EX_OK:
# Determine whether anything will actually be merged.
14442 for x in mydepgraph.altlist():
14443 if isinstance(x, Package) and x.operation == "merge":
# With --noreplace, offer to add the requested packages to world;
# sets that are not world candidates are filtered out of the offer.
14447 sets = trees[settings["ROOT"]]["root_config"].sets
14448 world_candidates = None
14449 if "--noreplace" in myopts and \
14450 not oneshot and favorites:
14451 # Sets that are not world candidates are filtered
14452 # out here since the favorites list needs to be
14453 # complete for depgraph.loadResumeCommand() to
14454 # operate correctly.
14455 world_candidates = [x for x in favorites \
14456 if not (x.startswith(SETPREFIX) and \
14457 not sets[x[1:]].world_candidate)]
14458 if "--noreplace" in myopts and \
14459 not oneshot and world_candidates:
14461 for x in world_candidates:
14462 print " %s %s" % (good("*"), x)
14463 prompt="Would you like to add these packages to your world favorites?"
14464 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14465 prompt="Nothing to merge; would you like to auto-clean packages?"
14468 print "Nothing to merge; quitting."
14471 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14472 prompt="Would you like to fetch the source files for these packages?"
14474 prompt="Would you like to merge these packages?"
14476 if "--ask" in myopts and userquery(prompt) == "No":
14481 # Don't ask again (e.g. when auto-cleaning packages after merge)
14482 myopts.pop("--ask", None)
# --pretend (without fetch): display the list and stop before merging.
14484 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14485 if ("--resume" in myopts):
14486 mymergelist = mydepgraph.altlist()
14487 if len(mymergelist) == 0:
14488 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14490 favorites = mtimedb["resume"]["favorites"]
14491 retval = mydepgraph.display(
14492 mydepgraph.altlist(reversed=tree),
14493 favorites=favorites)
14494 mydepgraph.display_problems()
14495 if retval != os.EX_OK:
14498 retval = mydepgraph.display(
14499 mydepgraph.altlist(reversed=("--tree" in myopts)),
14500 favorites=favorites)
14501 mydepgraph.display_problems()
14502 if retval != os.EX_OK:
# --buildpkgonly requires that all deps are already merged: check that
# the graph (minus nomerge nodes) has no unsatisfied edges.
14504 if "--buildpkgonly" in myopts:
14505 graph_copy = mydepgraph.digraph.clone()
14506 removed_nodes = set()
14507 for node in list(graph_copy.order):
14508 if not isinstance(node, Package) or \
14509 node.operation == "nomerge":
14510 removed_nodes.add(node)
14511 graph_copy.difference_update(removed_nodes)
14512 if not graph_copy.hasallzeros(ignore_priority = \
14513 DepPrioritySatisfiedRange.ignore_medium):
14514 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14515 print "!!! You have to merge the dependencies before you can build this package.\n"
# Same check on the non-pretend path, with a different failure message.
14518 if "--buildpkgonly" in myopts:
14519 graph_copy = mydepgraph.digraph.clone()
14520 removed_nodes = set()
14521 for node in list(graph_copy.order):
14522 if not isinstance(node, Package) or \
14523 node.operation == "nomerge":
14524 removed_nodes.add(node)
14525 graph_copy.difference_update(removed_nodes)
14526 if not graph_copy.hasallzeros(ignore_priority = \
14527 DepPrioritySatisfiedRange.ignore_medium):
14528 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14529 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
# Resume path: hand the saved mergelist to the Scheduler and merge.
14532 if ("--resume" in myopts):
14533 favorites=mtimedb["resume"]["favorites"]
14534 mymergelist = mydepgraph.altlist()
14535 mydepgraph.break_refs(mymergelist)
14536 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14537 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
# Drop the depgraph before merging to free memory.
14538 del mydepgraph, mymergelist
14539 clear_caches(trees)
14541 retval = mergetask.merge()
14542 merge_count = mergetask.curval
# Rotate the current resume entry into resume_backup when more than
# one task remains, otherwise start a fresh resume entry.
14544 if "resume" in mtimedb and \
14545 "mergelist" in mtimedb["resume"] and \
14546 len(mtimedb["resume"]["mergelist"]) > 1:
14547 mtimedb["resume_backup"] = mtimedb["resume"]
14548 del mtimedb["resume"]
14550 mtimedb["resume"]={}
14551 # Stored as a dict starting with portage-2.1.6_rc1, and supported
14552 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14553 # a list type for options.
14554 mtimedb["resume"]["myopts"] = myopts.copy()
14556 # Convert Atom instances to plain str.
14557 mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
# --digest: regenerate digests for every ebuild about to be merged.
14559 if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14560 for pkgline in mydepgraph.altlist():
14561 if pkgline[0]=="ebuild" and pkgline[3]=="merge":
14562 y = trees[pkgline[1]]["porttree"].dbapi.findname(pkgline[2])
14563 tmpsettings = portage.config(clone=settings)
14565 if settings.get("PORTAGE_DEBUG", "") == "1":
14567 retval = portage.doebuild(
14568 y, "digest", settings["ROOT"], tmpsettings, edebug,
14569 ("--pretend" in myopts),
14570 mydbapi=trees[pkgline[1]]["porttree"].dbapi,
# Normal path: record nomerge favorites and run the Scheduler.
14573 pkglist = mydepgraph.altlist()
14574 mydepgraph.saveNomergeFavorites()
14575 mydepgraph.break_refs(pkglist)
14576 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14577 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14578 del mydepgraph, pkglist
14579 clear_caches(trees)
14581 retval = mergetask.merge()
14582 merge_count = mergetask.curval
# After a successful real merge, auto-clean if AUTOCLEAN=yes, else warn.
14584 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14585 if "yes" == settings.get("AUTOCLEAN"):
14586 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14587 unmerge(trees[settings["ROOT"]]["root_config"],
14588 myopts, "clean", [],
14589 ldpath_mtimes, autoclean=1)
14591 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14592 + " AUTOCLEAN is disabled. This can cause serious"
14593 + " problems due to overlapping packages.\n")
14594 trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
def multiple_actions(action1, action2):
	"""Complain on stderr that two mutually exclusive actions were given.

	@param action1: first requested action name
	@param action2: second, conflicting action name
	"""
	complaint = (
		"\n!!! Multiple actions requested... Please choose one only.\n"
		"!!! '%s' or '%s'\n\n" % (action1, action2))
	sys.stderr.write(complaint)
14603 def insert_optional_args(args):
14605 Parse optional arguments and insert a value if one has
14606 not been provided. This is done before feeding the args
14607 to the optparse parser since that parser does not support
14608 this feature natively.
# Only -j/--jobs takes an optional value here.
14612 jobs_opts = ("-j", "--jobs")
# Work from a reversed copy so .pop() yields args in original order.
14613 arg_stack = args[:]
14614 arg_stack.reverse()
14616 arg = arg_stack.pop()
# A clustered short option containing "j" (e.g. "-aj") counts too.
14618 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
# Anything that is not a jobs option passes through untouched.
14619 if not (short_job_opt or arg in jobs_opts):
14620 new_args.append(arg)
14623 # Insert an empty placeholder in order to
14624 # satisfy the requirements of optparse.
14626 new_args.append("--jobs")
# Split a count fused into the short form (e.g. "-j4"); for a cluster
# like "-aj", remember the other short options to re-emit afterwards.
14629 if short_job_opt and len(arg) > 2:
14630 if arg[:2] == "-j":
14632 job_count = int(arg[2:])
14634 saved_opts = arg[2:]
14637 saved_opts = arg[1:].replace("j", "")
# Otherwise the count may appear as the next argument on the stack.
14639 if job_count is None and arg_stack:
14641 job_count = int(arg_stack[-1])
14645 # Discard the job count from the stack
14646 # since we're consuming it here.
# No count given: "True" means an unlimited number of jobs.
14649 if job_count is None:
14650 # unlimited number of jobs
14651 new_args.append("True")
14653 new_args.append(str(job_count))
# Re-emit any short options that were clustered with "j".
14655 if saved_opts is not None:
14656 new_args.append("-" + saved_opts)
# Parse the emerge command line into (myaction, myopts, myfiles) using
# optparse, after normalizing optional-value flags via insert_optional_args().
# NOTE(review): this numbered listing is missing many interior lines (option
# dict keys such as "--config-root"/"--color", the try:/else: lines around the
# --jobs and --load-average conversions, and the myopts/myfiles accumulators);
# verify against the upstream file before editing.
14660 def parse_opts(tmpcmdline, silent=False):
14665 global actions, options, shortmapping
14667 longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
14668 argument_options = {
14670 "help":"specify the location for portage configuration files",
14674 "help":"enable or disable color output",
14676 "choices":("y", "n")
14681 "help" : "Specifies the number of packages to build " + \
14687 "--load-average": {
14689 "help" :"Specifies that no new builds should be started " + \
14690 "if there are other builds running and the load average " + \
14691 "is at least LOAD (a floating-point number).",
14697 "help":"include unnecessary build time dependencies",
14699 "choices":("y", "n")
14702 "help":"specify conditions to trigger package reinstallation",
14704 "choices":["changed-use"]
14708 from optparse import OptionParser
14709 parser = OptionParser()
# emerge provides its own --help action, so drop optparse's builtin one.
14710 if parser.has_option("--help"):
14711 parser.remove_option("--help")
# Register boolean flags for every known action, long option, short-option
# mapping, and long-option alias; all map "-" to "_" for attribute access.
14713 for action_opt in actions:
14714 parser.add_option("--" + action_opt, action="store_true",
14715 dest=action_opt.replace("-", "_"), default=False)
14716 for myopt in options:
14717 parser.add_option(myopt, action="store_true",
14718 dest=myopt.lstrip("--").replace("-", "_"), default=False)
14719 for shortopt, longopt in shortmapping.iteritems():
14720 parser.add_option("-" + shortopt, action="store_true",
14721 dest=longopt.lstrip("--").replace("-", "_"), default=False)
14722 for myalias, myopt in longopt_aliases.iteritems():
14723 parser.add_option(myalias, action="store_true",
14724 dest=myopt.lstrip("--").replace("-", "_"), default=False)
14726 for myopt, kwargs in argument_options.iteritems():
14727 parser.add_option(myopt,
14728 dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
14730 tmpcmdline = insert_optional_args(tmpcmdline)
14732 myoptions, myargs = parser.parse_args(args=tmpcmdline)
# "True" is the placeholder emitted by insert_optional_args for an
# unbounded --jobs request; otherwise the value must parse as an int.
14736 if myoptions.jobs == "True":
14740 jobs = int(myoptions.jobs)
14744 if jobs is not True and \
14748 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
14749 (myoptions.jobs,), noiselevel=-1)
14751 myoptions.jobs = jobs
14753 if myoptions.load_average:
14755 load_average = float(myoptions.load_average)
# Non-positive load averages are rejected (treated as unset).
14759 if load_average <= 0.0:
14760 load_average = None
14762 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
14763 (myoptions.load_average,), noiselevel=-1)
14765 myoptions.load_average = load_average
# Copy parsed values back into the myopts dict keyed by the literal
# option string, and pick the single requested action.
14767 for myopt in options:
14768 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
14770 myopts[myopt] = True
14772 for myopt in argument_options:
14773 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
14777 for action_opt in actions:
14778 v = getattr(myoptions, action_opt.replace("-", "_"))
# A second action is a fatal usage error (see multiple_actions).
14781 multiple_actions(myaction, action_opt)
14783 myaction = action_opt
14787 return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
	"""Run config validation on every root's vartree settings.

	Iterates all configured roots and delegates to each settings
	object's own validate() method.
	"""
	for root in trees:
		trees[root]["vartree"].settings.validate()
def clear_caches(trees):
	"""Discard per-tree dbapi caches to reclaim memory.

	For every root: un-freeze the porttree dbapi (melt discards memoized
	match results -- see the freeze() call in the main startup path),
	clear the porttree/bintree auxiliary caches, the bintree's internal
	cache, and the vartree linkmap cache.  Finally drops portage's
	global directory-listing cache.
	"""
	for tree_data in trees.itervalues():
		port_dbapi = tree_data["porttree"].dbapi
		port_dbapi.melt()
		port_dbapi._aux_cache.clear()
		bin_dbapi = tree_data["bintree"].dbapi
		bin_dbapi._aux_cache.clear()
		bin_dbapi._clear_cache()
		tree_data["vartree"].dbapi.linkmap._clear_cache()
	portage.dircache.clear()
# Build (settings, trees, mtimedb) for the current environment, honoring
# PORTAGE_CONFIGROOT and ROOT overrides from os.environ.
# NOTE(review): gaps in this numbered listing -- the kwargs dict
# initialization and the "kwargs[k] = v" assignment implied by the loop are
# missing; confirm against upstream before editing.
14804 def load_emerge_config(trees=None):
14806 for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
14807 v = os.environ.get(envvar, None)
14808 if v and v.strip():
14810 trees = portage.create_trees(trees=trees, **kwargs)
# Attach a RootConfig (settings + set configuration) to every root.
14812 for root, root_trees in trees.iteritems():
14813 settings = root_trees["vartree"].settings
14814 setconfig = load_default_config(settings, root_trees)
14815 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
14817 settings = trees["/"]["vartree"].settings
14819 for myroot in trees:
14821 settings = trees[myroot]["vartree"].settings
# The mtime database lives under the on-disk cache path ("/var/cache/...").
14824 mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
14825 mtimedb = portage.MtimeDB(mtimedbfile)
14827 return settings, trees, mtimedb
# NOTE(review): this numbered listing is missing several interior lines (the
# "try:" statements preceding the ValueError handlers, and lines around the
# PORTAGE_DEBUG handling); verify against the upstream file before editing.
14829 def adjust_config(myopts, settings):
14830 """Make emerge specific adjustments to the config."""
14832 # To enhance usability, make some vars case insensitive by forcing them to
14834 for myvar in ("AUTOCLEAN", "NOCOLOR"):
14835 if myvar in settings:
14836 settings[myvar] = settings[myvar].lower()
14837 settings.backup_changes(myvar)
14840 # Kill noauto as it will break merges otherwise.
14841 if "noauto" in settings.features:
14842 while "noauto" in settings.features:
14843 settings.features.remove("noauto")
14844 settings["FEATURES"] = " ".join(settings.features)
14845 settings.backup_changes("FEATURES")
# Sanitize CLEAN_DELAY: fall back to the module default on parse failure.
14849 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
14850 except ValueError, e:
14851 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14852 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
14853 settings["CLEAN_DELAY"], noiselevel=-1)
14854 settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
14855 settings.backup_changes("CLEAN_DELAY")
# Sanitize EMERGE_WARNING_DELAY the same way (default: 10 seconds).
14857 EMERGE_WARNING_DELAY = 10
14859 EMERGE_WARNING_DELAY = int(settings.get(
14860 "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
14861 except ValueError, e:
14862 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14863 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
14864 settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
14865 settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
14866 settings.backup_changes("EMERGE_WARNING_DELAY")
# Propagate command-line verbosity flags into the config environment.
14868 if "--quiet" in myopts:
14869 settings["PORTAGE_QUIET"]="1"
14870 settings.backup_changes("PORTAGE_QUIET")
14872 if "--verbose" in myopts:
14873 settings["PORTAGE_VERBOSE"] = "1"
14874 settings.backup_changes("PORTAGE_VERBOSE")
14876 # Set so that configs will be merged regardless of remembered status
14877 if ("--noconfmem" in myopts):
14878 settings["NOCONFMEM"]="1"
14879 settings.backup_changes("NOCONFMEM")
14881 # Set various debug markers... They should be merged somehow.
# PORTAGE_DEBUG must be 0 or 1; anything else is reported and ignored.
14884 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
14885 if PORTAGE_DEBUG not in (0, 1):
14886 portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
14887 PORTAGE_DEBUG, noiselevel=-1)
14888 portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
14891 except ValueError, e:
14892 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14893 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
14894 settings["PORTAGE_DEBUG"], noiselevel=-1)
14896 if "--debug" in myopts:
14898 settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
14899 settings.backup_changes("PORTAGE_DEBUG")
# Color handling: NOCOLOR from the environment, unless overridden below.
14901 if settings.get("NOCOLOR") not in ("yes","true"):
14902 portage.output.havecolor = 1
14904 """The explicit --color < y | n > option overrides the NOCOLOR environment
14905 variable and stdout auto-detection."""
14906 if "--color" in myopts:
14907 if "y" == myopts["--color"]:
14908 portage.output.havecolor = 1
14909 settings["NOCOLOR"] = "false"
14911 portage.output.havecolor = 0
14912 settings["NOCOLOR"] = "true"
14913 settings.backup_changes("NOCOLOR")
# Auto-disable color when stdout is not a tty (unless NOCOLOR="no").
14914 elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
14915 portage.output.havecolor = 0
14916 settings["NOCOLOR"] = "true"
14917 settings.backup_changes("NOCOLOR")
# Apply process scheduling priorities configured in make.conf.
# NOTE(review): the body is absent from this extraction; presumably it calls
# the nice() and ionice() helpers defined below -- confirm against upstream.
14919 def apply_priorities(settings):
# Renice the emerge process by PORTAGE_NICENESS (default "0"); failures are
# reported via EOutput but are non-fatal.
# NOTE(review): the "try:" line preceding the except clause is missing from
# this numbered listing.
14923 def nice(settings):
14925 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
14926 except (OSError, ValueError), e:
14927 out = portage.output.EOutput()
14928 out.eerror("Failed to change nice value to '%s'" % \
14929 settings["PORTAGE_NICENESS"])
14930 out.eerror("%s\n" % str(e))
# Run the user-configured PORTAGE_IONICE_COMMAND (with ${PID} expanded to the
# current pid) to set this process's I/O priority.
# NOTE(review): gaps in this listing -- the empty-command guard, the "try:"
# line, and the silent return inside the CommandNotFound handler are missing.
14932 def ionice(settings):
14934 ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
14936 ionice_cmd = shlex.split(ionice_cmd)
# Substitute ${PID} in each command token before spawning.
14940 from portage.util import varexpand
14941 variables = {"PID" : str(os.getpid())}
14942 cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
14945 rval = portage.process.spawn(cmd, env=os.environ)
14946 except portage.exception.CommandNotFound:
14947 # The OS kernel probably doesn't support ionice,
14948 # so return silently.
14951 if rval != os.EX_OK:
14952 out = portage.output.EOutput()
14953 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
14954 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
# Print an error listing all known package sets when a requested set name
# does not exist.
# NOTE(review): the "msg = []" initialization implied by the appends is
# missing from this numbered listing.
14956 def display_missing_pkg_set(root_config, set_name):
14959 msg.append(("emerge: There are no sets to satisfy '%s'. " + \
14960 "The following sets exist:") % \
14961 colorize("INFORM", set_name))
14964 for s in sorted(root_config.sets):
14965 msg.append(" %s" % s)
14968 writemsg_level("".join("%s\n" % l for l in msg),
14969 level=logging.ERROR, noiselevel=-1)
# Expand @set arguments on the emerge command line: parse optional per-set
# option syntax, evaluate set-algebra expressions (%@ / -@ / +@ style,
# strictly left-to-right), and replace set names with their member atoms
# (except for actions listed in do_not_expand, where the depgraph expands
# sets itself).  Returns (newargs, retval).
# NOTE(review): this numbered listing has many missing interior lines (the
# newargs/options/expr_sets/expr_ops/retval initializations, the IS_OPERATOR
# and ARG_START/ARG_END constants, several for/try/else lines); verify
# against the upstream file before changing logic.
14971 def expand_set_arguments(myfiles, myaction, root_config):
14973 setconfig = root_config.setconfig
14975 sets = setconfig.getSets()
14977 # In order to know exactly which atoms/sets should be added to the
14978 # world file, the depgraph performs set expansion later. It will get
14979 # confused about where the atoms came from if it's not allowed to
14980 # expand them itself.
14981 do_not_expand = (None, )
# Bare "system"/"world" arguments are rewritten with the set prefix.
14984 if a in ("system", "world"):
14985 newargs.append(SETPREFIX+a)
14992 # separators for set arguments
14996 # WARNING: all operators must be of equal length
14998 DIFF_OPERATOR = "-@"
14999 UNION_OPERATOR = "+@"
# First pass: pull "setname(arg,...)" option syntax out of each @set
# argument and push the options into the set configuration.
15001 for i in range(0, len(myfiles)):
15002 if myfiles[i].startswith(SETPREFIX):
15005 x = myfiles[i][len(SETPREFIX):]
15008 start = x.find(ARG_START)
15009 end = x.find(ARG_END)
15010 if start > 0 and start < end:
15011 namepart = x[:start]
15012 argpart = x[start+1:end]
15014 # TODO: implement proper quoting
15015 args = argpart.split(",")
15019 k, v = a.split("=", 1)
# Bare option names (no "=") are treated as boolean True.
15022 options[a] = "True"
15023 setconfig.update(namepart, options)
15024 newset += (x[:start-len(namepart)]+namepart)
15025 x = x[end+len(ARG_END):]
15029 myfiles[i] = SETPREFIX+newset
# Reload sets so the per-set options above take effect.
15031 sets = setconfig.getSets()
15033 # display errors that occured while loading the SetConfig instance
15034 for e in setconfig.errors:
15035 print colorize("BAD", "Error during set creation: %s" % e)
15037 # emerge relies on the existance of sets with names "world" and "system"
15038 required_sets = ("world", "system")
15041 for s in required_sets:
15043 missing_sets.append(s)
# Build a human-readable list of missing required sets.
15045 if len(missing_sets) > 2:
15046 missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
15047 missing_sets_str += ', and "%s"' % missing_sets[-1]
15048 elif len(missing_sets) == 2:
15049 missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
15051 missing_sets_str = '"%s"' % missing_sets[-1]
15052 msg = ["emerge: incomplete set configuration, " + \
15053 "missing set(s): %s" % missing_sets_str]
15055 msg.append(" sets defined: %s" % ", ".join(sets))
15056 msg.append(" This usually means that '%s'" % \
15057 (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
15058 msg.append(" is missing or corrupt.")
15060 writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
15062 unmerge_actions = ("unmerge", "prune", "clean", "depclean")
# Second pass: evaluate set expressions / expand plain set references.
15065 if a.startswith(SETPREFIX):
15066 # support simple set operations (intersection, difference and union)
15067 # on the commandline. Expressions are evaluated strictly left-to-right
15068 if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
15069 expression = a[len(SETPREFIX):]
# Peel operators off the right end; operands/operators are collected
# front-to-back so evaluation below proceeds left-to-right.
15072 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15073 is_pos = expression.rfind(IS_OPERATOR)
15074 diff_pos = expression.rfind(DIFF_OPERATOR)
15075 union_pos = expression.rfind(UNION_OPERATOR)
15076 op_pos = max(is_pos, diff_pos, union_pos)
15077 s1 = expression[:op_pos]
# All operators are the same length, so IS_OPERATOR's length works here.
15078 s2 = expression[op_pos+len(IS_OPERATOR):]
15079 op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15081 display_missing_pkg_set(root_config, s2)
15083 expr_sets.insert(0, s2)
15084 expr_ops.insert(0, op)
15086 if not expression in sets:
15087 display_missing_pkg_set(root_config, expression)
15089 expr_sets.insert(0, expression)
15090 result = set(setconfig.getSetAtoms(expression))
15091 for i in range(0, len(expr_ops)):
15092 s2 = setconfig.getSetAtoms(expr_sets[i+1])
15093 if expr_ops[i] == IS_OPERATOR:
15094 result.intersection_update(s2)
15095 elif expr_ops[i] == DIFF_OPERATOR:
15096 result.difference_update(s2)
15097 elif expr_ops[i] == UNION_OPERATOR:
15100 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15101 newargs.extend(result)
15103 s = a[len(SETPREFIX):]
15105 display_missing_pkg_set(root_config, s)
15107 setconfig.active.append(s)
15109 set_atoms = setconfig.getSetAtoms(s)
15110 except portage.exception.PackageSetNotFound, e:
15111 writemsg_level(("emerge: the given set '%s' " + \
15112 "contains a non-existent set named '%s'.\n") % \
15113 (s, e), level=logging.ERROR, noiselevel=-1)
# Unmerge-style actions require the set to support unmerging.
15115 if myaction in unmerge_actions and \
15116 not sets[s].supportsOperation("unmerge"):
15117 sys.stderr.write("emerge: the given set '%s' does " % s + \
15118 "not support unmerge operations\n")
15120 elif not set_atoms:
15121 print "emerge: '%s' is an empty set" % s
15122 elif myaction not in do_not_expand:
15123 newargs.extend(set_atoms)
# For do_not_expand actions, keep the @set token for the depgraph.
15125 newargs.append(SETPREFIX+s)
15126 for e in sets[s].errors:
15130 return (newargs, retval)
# Warn about configured repositories that lack a profiles/repo_name entry;
# returns True if any were missing.
# NOTE(review): gaps in this listing -- the "for r in repos:" loop header and
# the "msg = []" initialization are missing; confirm against upstream.
15132 def repo_name_check(trees):
15133 missing_repo_names = set()
15134 for root, root_trees in trees.iteritems():
15135 if "porttree" in root_trees:
15136 portdb = root_trees["porttree"].dbapi
# Start from all tree paths, then discard those with a known repo name.
15137 missing_repo_names.update(portdb.porttrees)
15138 repos = portdb.getRepositories()
15140 missing_repo_names.discard(portdb.getRepositoryPath(r))
15141 if portdb.porttree_root in missing_repo_names and \
15142 not os.path.exists(os.path.join(
15143 portdb.porttree_root, "profiles")):
15144 # This is normal if $PORTDIR happens to be empty,
15145 # so don't warn about it.
15146 missing_repo_names.remove(portdb.porttree_root)
15148 if missing_repo_names:
15150 msg.append("WARNING: One or more repositories " + \
15151 "have missing repo_name entries:")
15153 for p in missing_repo_names:
15154 msg.append("\t%s/profiles/repo_name" % (p,))
15156 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
15157 "should be a plain text file containing a unique " + \
15158 "name for the repository on the first line.", 70))
15159 writemsg_level("".join("%s\n" % l for l in msg),
15160 level=logging.WARNING, noiselevel=-1)
15162 return bool(missing_repo_names)
# Warn (per root) when CONFIG_PROTECT is empty, since config-file protection
# would then be disabled.
# NOTE(review): a line is missing between 15167 and 15169 -- presumably a
# guard so the "for '%s'" suffix is only added for non-default roots; also
# the message appears to lack a trailing newline -- confirm against upstream.
15164 def config_protect_check(trees):
15165 for root, root_trees in trees.iteritems():
15166 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
15167 msg = "!!! CONFIG_PROTECT is empty"
15169 msg += " for '%s'" % root
15170 writemsg_level(msg, level=logging.WARN, noiselevel=-1)
# Report an ambiguous short package name: in quiet mode just list the
# matching category/package keys; otherwise run a search to show richer
# output for each match, then repeat the instruction.
# NOTE(review): interior lines are missing (the early return for quiet mode,
# the insert_category_into_atom() arguments, and the per-match search/output
# calls in the loop at 15188); confirm against upstream before editing.
15172 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
15174 if "--quiet" in myopts:
15175 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15176 print "!!! one of the following fully-qualified ebuild names instead:\n"
15177 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15178 print " " + colorize("INFORM", cp)
15181 s = search(root_config, spinner, "--searchdesc" in myopts,
15182 "--quiet" not in myopts, "--usepkg" in myopts,
15183 "--usepkgonly" in myopts)
15184 null_cp = portage.dep_getkey(insert_category_into_atom(
15186 cat, atom_pn = portage.catsplit(null_cp)
15187 s.searchkey = atom_pn
15188 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15191 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15192 print "!!! one of the above fully-qualified ebuild names instead.\n"
# Verify that every root has a valid profile before allowing most actions;
# info/sync/--version/--help are exempt so a broken profile can be repaired.
# NOTE(review): the early "return os.EX_OK" lines and the final error return
# implied by the structure are missing from this numbered listing.
15194 def profile_check(trees, myaction, myopts):
15195 if myaction in ("info", "sync"):
15197 elif "--version" in myopts or "--help" in myopts:
15199 for root, root_trees in trees.iteritems():
15200 if root_trees["root_config"].settings.profiles:
15202 # generate some profile related warning messages
15203 validate_ebuild_environment(trees)
15204 msg = "If you have just changed your profile configuration, you " + \
15205 "should revert back to the previous configuration. Due to " + \
15206 "your current profile being invalid, allowed actions are " + \
15207 "limited to --help, --info, --sync, and --version."
15208 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
15209 level=logging.ERROR, noiselevel=-1)
15214 global portage # NFC why this is necessary now - genone
15215 portage._disable_legacy_globals()
15216 # Disable color until we're sure that it should be enabled (after
15217 # EMERGE_DEFAULT_OPTS has been parsed).
15218 portage.output.havecolor = 0
15219 # This first pass is just for options that need to be known as early as
15220 # possible, such as --config-root. They will be parsed again later,
15221 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15222 # the value of --config-root).
15223 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15224 if "--debug" in myopts:
15225 os.environ["PORTAGE_DEBUG"] = "1"
15226 if "--config-root" in myopts:
15227 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15229 # Portage needs to ensure a sane umask for the files it creates.
15231 settings, trees, mtimedb = load_emerge_config()
15232 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15233 rval = profile_check(trees, myaction, myopts)
15234 if rval != os.EX_OK:
15237 if portage._global_updates(trees, mtimedb["updates"]):
15239 # Reload the whole config from scratch.
15240 settings, trees, mtimedb = load_emerge_config(trees=trees)
15241 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15243 xterm_titles = "notitles" not in settings.features
15246 if "--ignore-default-opts" not in myopts:
15247 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15248 tmpcmdline.extend(sys.argv[1:])
15249 myaction, myopts, myfiles = parse_opts(tmpcmdline)
15251 if "--digest" in myopts:
15252 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15253 # Reload the whole config from scratch so that the portdbapi internal
15254 # config is updated with new FEATURES.
15255 settings, trees, mtimedb = load_emerge_config(trees=trees)
15256 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15258 for myroot in trees:
15259 mysettings = trees[myroot]["vartree"].settings
15260 mysettings.unlock()
15261 adjust_config(myopts, mysettings)
15262 if "--pretend" not in myopts:
15263 mysettings["PORTAGE_COUNTER_HASH"] = \
15264 trees[myroot]["vartree"].dbapi._counter_hash()
15265 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15267 del myroot, mysettings
15269 apply_priorities(settings)
15271 spinner = stdout_spinner()
15272 if "candy" in settings.features:
15273 spinner.update = spinner.update_scroll
15275 if "--quiet" not in myopts:
15276 portage.deprecated_profile_check(settings=settings)
15277 repo_name_check(trees)
15278 config_protect_check(trees)
15280 eclasses_overridden = {}
15281 for mytrees in trees.itervalues():
15282 mydb = mytrees["porttree"].dbapi
15283 # Freeze the portdbapi for performance (memoize all xmatch results).
15285 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
15288 if eclasses_overridden and \
15289 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
15290 prefix = bad(" * ")
15291 if len(eclasses_overridden) == 1:
15292 writemsg(prefix + "Overlay eclass overrides " + \
15293 "eclass from PORTDIR:\n", noiselevel=-1)
15295 writemsg(prefix + "Overlay eclasses override " + \
15296 "eclasses from PORTDIR:\n", noiselevel=-1)
15297 writemsg(prefix + "\n", noiselevel=-1)
15298 for eclass_name in sorted(eclasses_overridden):
15299 writemsg(prefix + " '%s/%s.eclass'\n" % \
15300 (eclasses_overridden[eclass_name], eclass_name),
15302 writemsg(prefix + "\n", noiselevel=-1)
15303 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
15304 "because it will trigger invalidation of cached ebuild metadata " + \
15305 "that is distributed with the portage tree. If you must " + \
15306 "override eclasses from PORTDIR then you are advised to add " + \
15307 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
15308 "`emerge --regen` after each time that you run `emerge --sync`. " + \
15309 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
15310 "you would like to disable this warning."
15311 from textwrap import wrap
15312 for line in wrap(msg, 72):
15313 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
15315 if "moo" in myfiles:
15318 Larry loves Gentoo (""" + platform.system() + """)
15320 _______________________
15321 < Have you mooed today? >
15322 -----------------------
15332 ext = os.path.splitext(x)[1]
15333 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15334 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15337 root_config = trees[settings["ROOT"]]["root_config"]
15338 if myaction == "list-sets":
15339 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15343 # only expand sets for actions taking package arguments
15344 oldargs = myfiles[:]
15345 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15346 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15347 if retval != os.EX_OK:
15350 # Need to handle empty sets specially, otherwise emerge will react
15351 # with the help message for empty argument lists
15352 if oldargs and not myfiles:
15353 print "emerge: no targets left after set expansion"
15356 if ("--tree" in myopts) and ("--columns" in myopts):
15357 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15360 if ("--quiet" in myopts):
15361 spinner.update = spinner.update_quiet
15362 portage.util.noiselimit = -1
15364 # Always create packages if FEATURES=buildpkg
15365 # Imply --buildpkg if --buildpkgonly
15366 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15367 if "--buildpkg" not in myopts:
15368 myopts["--buildpkg"] = True
15370 # Also allow -S to invoke search action (-sS)
15371 if ("--searchdesc" in myopts):
15372 if myaction and myaction != "search":
15373 myfiles.append(myaction)
15374 if "--search" not in myopts:
15375 myopts["--search"] = True
15376 myaction = "search"
15378 # Always try and fetch binary packages if FEATURES=getbinpkg
15379 if ("getbinpkg" in settings.features):
15380 myopts["--getbinpkg"] = True
15382 if "--buildpkgonly" in myopts:
15383 # --buildpkgonly will not merge anything, so
15384 # it cancels all binary package options.
15385 for opt in ("--getbinpkg", "--getbinpkgonly",
15386 "--usepkg", "--usepkgonly"):
15387 myopts.pop(opt, None)
15389 if "--fetch-all-uri" in myopts:
15390 myopts["--fetchonly"] = True
15392 if "--skipfirst" in myopts and "--resume" not in myopts:
15393 myopts["--resume"] = True
15395 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15396 myopts["--usepkgonly"] = True
15398 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15399 myopts["--getbinpkg"] = True
15401 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15402 myopts["--usepkg"] = True
15404 # Also allow -K to apply --usepkg/-k
15405 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15406 myopts["--usepkg"] = True
15408 # Allow -p to remove --ask
15409 if ("--pretend" in myopts) and ("--ask" in myopts):
15410 print ">>> --pretend disables --ask... removing --ask from options."
15411 del myopts["--ask"]
15413 # forbid --ask when not in a terminal
15414 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15415 if ("--ask" in myopts) and (not sys.stdin.isatty()):
15416 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15420 if settings.get("PORTAGE_DEBUG", "") == "1":
15421 spinner.update = spinner.update_quiet
15423 if "python-trace" in settings.features:
15424 import portage.debug
15425 portage.debug.set_trace(True)
15427 if not ("--quiet" in myopts):
15428 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15429 spinner.update = spinner.update_basic
15431 if "--version" in myopts:
15432 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15433 settings.profile_path, settings["CHOST"],
15434 trees[settings["ROOT"]]["vartree"].dbapi)
15436 elif "--help" in myopts:
15437 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15440 if "--debug" in myopts:
15441 print "myaction", myaction
15442 print "myopts", myopts
15444 if not myaction and not myfiles and "--resume" not in myopts:
15445 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15448 pretend = "--pretend" in myopts
15449 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15450 buildpkgonly = "--buildpkgonly" in myopts
15452 # check if root user is the current user for the actions where emerge needs this
15453 if portage.secpass < 2:
15454 # We've already allowed "--version" and "--help" above.
15455 if "--pretend" not in myopts and myaction not in ("search","info"):
15456 need_superuser = not \
15458 (buildpkgonly and secpass >= 1) or \
15459 myaction in ("metadata", "regen") or \
15460 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15461 if portage.secpass < 1 or \
15464 access_desc = "superuser"
15466 access_desc = "portage group"
15467 # Always show portage_group_warning() when only portage group
15468 # access is required but the user is not in the portage group.
15469 from portage.data import portage_group_warning
15470 if "--ask" in myopts:
15471 myopts["--pretend"] = True
15472 del myopts["--ask"]
15473 print ("%s access is required... " + \
15474 "adding --pretend to options.\n") % access_desc
15475 if portage.secpass < 1 and not need_superuser:
15476 portage_group_warning()
15478 sys.stderr.write(("emerge: %s access is " + \
15479 "required.\n\n") % access_desc)
15480 if portage.secpass < 1 and not need_superuser:
15481 portage_group_warning()
15484 disable_emergelog = False
15485 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15487 disable_emergelog = True
15489 if myaction in ("search", "info"):
15490 disable_emergelog = True
15491 if disable_emergelog:
15492 """ Disable emergelog for everything except build or unmerge
15493 operations. This helps minimize parallel emerge.log entries that can
15494 confuse log parsers. We especially want it disabled during
15495 parallel-fetch, which uses --resume --fetchonly."""
15497 def emergelog(*pargs, **kargs):
15500 if not "--pretend" in myopts:
15501 emergelog(xterm_titles, "Started emerge on: "+\
15502 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15505 myelogstr=" ".join(myopts)
15507 myelogstr+=" "+myaction
15509 myelogstr += " " + " ".join(oldargs)
15510 emergelog(xterm_titles, " *** emerge " + myelogstr)
15513 def emergeexitsig(signum, frame):
15514 signal.signal(signal.SIGINT, signal.SIG_IGN)
15515 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15516 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15517 sys.exit(100+signum)
15518 signal.signal(signal.SIGINT, emergeexitsig)
15519 signal.signal(signal.SIGTERM, emergeexitsig)
15522 """This gets out final log message in before we quit."""
15523 if "--pretend" not in myopts:
15524 emergelog(xterm_titles, " *** terminating.")
15525 if "notitles" not in settings.features:
15527 portage.atexit_register(emergeexit)
15529 if myaction in ("config", "metadata", "regen", "sync"):
15530 if "--pretend" in myopts:
15531 sys.stderr.write(("emerge: The '%s' action does " + \
15532 "not support '--pretend'.\n") % myaction)
15535 if "sync" == myaction:
15536 return action_sync(settings, trees, mtimedb, myopts, myaction)
15537 elif "metadata" == myaction:
15538 action_metadata(settings, portdb, myopts)
15539 elif myaction=="regen":
15540 validate_ebuild_environment(trees)
15541 return action_regen(settings, portdb, myopts.get("--jobs"),
15542 myopts.get("--load-average"))
15544 elif "config"==myaction:
15545 validate_ebuild_environment(trees)
15546 action_config(settings, trees, myopts, myfiles)
15549 elif "search"==myaction:
15550 validate_ebuild_environment(trees)
15551 action_search(trees[settings["ROOT"]]["root_config"],
15552 myopts, myfiles, spinner)
15553 elif myaction in ("clean", "unmerge") or \
15554 (myaction == "prune" and "--nodeps" in myopts):
15555 validate_ebuild_environment(trees)
15557 # Ensure atoms are valid before calling unmerge().
15558 # For backward compat, leading '=' is not required.
15560 if is_valid_package_atom(x) or \
15561 is_valid_package_atom("=" + x):
15564 msg.append("'%s' is not a valid package atom." % (x,))
15565 msg.append("Please check ebuild(5) for full details.")
15566 writemsg_level("".join("!!! %s\n" % line for line in msg),
15567 level=logging.ERROR, noiselevel=-1)
15570 # When given a list of atoms, unmerge
15571 # them in the order given.
15572 ordered = myaction == "unmerge"
15573 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15574 mtimedb["ldpath"], ordered=ordered):
15575 if not (buildpkgonly or fetchonly or pretend):
15576 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15578 elif myaction in ("depclean", "info", "prune"):
15580 # Ensure atoms are valid before calling unmerge().
15581 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15584 if is_valid_package_atom(x):
15586 valid_atoms.append(
15587 portage.dep_expand(x, mydb=vardb, settings=settings))
15588 except portage.exception.AmbiguousPackageName, e:
15589 msg = "The short ebuild name \"" + x + \
15590 "\" is ambiguous. Please specify " + \
15591 "one of the following " + \
15592 "fully-qualified ebuild names instead:"
15593 for line in textwrap.wrap(msg, 70):
15594 writemsg_level("!!! %s\n" % (line,),
15595 level=logging.ERROR, noiselevel=-1)
15597 writemsg_level(" %s\n" % colorize("INFORM", i),
15598 level=logging.ERROR, noiselevel=-1)
15599 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15603 msg.append("'%s' is not a valid package atom." % (x,))
15604 msg.append("Please check ebuild(5) for full details.")
15605 writemsg_level("".join("!!! %s\n" % line for line in msg),
15606 level=logging.ERROR, noiselevel=-1)
15609 if myaction == "info":
15610 return action_info(settings, trees, myopts, valid_atoms)
15612 validate_ebuild_environment(trees)
15613 action_depclean(settings, trees, mtimedb["ldpath"],
15614 myopts, myaction, valid_atoms, spinner)
15615 if not (buildpkgonly or fetchonly or pretend):
15616 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15617 # "update", "system", or just process files:
15619 validate_ebuild_environment(trees)
15620 if "--pretend" not in myopts:
15621 display_news_notification(root_config, myopts)
15622 retval = action_build(settings, trees, mtimedb,
15623 myopts, myaction, myfiles, spinner)
15624 root_config = trees[settings["ROOT"]]["root_config"]
15625 post_emerge(root_config, myopts, mtimedb, retval)