2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
8 from collections import deque
28 from os import path as osp
29 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
32 from portage import digraph
33 from portage.const import NEWS_LIB_PATH
36 import portage.xpak, commands, errno, re, socket, time
37 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
38 nc_len, red, teal, turquoise, xtermTitle, \
39 xtermTitleReset, yellow
40 from portage.output import create_color_func
41 good = create_color_func("GOOD")
42 bad = create_color_func("BAD")
43 # white looks bad on terminals with white background
44 from portage.output import bold as white
48 portage.dep._dep_check_strict = True
51 import portage.exception
52 from portage.data import secpass
53 from portage.elog.messages import eerror
54 from portage.util import normalize_path as normpath
55 from portage.util import cmp_sort_key, writemsg, writemsg_level
56 from portage.sets import load_default_config, SETPREFIX
57 from portage.sets.base import InternalPackageSet
59 from itertools import chain, izip
62 import cPickle as pickle
67 from cStringIO import StringIO
69 from StringIO import StringIO
# Terminal progress spinner with several display modes (basic dots, scrolling
# message, twirl character).  self.update is rebound to one of the update_*
# methods depending on the selected mode.
# NOTE(review): this listing is elided (original line numbers jump), so some
# statements of each method are not visible here.
71 class stdout_spinner(object):
# Humorous messages shown by the "scroll" mode; one is picked per run.
73 "Gentoo Rocks ("+platform.system()+")",
74 "Thank you for using Gentoo. :)",
75 "Are you actually trying to read this?",
76 "How many times have you stared at this?",
77 "We are generating the cache right now",
78 "You are paying too much attention.",
79 "A theory is better than its explanation.",
80 "Phasers locked on target, Captain.",
81 "Thrashing is just virtual crashing.",
82 "To be is to program.",
83 "Real Users hate Real Programmers.",
84 "When all else fails, read the instructions.",
85 "Functionality breeds Contempt.",
86 "The future lies ahead.",
87 "3.1415926535897932384626433832795028841971694",
88 "Sometimes insanity is the only alternative.",
89 "Inaccuracy saves a world of explanation.",
# Characters cycled through by the "twirl" mode.
92 twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
# Default mode is twirl; a scroll message is chosen pseudo-randomly from the
# current time.
96 self.update = self.update_twirl
97 self.scroll_sequence = self.scroll_msgs[
98 int(time.time() * 100) % len(self.scroll_msgs)]
# Throttle: do not redraw more often than every 0.05 s.
100 self.min_display_latency = 0.05
102 def _return_early(self):
104 Flushing output to the tty too frequently wastes cpu time. Therefore,
105 each update* method should return without doing any output when this
# Rate limiter: compares elapsed time since last update against
# min_display_latency and records the time of the accepted update.
108 cur_time = time.time()
109 if cur_time - self.last_update < self.min_display_latency:
111 self.last_update = cur_time
# Prints a dot (with a leading space at each 100-step wrap) every 100 ticks.
114 def update_basic(self):
115 self.spinpos = (self.spinpos + 1) % 500
116 if self._return_early():
118 if (self.spinpos % 100) == 0:
119 if self.spinpos == 0:
120 sys.stdout.write(". ")
122 sys.stdout.write(".")
# Scrolls the chosen message back and forth using backspace characters;
# spinpos runs over twice the message length to cover both directions.
125 def update_scroll(self):
126 if self._return_early():
128 if(self.spinpos >= len(self.scroll_sequence)):
129 sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
130 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
132 sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
134 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
# Rotates through the twirl_sequence characters in place.
136 def update_twirl(self):
137 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
138 if self._return_early():
140 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
# No-op spinner used when --quiet/--nospinner style behavior is wanted.
143 def update_quiet(self):
# Interactive yes/no (or custom-choice) prompt helper.
146 def userquery(prompt, responses=None, colours=None):
147 """Displays a prompt and a set of responses, then waits for a response
148 which is checked against the responses and the first to match is
149 returned. An empty response will match the first value in responses. The
150 input buffer is *not* cleared prior to the prompt!
153 responses: a List of Strings.
154 colours: a List of Functions taking and returning a String, used to
155 process the responses for display. Typically these will be functions
156 like red() but could be e.g. lambda x: "DisplayString".
157 If responses is omitted, defaults to ["Yes", "No"], [green, red].
158 If only colours is omitted, defaults to [bold, ...].
160 Returns a member of the List responses. (If called without optional
161 arguments, returns "Yes" or "No".)
162 KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
164 if responses is None:
165 responses = ["Yes", "No"]
# Default colours come from the user's PROMPT_CHOICE_* colour classes.
167 create_color_func("PROMPT_CHOICE_DEFAULT"),
168 create_color_func("PROMPT_CHOICE_OTHER")
170 elif colours is None:
# Repeat the colour list so it is at least as long as responses, then trim.
172 colours=(colours*len(responses))[:len(responses)]
176 response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
177 for key in responses:
178 # An empty response will match the first value in responses.
# Case-insensitive prefix match: "y" matches "Yes".
179 if response.upper()==key[:len(response)].upper():
181 print "Sorry, response '%s' not understood." % response,
# EOF/Ctrl-C during input is handled here (converted per docstring above).
182 except (EOFError, KeyboardInterrupt):
# Top-level emerge command-line vocabulary:
#   actions      — the non-option verbs emerge accepts;
#   (long list)  — valid "--" options;
#   (mapping)    — single-letter short options mapped to their long forms.
# NOTE(review): listing is elided; closing brackets and some entries of these
# literals are not visible here.
186 actions = frozenset([
187 "clean", "config", "depclean",
188 "info", "list-sets", "metadata",
189 "prune", "regen", "search",
190 "sync", "unmerge", "version",
193 "--ask", "--alphabetical",
194 "--buildpkg", "--buildpkgonly",
195 "--changelog", "--columns",
200 "--fetchonly", "--fetch-all-uri",
201 "--getbinpkg", "--getbinpkgonly",
202 "--help", "--ignore-default-opts",
205 "--newuse", "--nocolor",
206 "--nodeps", "--noreplace",
207 "--nospinner", "--oneshot",
208 "--onlydeps", "--pretend",
209 "--quiet", "--resume",
210 "--rdeps-only", "--root-deps",
211 "--searchdesc", "--selective",
215 "--usepkg", "--usepkgonly",
222 "b":"--buildpkg", "B":"--buildpkgonly",
223 "c":"--clean", "C":"--unmerge",
224 "d":"--debug", "D":"--deep",
226 "f":"--fetchonly", "F":"--fetch-all-uri",
227 "g":"--getbinpkg", "G":"--getbinpkgonly",
229 "k":"--usepkg", "K":"--usepkgonly",
231 "n":"--noreplace", "N":"--newuse",
232 "o":"--onlydeps", "O":"--nodeps",
233 "p":"--pretend", "P":"--prune",
235 "s":"--search", "S":"--searchdesc",
238 "v":"--verbose", "V":"--version"
# Append a timestamped message to /var/log/emerge.log, optionally updating the
# xterm title with a short form of the message.  The log file is locked while
# writing and permissions are normalized to the portage user/group.
241 def emergelog(xterm_titles, mystr, short_msg=None):
242 if xterm_titles and short_msg:
# Prefix the short message with the hostname when available.
243 if "HOSTNAME" in os.environ:
244 short_msg = os.environ["HOSTNAME"]+": "+short_msg
245 xtermTitle(short_msg)
247 file_path = "/var/log/emerge.log"
248 mylogfile = open(file_path, "a")
249 portage.util.apply_secpass_permissions(file_path,
250 uid=portage.portage_uid, gid=portage.portage_gid,
254 mylock = portage.locks.lockfile(mylogfile)
255 # seek because we may have gotten held up by the lock.
256 # if so, we may not be positioned at the end of the file.
# Timestamp is the integer part of time.time() (first 10 chars).
258 mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
262 portage.locks.unlockfile(mylock)
# Best-effort: logging failures are reported to stderr, not raised.
264 except (IOError,OSError,portage.exception.PortageException), e:
266 print >> sys.stderr, "emergelog():",e
# Print a visible countdown before a destructive action, giving the user a
# chance to abort with Control-C.  The per-second loop body is elided in this
# listing.
268 def countdown(secs=5, doing="Starting"):
270 print ">>> Waiting",secs,"seconds before starting..."
271 print ">>> (Control-C to abort)...\n"+doing+" in: ",
275 sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
280 # formats a size given in bytes nicely
# Returns a human-readable kB string; rounds up partial kilobytes and inserts
# a thousands separator.
281 def format_size(mysize):
# Guard: presumably a non-numeric input short-circuits here — the elided
# branch body is not visible in this listing.
282 if isinstance(mysize, basestring):
284 if 0 != mysize % 1024:
285 # Always round up to the next kB so that it doesn't show 0 kB when
286 # some small file still needs to be fetched.
287 mysize += 1024 - mysize % 1024
288 mystr=str(mysize/1024)
# Insert a comma as thousands separator (mycount computed on elided lines).
292 mystr=mystr[:mycount]+","+mystr[mycount:]
# Determine the active gcc version, trying progressively weaker sources:
# gcc-config, then ${CHOST}-gcc -dumpversion, then plain gcc -dumpversion.
296 def getgccversion(chost):
299 return: the current in-use gcc version
302 gcc_ver_command = 'gcc -dumpversion'
303 gcc_ver_prefix = 'gcc-'
305 gcc_not_found_error = red(
306 "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
307 "!!! to update the environment of this terminal and possibly\n" +
308 "!!! other terminals also.\n"
# 1) Preferred: ask gcc-config for the current profile and strip the CHOST
# prefix from its answer.
311 mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
312 if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
313 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
# 2) Fall back to the CHOST-prefixed compiler binary.
315 mystatus, myoutput = commands.getstatusoutput(
316 chost + "-" + gcc_ver_command)
317 if mystatus == os.EX_OK:
318 return gcc_ver_prefix + myoutput
# 3) Last resort: unprefixed gcc.
320 mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
321 if mystatus == os.EX_OK:
322 return gcc_ver_prefix + myoutput
# Nothing worked: warn the user and return a placeholder string.
324 portage.writemsg(gcc_not_found_error, noiselevel=-1)
325 return "[unavailable]"
# Build the "Portage X.Y.Z (profile, gcc, libc, kernel/arch)" banner string
# shown by emerge --info/--version.
327 def getportageversion(portdir, target_root, profile, chost, vardb):
328 profilever = "unavailable"
# Express the profile as a path relative to $PORTDIR/profiles when possible;
# otherwise fall back to the raw symlink target prefixed with "!".
330 realpath = os.path.realpath(profile)
331 basepath = os.path.realpath(os.path.join(portdir, "profiles"))
332 if realpath.startswith(basepath):
333 profilever = realpath[1 + len(basepath):]
336 profilever = "!" + os.readlink(profile)
339 del realpath, basepath
# Collect installed libc versions from both virtual/libc and virtual/glibc.
342 libclist = vardb.match("virtual/libc")
343 libclist += vardb.match("virtual/glibc")
344 libclist = portage.util.unique_array(libclist)
346 xs=portage.catpkgsplit(x)
# Join version components; multiple providers are comma-separated.
348 libcver+=","+"-".join(xs[1:])
350 libcver="-".join(xs[1:])
352 libcver="unavailable"
354 gccver = getgccversion(chost)
355 unameout=platform.release()+" "+platform.machine()
357 return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
# Translate emerge options/action into the set of depgraph engine parameters.
359 def create_depgraph_params(myopts, myaction):
360 #configure emerge engine parameters
362 # self: include _this_ package regardless of if it is merged.
363 # selective: exclude the package if it is merged
364 # recurse: go into the dependencies
365 # deep: go into the dependencies of already merged packages
366 # empty: pretend nothing is merged
367 # complete: completely account for all known dependencies
368 # remove: build graph for use in removing packages
369 myparams = set(["recurse"])
# Removal actions need a complete graph so reverse deps are accounted for.
371 if myaction == "remove":
372 myparams.add("remove")
373 myparams.add("complete")
# Update-style options imply selective behavior (skip already-merged pkgs).
376 if "--update" in myopts or \
377 "--newuse" in myopts or \
378 "--reinstall" in myopts or \
379 "--noreplace" in myopts:
380 myparams.add("selective")
# --emptytree overrides selectivity: rebuild everything from scratch.
381 if "--emptytree" in myopts:
382 myparams.add("empty")
383 myparams.discard("selective")
384 if "--nodeps" in myopts:
385 myparams.discard("recurse")
386 if "--deep" in myopts:
388 if "--complete-graph" in myopts:
389 myparams.add("complete")
392 # search functionality
# Implements `emerge --search` / `--searchdesc`: matches the search key
# against package names (and optionally DESCRIPTION metadata and package
# sets) across a stack of package databases (port tree, binary packages,
# installed packages), then pretty-prints the results.
# NOTE(review): this listing is elided; some statements of each method are
# not visible here.
393 class search(object):
404 def __init__(self, root_config, spinner, searchdesc,
405 verbose, usepkg, usepkgonly):
406 """Searches the available and installed packages for the supplied search key.
407 The list of available and installed packages is created at object instantiation.
408 This makes successive searches faster."""
409 self.settings = root_config.settings
410 self.vartree = root_config.trees["vartree"]
411 self.spinner = spinner
412 self.verbose = verbose
413 self.searchdesc = searchdesc
414 self.root_config = root_config
415 self.setconfig = root_config.setconfig
416 self.matches = {"pkg" : []}
# self.portdb is a facade object whose db-like methods are routed to the
# _-prefixed multiplexing helpers below, so searches span all of self._dbs.
421 self.portdb = fake_portdb
422 for attrib in ("aux_get", "cp_all",
423 "xmatch", "findname", "getFetchMap"):
424 setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
428 portdb = root_config.trees["porttree"].dbapi
429 bindb = root_config.trees["bintree"].dbapi
430 vardb = root_config.trees["vartree"].dbapi
# Choose which databases participate, honoring --usepkg/--usepkgonly.
432 if not usepkgonly and portdb._have_root_eclass_dir:
433 self._dbs.append(portdb)
435 if (usepkg or usepkgonly) and bindb.cp_all():
436 self._dbs.append(bindb)
438 self._dbs.append(vardb)
439 self._portdb = portdb
# _cp_all: union of category/package names from all participating dbs.
444 cp_all.update(db.cp_all())
445 return list(sorted(cp_all))
# _aux_get: delegate metadata lookup to the first db that can serve it.
447 def _aux_get(self, *args, **kwargs):
450 return db.aux_get(*args, **kwargs)
455 def _findname(self, *args, **kwargs):
457 if db is not self._portdb:
458 # We don't want findname to return anything
459 # unless it's an ebuild in a portage tree.
460 # Otherwise, it's already built and we don't
463 func = getattr(db, "findname", None)
465 value = func(*args, **kwargs)
# _getFetchMap: delegate SRC_URI fetch-map lookup to a db that supports it.
470 def _getFetchMap(self, *args, **kwargs):
472 func = getattr(db, "getFetchMap", None)
474 value = func(*args, **kwargs)
# Visibility check for a cpv in a given db, via a throwaway Package.
479 def _visible(self, db, cpv, metadata):
480 installed = db is self.vartree.dbapi
481 built = installed or db is not self._portdb
484 pkg_type = "installed"
487 return visible(self.settings,
488 Package(type_name=pkg_type, root_config=self.root_config,
489 cpv=cpv, built=built, installed=installed, metadata=metadata))
# Multiplexed xmatch supporting match-all / match-visible /
# bestmatch-visible across all participating dbs.
491 def _xmatch(self, level, atom):
493 This method does not expand old-style virtuals because it
494 is restricted to returning matches for a single ${CATEGORY}/${PN}
495 and old-style virtual matches unreliable for that when querying
496 multiple package databases. If necessary, old-style virtuals
497 can be performed on atoms prior to calling this method.
499 cp = portage.dep_getkey(atom)
500 if level == "match-all":
503 if hasattr(db, "xmatch"):
504 matches.update(db.xmatch(level, atom))
506 matches.update(db.match(atom))
# Restrict to the atom's own cat/pkg (no virtual expansion, see above).
507 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
508 db._cpv_sort_ascending(result)
509 elif level == "match-visible":
512 if hasattr(db, "xmatch"):
513 matches.update(db.xmatch(level, atom))
# Dbs without xmatch: filter their plain matches by visibility.
515 db_keys = list(db._aux_cache_keys)
516 for cpv in db.match(atom):
517 metadata = izip(db_keys,
518 db.aux_get(cpv, db_keys))
519 if not self._visible(db, cpv, metadata):
522 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
523 db._cpv_sort_ascending(result)
524 elif level == "bestmatch-visible":
527 if hasattr(db, "xmatch"):
528 cpv = db.xmatch("bestmatch-visible", atom)
529 if not cpv or portage.cpv_getkey(cpv) != cp:
# Keep whichever candidate portage.best() ranks highest across dbs.
531 if not result or cpv == portage.best([cpv, result]):
534 db_keys = Package.metadata_keys
535 # break out of this loop with highest visible
536 # match, checked in descending order
537 for cpv in reversed(db.match(atom)):
538 if portage.cpv_getkey(cpv) != cp:
540 metadata = izip(db_keys,
541 db.aux_get(cpv, db_keys))
542 if not self._visible(db, cpv, metadata):
544 if not result or cpv == portage.best([cpv, result]):
548 raise NotImplementedError(level)
551 def execute(self,searchkey):
552 """Performs the search for the supplied search key"""
554 self.searchkey=searchkey
555 self.packagematches = []
# "desc" bucket only exists when description searching is enabled.
558 self.matches = {"pkg":[], "desc":[], "set":[]}
561 self.matches = {"pkg":[], "set":[]}
562 print "Searching... ",
# Leading '%' selects regex matching; leading '@' restricts to sets.
565 if self.searchkey.startswith('%'):
567 self.searchkey = self.searchkey[1:]
568 if self.searchkey.startswith('@'):
570 self.searchkey = self.searchkey[1:]
572 self.searchre=re.compile(self.searchkey,re.I)
# Non-regex mode: escape the key so it is matched literally.
574 self.searchre=re.compile(re.escape(self.searchkey), re.I)
575 for package in self.portdb.cp_all():
576 self.spinner.update()
579 match_string = package[:]
581 match_string = package.split("/")[-1]
584 if self.searchre.search(match_string):
# Name matched: record it, flagging it masked when nothing is visible.
585 if not self.portdb.xmatch("match-visible", package):
587 self.matches["pkg"].append([package,masked])
588 elif self.searchdesc: # DESCRIPTION searching
589 full_package = self.portdb.xmatch("bestmatch-visible", package)
591 #no match found; we don't want to query description
592 full_package = portage.best(
593 self.portdb.xmatch("match-all", package))
599 full_desc = self.portdb.aux_get(
600 full_package, ["DESCRIPTION"])[0]
602 print "emerge: search: aux_get() failed, skipping"
604 if self.searchre.search(full_desc):
605 self.matches["desc"].append([full_package,masked])
# Search package sets by name and (optionally) description.
607 self.sdict = self.setconfig.getSets()
608 for setname in self.sdict:
609 self.spinner.update()
611 match_string = setname
613 match_string = setname.split("/")[-1]
615 if self.searchre.search(match_string):
616 self.matches["set"].append([setname, False])
617 elif self.searchdesc:
618 if self.searchre.search(
619 self.sdict[setname].getMetadata("DESCRIPTION")):
620 self.matches["set"].append([setname, False])
# Sort each bucket and compute the total match count.
623 for mtype in self.matches:
624 self.matches[mtype].sort()
625 self.mlen += len(self.matches[mtype])
628 if not self.portdb.xmatch("match-all", cp):
631 if not self.portdb.xmatch("bestmatch-visible", cp):
633 self.matches["pkg"].append([cp, masked])
# Result presentation: header, then per-match details.
637 """Outputs the results of the search."""
638 print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
639 print "[ Applications found : "+white(str(self.mlen))+" ]"
641 vardb = self.vartree.dbapi
642 for mtype in self.matches:
643 for match,masked in self.matches[mtype]:
647 full_package = self.portdb.xmatch(
648 "bestmatch-visible", match)
650 #no match found; we don't want to query description
652 full_package = portage.best(
653 self.portdb.xmatch("match-all",match))
654 elif mtype == "desc":
656 match = portage.cpv_getkey(match)
# Set matches: print the set name and its DESCRIPTION metadata.
658 print green("*")+" "+white(match)
659 print " ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION")
663 desc, homepage, license = self.portdb.aux_get(
664 full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
666 print "emerge: search: aux_get() failed, skipping"
669 print green("*")+" "+white(match)+" "+red("[ Masked ]")
671 print green("*")+" "+white(match)
672 myversion = self.getVersion(full_package, search.VERSION_RELEASE)
676 mycat = match.split("/")[0]
677 mypkg = match.split("/")[1]
678 mycpv = match + "-" + myversion
679 myebuild = self.portdb.findname(mycpv)
# Compute total distfile size from the Manifest for tree packages...
681 pkgdir = os.path.dirname(myebuild)
682 from portage import manifest
683 mf = manifest.Manifest(
684 pkgdir, self.settings["DISTDIR"])
686 uri_map = self.portdb.getFetchMap(mycpv)
687 except portage.exception.InvalidDependString, e:
688 file_size_str = "Unknown (%s)" % (e,)
692 mysum[0] = mf.getDistfilesSize(uri_map)
694 file_size_str = "Unknown (missing " + \
695 "digest for %s)" % (e,)
# ...or fall back to the binary package file's on-disk size.
700 if db is not vardb and \
701 db.cpv_exists(mycpv):
703 if not myebuild and hasattr(db, "bintree"):
704 myebuild = db.bintree.getname(mycpv)
706 mysum[0] = os.stat(myebuild).st_size
# Format the byte count as kB with a thousands separator.
711 if myebuild and file_size_str is None:
712 mystr = str(mysum[0] / 1024)
716 mystr = mystr[:mycount] + "," + mystr[mycount:]
717 file_size_str = mystr + " kB"
721 print " ", darkgreen("Latest version available:"),myversion
722 print " ", self.getInstallationStatus(mycat+'/'+mypkg)
725 (darkgreen("Size of files:"), file_size_str)
726 print " ", darkgreen("Homepage:")+" ",homepage
727 print " ", darkgreen("Description:")+" ",desc
728 print " ", darkgreen("License:")+" ",license
# Return a coloured "Latest version installed" status line for a package.
733 def getInstallationStatus(self,package):
734 installed_package = self.vartree.dep_bestmatch(package)
736 version = self.getVersion(installed_package,search.VERSION_RELEASE)
738 result = darkgreen("Latest version installed:")+" "+version
740 result = darkgreen("Latest version installed:")+" [ Not Installed ]"
# Extract the version (optionally with -rN revision) from a full cpv.
743 def getVersion(self,full_package,detail):
744 if len(full_package) > 1:
745 package_parts = portage.catpkgsplit(full_package)
# Include the revision unless it is the implicit r0.
746 if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
747 result = package_parts[2]+ "-" + package_parts[3]
749 result = package_parts[2]
# Per-ROOT configuration bundle used by the depgraph: settings, trees,
# package sets, and a virtual dbapi of visible packages.
754 class RootConfig(object):
755 """This is used internally by depgraph to track information about a
# Maps Package type_name values to the tree name that holds that kind of
# package.
759 "ebuild" : "porttree",
760 "binary" : "bintree",
761 "installed" : "vartree"
# Build the inverse mapping (tree name -> package type) from pkg_tree_map.
765 for k, v in pkg_tree_map.iteritems():
768 def __init__(self, settings, trees, setconfig):
770 self.settings = settings
# Implicit IUSE is precomputed and frozen for cheap reuse by Package.
771 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
772 self.root = self.settings["ROOT"]
773 self.setconfig = setconfig
774 self.sets = self.setconfig.getSets()
775 self.visible_pkgs = PackageVirtualDbapi(self.settings)
777 def create_world_atom(pkg, args_set, root_config):
778 """Create a new atom for the world file if one does not exist. If the
779 argument atom is precise enough to identify a specific slot then a slot
780 atom will be returned. Atoms that are in the system set may also be stored
781 in world since system atoms can only match one slot while world atoms can
782 be greedy with respect to slots. Unslotted system packages will not be
785 arg_atom = args_set.findAtomForPackage(pkg)
788 cp = portage.dep_getkey(arg_atom)
790 sets = root_config.sets
791 portdb = root_config.trees["porttree"].dbapi
792 vardb = root_config.trees["vartree"].dbapi
# A package is "slotted" when more than one SLOT exists, or when the single
# SLOT is not the default "0".
793 available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
794 for cpv in portdb.match(cp))
795 slotted = len(available_slots) > 1 or \
796 (len(available_slots) == 1 and "0" not in available_slots)
798 # check the vdb in case this is multislot
799 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
800 for cpv in vardb.match(cp))
801 slotted = len(available_slots) > 1 or \
802 (len(available_slots) == 1 and "0" not in available_slots)
803 if slotted and arg_atom != cp:
804 # If the user gave a specific atom, store it as a
805 # slot atom in the world file.
806 slot_atom = pkg.slot_atom
808 # For USE=multislot, there are a couple of cases to
811 # 1) SLOT="0", but the real SLOT spontaneously changed to some
812 # unknown value, so just record an unslotted atom.
814 # 2) SLOT comes from an installed package and there is no
815 # matching SLOT in the portage tree.
817 # Make sure that the slot atom is available in either the
818 # portdb or the vardb, since otherwise the user certainly
819 # doesn't want the SLOT atom recorded in the world file
820 # (case 1 above). If it's only available in the vardb,
821 # the user may be trying to prevent a USE=multislot
822 # package from being removed by --depclean (case 2 above).
825 if not portdb.match(slot_atom):
826 # SLOT seems to come from an installed multislot package
828 # If there is no installed package matching the SLOT atom,
829 # it probably changed SLOT spontaneously due to USE=multislot,
830 # so just record an unslotted atom.
831 if vardb.match(slot_atom):
832 # Now verify that the argument is precise
833 # enough to identify a specific slot.
834 matches = mydb.match(arg_atom)
835 matched_slots = set()
837 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
# Only use the slot atom when the argument unambiguously selects one slot.
838 if len(matched_slots) == 1:
839 new_world_atom = slot_atom
841 if new_world_atom == sets["world"].findAtomForPackage(pkg):
842 # Both atoms would be identical, so there's nothing to add.
845 # Unlike world atoms, system atoms are not greedy for slots, so they
846 # can't be safely excluded from world if they are slotted.
847 system_atom = sets["system"].findAtomForPackage(pkg)
849 if not portage.dep_getkey(system_atom).startswith("virtual/"):
851 # System virtuals aren't safe to exclude from world since they can
852 # match multiple old-style virtuals but only one of them will be
853 # pulled in by update or depclean.
854 providers = portdb.mysettings.getvirtuals().get(
855 portage.dep_getkey(system_atom))
# Safe to omit from world only when this cp is the sole virtual provider.
856 if providers and len(providers) == 1 and providers[0] == cp:
858 return new_world_atom
# Strip the leading +/- default markers from IUSE flags (loop body elided in
# this listing).
860 def filter_iuse_defaults(iuse):
862 if flag.startswith("+") or flag.startswith("-"):
# Lightweight base class: subclasses declare attributes via __slots__ and the
# constructor accepts them as keyword arguments.  Walks the class hierarchy so
# inherited __slots__ are honored too.
867 class SlotObject(object):
868 __slots__ = ("__weakref__",)
870 def __init__(self, **kwargs):
# Traverse this class and all its bases, initializing every declared slot
# from kwargs (missing ones default to None).
871 classes = [self.__class__]
876 classes.extend(c.__bases__)
877 slots = getattr(c, "__slots__", None)
881 myvalue = kwargs.get(myattr, None)
882 setattr(self, myattr, myvalue)
886 Create a new instance and copy all attributes
887 defined from __slots__ (including those from
890 obj = self.__class__()
# Same hierarchy walk as __init__, copying slot values onto the new object.
892 classes = [self.__class__]
897 classes.extend(c.__bases__)
898 slots = getattr(c, "__slots__", None)
902 setattr(obj, myattr, getattr(self, myattr))
# Base dependency priority: compares via the subclass-provided __int__ value,
# so priorities order naturally against each other and against plain ints.
906 class AbstractDepPriority(SlotObject):
907 __slots__ = ("buildtime", "runtime", "runtime_post")
909 def __lt__(self, other):
910 return self.__int__() < other
912 def __le__(self, other):
913 return self.__int__() <= other
915 def __eq__(self, other):
916 return self.__int__() == other
918 def __ne__(self, other):
919 return self.__int__() != other
921 def __gt__(self, other):
922 return self.__int__() > other
924 def __ge__(self, other):
925 return self.__int__() >= other
# copy() returns a shallow copy (def line elided in this listing).
929 return copy.copy(self)
# Standard dependency priority, adding satisfied/optional/rebuild flags; the
# visible fragment maps a runtime_post priority to its display string.
931 class DepPriority(AbstractDepPriority):
933 __slots__ = ("satisfied", "optional", "rebuild")
945 if self.runtime_post:
946 return "runtime_post"
# Priority for blocker dependencies; a shared singleton instance is attached
# to the class immediately after its definition.
949 class BlockerDepPriority(DepPriority):
957 BlockerDepPriority.instance = BlockerDepPriority()
# Priority used when unmerging: maps flag combinations to integer priority
# values, and classifies values above SOFT as hard dependencies.
959 class UnmergeDepPriority(AbstractDepPriority):
960 __slots__ = ("optional", "satisfied",)
962 Combination of properties Priority Category
967 (none of the above) -2 SOFT
977 if self.runtime_post:
# __str__/classification fragment: compares the numeric priority to SOFT.
984 myvalue = self.__int__()
985 if myvalue > self.SOFT:
# Defines a ladder of ignore-predicates over DepPriority for normal (non-
# satisfied-aware) scheduling: each step ignores progressively harder deps.
# Non-DepPriority instances are never ignored.
989 class DepPriorityNormalRange(object):
991 DepPriority properties Index Category
995 runtime_post 2 MEDIUM_SOFT
997 (none of the above) 0 NONE
1005 def _ignore_optional(cls, priority):
1006 if priority.__class__ is not DepPriority:
1008 return bool(priority.optional)
1011 def _ignore_runtime_post(cls, priority):
1012 if priority.__class__ is not DepPriority:
1014 return bool(priority.optional or priority.runtime_post)
1017 def _ignore_runtime(cls, priority):
1018 if priority.__class__ is not DepPriority:
# Everything except buildtime deps is ignorable at this level.
1020 return not priority.buildtime
1022 ignore_medium = _ignore_runtime
1023 ignore_medium_soft = _ignore_runtime_post
1024 ignore_soft = _ignore_optional
# Ordered tuple of the predicates, indexed by severity.
1026 DepPriorityNormalRange.ignore_priority = (
1028 DepPriorityNormalRange._ignore_optional,
1029 DepPriorityNormalRange._ignore_runtime_post,
1030 DepPriorityNormalRange._ignore_runtime
# Like DepPriorityNormalRange, but aware of the "satisfied" flag: already-
# satisfied dependencies are softer and become ignorable earlier in the
# ladder.  Non-DepPriority instances are never ignored.
1033 class DepPrioritySatisfiedRange(object):
1035 DepPriority Index Category
1037 not satisfied and buildtime HARD
1038 not satisfied and runtime 7 MEDIUM
1039 not satisfied and runtime_post 6 MEDIUM_SOFT
1040 satisfied and buildtime and rebuild 5 SOFT
1041 satisfied and buildtime 4 SOFT
1042 satisfied and runtime 3 SOFT
1043 satisfied and runtime_post 2 SOFT
1045 (none of the above) 0 NONE
1053 def _ignore_optional(cls, priority):
1054 if priority.__class__ is not DepPriority:
1056 return bool(priority.optional)
1059 def _ignore_satisfied_runtime_post(cls, priority):
1060 if priority.__class__ is not DepPriority:
1062 if priority.optional:
# Unsatisfied deps are not ignorable at this level.
1064 if not priority.satisfied:
1066 return bool(priority.runtime_post)
1069 def _ignore_satisfied_runtime(cls, priority):
1070 if priority.__class__ is not DepPriority:
1072 if priority.optional:
1074 if not priority.satisfied:
1076 return not priority.buildtime
1079 def _ignore_satisfied_buildtime(cls, priority):
1080 if priority.__class__ is not DepPriority:
1082 if priority.optional:
1084 if not priority.satisfied:
# Satisfied buildtime deps are ignorable unless flagged for rebuild.
1086 if priority.buildtime:
1087 return not priority.rebuild
1091 def _ignore_satisfied_buildtime_rebuild(cls, priority):
1092 if priority.__class__ is not DepPriority:
1094 if priority.optional:
1096 return bool(priority.satisfied)
1099 def _ignore_runtime_post(cls, priority):
1100 if priority.__class__ is not DepPriority:
1102 return bool(priority.optional or \
1103 priority.satisfied or \
1104 priority.runtime_post)
1107 def _ignore_runtime(cls, priority):
1108 if priority.__class__ is not DepPriority:
1110 return bool(priority.satisfied or \
1111 not priority.buildtime)
1113 ignore_medium = _ignore_runtime
1114 ignore_medium_soft = _ignore_runtime_post
1115 ignore_soft = _ignore_satisfied_buildtime_rebuild
# Ordered tuple of the predicates, indexed by severity.
1117 DepPrioritySatisfiedRange.ignore_priority = (
1119 DepPrioritySatisfiedRange._ignore_optional,
1120 DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1121 DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1122 DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1123 DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1124 DepPrioritySatisfiedRange._ignore_runtime_post,
1125 DepPrioritySatisfiedRange._ignore_runtime
# Collect the transitive runtime-dependency closure of system-set packages in
# the graph, following only runtime/runtime_post edges and skipping uninstall
# nodes.  Returns the set of Package nodes found.
1128 def _find_deep_system_runtime_deps(graph):
1129 deep_system_deps = set()
# Seed the traversal with system-set packages present in the graph.
1132 if not isinstance(node, Package) or \
1133 node.operation == 'uninstall':
1135 if node.root_config.sets['system'].findAtomForPackage(node):
1136 node_stack.append(node)
1138 def ignore_priority(priority):
1140 Ignore non-runtime priorities.
1142 if isinstance(priority, DepPriority) and \
1143 (priority.runtime or priority.runtime_post):
# Depth-first walk over runtime edges; the visited set doubles as result.
1148 node = node_stack.pop()
1149 if node in deep_system_deps:
1151 deep_system_deps.add(node)
1152 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1153 if not isinstance(child, Package) or \
1154 child.operation == 'uninstall':
1156 node_stack.append(child)
1158 return deep_system_deps
1160 class FakeVartree(portage.vartree):
1161 """This is implements an in-memory copy of a vartree instance that provides
1162 all the interfaces required for use by the depgraph. The vardb is locked
1163 during the constructor call just long enough to read a copy of the
1164 installed package information. This allows the depgraph to do it's
1165 dependency calculations without holding a lock on the vardb. It also
1166 allows things like vardb global updates to be done in memory so that the
1167 user doesn't necessarily need write access to the vardb in cases where
1168 global updates are necessary (updates are performed when necessary if there
1169 is not a matching ebuild in the tree)."""
1170 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1171 self._root_config = root_config
1172 if pkg_cache is None:
1174 real_vartree = root_config.trees["vartree"]
1175 portdb = root_config.trees["porttree"].dbapi
1176 self.root = real_vartree.root
1177 self.settings = real_vartree.settings
1178 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1179 if "_mtime_" not in mykeys:
1180 mykeys.append("_mtime_")
1181 self._db_keys = mykeys
1182 self._pkg_cache = pkg_cache
1183 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1184 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1186 # At least the parent needs to exist for the lock file.
1187 portage.util.ensure_dirs(vdb_path)
1188 except portage.exception.PortageException:
1192 if acquire_lock and os.access(vdb_path, os.W_OK):
1193 vdb_lock = portage.locks.lockdir(vdb_path)
1194 real_dbapi = real_vartree.dbapi
1196 for cpv in real_dbapi.cpv_all():
1197 cache_key = ("installed", self.root, cpv, "nomerge")
1198 pkg = self._pkg_cache.get(cache_key)
1200 metadata = pkg.metadata
1202 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1203 myslot = metadata["SLOT"]
1204 mycp = portage.dep_getkey(cpv)
1205 myslot_atom = "%s:%s" % (mycp, myslot)
1207 mycounter = long(metadata["COUNTER"])
1210 metadata["COUNTER"] = str(mycounter)
1211 other_counter = slot_counters.get(myslot_atom, None)
1212 if other_counter is not None:
1213 if other_counter > mycounter:
1215 slot_counters[myslot_atom] = mycounter
1217 pkg = Package(built=True, cpv=cpv,
1218 installed=True, metadata=metadata,
1219 root_config=root_config, type_name="installed")
1220 self._pkg_cache[pkg] = pkg
1221 self.dbapi.cpv_inject(pkg)
1222 real_dbapi.flush_cache()
1225 portage.locks.unlockdir(vdb_lock)
1226 # Populate the old-style virtuals using the cached values.
1227 if not self.settings.treeVirtuals:
1228 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1229 portage.getCPFromCPV, self.get_all_provides())
1231 # Intialize variables needed for lazy cache pulls of the live ebuild
1232 # metadata. This ensures that the vardb lock is released ASAP, without
1233 # being delayed in case cache generation is triggered.
1234 self._aux_get = self.dbapi.aux_get
1235 self.dbapi.aux_get = self._aux_get_wrapper
1236 self._match = self.dbapi.match
1237 self.dbapi.match = self._match_wrapper
1238 self._aux_get_history = set()
1239 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1240 self._portdb = portdb
1241 self._global_updates = None
1243 def _match_wrapper(self, cpv, use_cache=1):
1245 Make sure the metadata in Package instances gets updated for any
1246 cpv that is returned from a match() call, since the metadata can
1247 be accessed directly from the Package instance instead of via
1250 matches = self._match(cpv, use_cache=use_cache)
1252 if cpv in self._aux_get_history:
1254 self._aux_get_wrapper(cpv, [])
1257 def _aux_get_wrapper(self, pkg, wants):
1258 if pkg in self._aux_get_history:
1259 return self._aux_get(pkg, wants)
1260 self._aux_get_history.add(pkg)
1262 # Use the live ebuild metadata if possible.
1263 live_metadata = dict(izip(self._portdb_keys,
1264 self._portdb.aux_get(pkg, self._portdb_keys)))
1265 if not portage.eapi_is_supported(live_metadata["EAPI"]):
1267 self.dbapi.aux_update(pkg, live_metadata)
1268 except (KeyError, portage.exception.PortageException):
1269 if self._global_updates is None:
1270 self._global_updates = \
1271 grab_global_updates(self._portdb.porttree_root)
1272 perform_global_updates(
1273 pkg, self.dbapi, self._global_updates)
1274 return self._aux_get(pkg, wants)
def sync(self, acquire_lock=1):
    """
    Call this method to synchronize state with the real vardb
    after one or more packages may have been installed or
    uninstalled.
    """
    # NOTE(review): several interior lines (try/finally scaffolding,
    # local initializations like root/slot_counters, and some branch
    # bodies) are not visible in this copy; indentation below is
    # reconstructed -- confirm against upstream.
    vdb_path = os.path.join(self.root, portage.VDB_PATH)
    # At least the parent needs to exist for the lock file.
    portage.util.ensure_dirs(vdb_path)
    except portage.exception.PortageException:
    # Only lock when we can actually write to the vardb.
    if acquire_lock and os.access(vdb_path, os.W_OK):
        vdb_lock = portage.locks.lockdir(vdb_path)
    portage.locks.unlockdir(vdb_lock)
    real_vardb = self._root_config.trees["vartree"].dbapi
    current_cpv_set = frozenset(real_vardb.cpv_all())
    pkg_vardb = self.dbapi
    aux_get_history = self._aux_get_history

    # Remove any packages that have been uninstalled.
    for pkg in list(pkg_vardb):
        if pkg.cpv not in current_cpv_set:
            pkg_vardb.cpv_remove(pkg)
            aux_get_history.discard(pkg.cpv)

    # Validate counters and timestamps.
    validation_keys = ["COUNTER", "_mtime_"]
    for cpv in current_cpv_set:
        pkg_hash_key = ("installed", root, cpv, "nomerge")
        pkg = pkg_vardb.get(pkg_hash_key)
        counter, mtime = real_vardb.aux_get(cpv, validation_keys)
        counter = long(counter)
        # Drop stale cache entries whose counter/mtime changed.
        if counter != pkg.counter or \
            pkg_vardb.cpv_remove(pkg)
            aux_get_history.discard(pkg.cpv)
        pkg = self._pkg(cpv)
        # Track the highest counter seen per slot atom.
        other_counter = slot_counters.get(pkg.slot_atom)
        if other_counter is not None:
            if other_counter > pkg.counter:
        slot_counters[pkg.slot_atom] = pkg.counter
        pkg_vardb.cpv_inject(pkg)

    real_vardb.flush_cache()
def _pkg(self, cpv):
    """Build an installed Package instance for cpv from the real vardb."""
    root_config = self._root_config
    real_vardb = root_config.trees["vartree"].dbapi
    pkg = Package(cpv=cpv, installed=True,
        metadata=izip(self._db_keys,
        real_vardb.aux_get(cpv, self._db_keys)),
        root_config=root_config,
        type_name="installed")
    # Normalize COUNTER to a plain integer string.
    # NOTE(review): the try/except around this conversion and the
    # final "return pkg" are not visible in this copy.
    mycounter = long(pkg.metadata["COUNTER"])
    pkg.metadata["COUNTER"] = str(mycounter)
def grab_global_updates(portdir):
    """
    Parse profiles/updates under portdir and return the accumulated
    update commands.

    NOTE(review): the try:, the DirectoryNotFound fallback body, the
    upd_commands initialization and the final return are not visible
    in this copy -- confirm against upstream.
    """
    from portage.update import grab_updates, parse_updates
    updpath = os.path.join(portdir, "profiles", "updates")
    rawupdates = grab_updates(updpath)
    except portage.exception.DirectoryNotFound:
    for mykey, mystat, mycontent in rawupdates:
        commands, errors = parse_updates(mycontent)
        upd_commands.extend(commands)
def perform_global_updates(mycpv, mydb, mycommands):
    """Apply profiles/updates commands to mycpv's *DEPEND entries in mydb."""
    from portage.update import update_dbentries
    aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
    aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
    updates = update_dbentries(mycommands, aux_dict)
    # NOTE(review): an "if updates:" guard appears to be elided here
    # -- confirm against upstream.
    mydb.aux_update(mycpv, updates)
def visible(pkgsettings, pkg):
    """
    Check if a package is visible. This can raise an InvalidDependString
    exception if LICENSE is invalid.
    TODO: optionally generate a list of masking reasons

    @returns: True if the package is visible, False otherwise.
    """
    # NOTE(review): the "return False"/"return True" bodies of the
    # guards below, and the try: matching the final except, are not
    # visible in this copy; indentation is reconstructed.
    if not pkg.metadata["SLOT"]:
    if not pkg.installed:
        if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
    eapi = pkg.metadata["EAPI"]
    if not portage.eapi_is_supported(eapi):
    if not pkg.installed:
        if portage._eapi_is_deprecated(eapi):
    if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
    if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
    if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
    # The LICENSE check can raise InvalidDependString.
    if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
    except portage.exception.InvalidDependString:
def get_masking_status(pkg, pkgsettings, root_config):
    """Return a list of human-readable reasons why pkg is masked."""
    mreasons = portage.getmaskingstatus(
        pkg, settings=pkgsettings,
        portdb=root_config.trees["porttree"].dbapi)
    if not pkg.installed:
        if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
            mreasons.append("CHOST: %s" % \
                pkg.metadata["CHOST"])
    if not pkg.metadata["SLOT"]:
        mreasons.append("invalid: SLOT is undefined")
    # NOTE(review): "return mreasons" is not visible in this copy.
def get_mask_info(root_config, cpv, pkgsettings,
    db, pkg_type, built, installed, db_keys):
    """
    Fetch metadata for cpv from db and compute its masking reasons.
    Returns (metadata, mreasons); metadata is None on corruption.
    """
    # NOTE(review): the try/except around aux_get (which presumably
    # sets metadata to None on failure) and the else: branches joining
    # the alternatives below are not visible in this copy.
    metadata = dict(izip(db_keys,
        db.aux_get(cpv, db_keys)))
    if metadata and not built:
        pkgsettings.setcpv(cpv, mydb=metadata)
        metadata["USE"] = pkgsettings["PORTAGE_USE"]
        metadata['CHOST'] = pkgsettings.get('CHOST', '')
    if metadata is None:
        mreasons = ["corruption"]
    eapi = metadata['EAPI']
    if not portage.eapi_is_supported(eapi):
        mreasons = ['EAPI %s' % eapi]
    pkg = Package(type_name=pkg_type, root_config=root_config,
        cpv=cpv, built=built, installed=installed, metadata=metadata)
    mreasons = get_masking_status(pkg, pkgsettings, root_config)
    return metadata, mreasons
def show_masked_packages(masked_packages):
    """
    Print the mask reasons, package.mask comments and license file
    locations for each masked package, suppressing duplicates.
    Returns True if any package was masked due to an unsupported EAPI.
    """
    shown_licenses = set()
    shown_comments = set()
    # Maybe there is both an ebuild and a binary. Only
    # show one of them to avoid redundant appearance.
    # NOTE(review): shown_cpvs initialization and several small branch
    # bodies (continue statements, comment printing) are not visible
    # in this copy; indentation is reconstructed.
    have_eapi_mask = False
    for (root_config, pkgsettings, cpv,
        metadata, mreasons) in masked_packages:
        if cpv in shown_cpvs:
        comment, filename = None, None
        if "package.mask" in mreasons:
            comment, filename = \
                portage.getmaskingreason(
                cpv, metadata=metadata,
                settings=pkgsettings,
                portdb=root_config.trees["porttree"].dbapi,
                return_location=True)
        missing_licenses = []
        if not portage.eapi_is_supported(metadata["EAPI"]):
            have_eapi_mask = True
        missing_licenses = \
            pkgsettings._getMissingLicenses(
        except portage.exception.InvalidDependString:
            # This will have already been reported
            # above via mreasons.
        print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
        if comment and comment not in shown_comments:
            shown_comments.add(comment)
        portdb = root_config.trees["porttree"].dbapi
        for l in missing_licenses:
            l_path = portdb.findLicensePath(l)
            if l in shown_licenses:
            msg = ("A copy of the '%s' license" + \
                " is located at '%s'.") % (l, l_path)
            shown_licenses.add(l)
    return have_eapi_mask
class Task(SlotObject):
    """
    A hashable task whose identity is the tuple returned by
    _get_hash_key(); equality, hashing and the container protocols all
    delegate to that key.
    """
    __slots__ = ("_hash_key", "_hash_value")

    def _get_hash_key(self):
        # Subclasses must populate _hash_key before this is used.
        hash_key = getattr(self, "_hash_key", None)
        if hash_key is None:
            raise NotImplementedError(self)
        # NOTE(review): "return hash_key" is not visible in this copy.

    def __eq__(self, other):
        return self._get_hash_key() == other

    def __ne__(self, other):
        return self._get_hash_key() != other

    # NOTE(review): the def lines for __hash__, __len__, __iter__ and
    # __str__ are not visible in this copy; only fragments of their
    # bodies follow.
    hash_value = getattr(self, "_hash_value", None)
    if hash_value is None:
        # Cache the hash of the key tuple.
        self._hash_value = hash(self._get_hash_key())
    return self._hash_value

    return len(self._get_hash_key())

    def __getitem__(self, key):
        return self._get_hash_key()[key]

    return iter(self._get_hash_key())

    def __contains__(self, key):
        return key in self._get_hash_key()

    return str(self._get_hash_key())
class Blocker(Task):
    """A blocker atom attached to a root; identified by a 4-tuple key."""

    __hash__ = Task.__hash__
    __slots__ = ("root", "atom", "cp", "eapi", "satisfied")

    def __init__(self, **kwargs):
        Task.__init__(self, **kwargs)
        # Derive the category/package key from the blocker atom.
        self.cp = portage.dep_getkey(self.atom)

    def _get_hash_key(self):
        hash_key = getattr(self, "_hash_key", None)
        if hash_key is None:
            # NOTE(review): the "self._hash_key =" assignment line is
            # not visible in this copy.
            ("blocks", self.root, self.atom, self.eapi)
        return self._hash_key
class Package(Task):
    """
    An ebuild/binary/installed package. Wraps a cpv plus its metadata
    and derives convenience attributes (cp, slot_atom, category, pf,
    version splits, use/iuse wrappers).
    """

    __hash__ = Task.__hash__
    __slots__ = ("built", "cpv", "depth",
        "installed", "metadata", "onlydeps", "operation",
        "root_config", "type_name",
        "category", "counter", "cp", "cpv_split",
        "inherited", "iuse", "mtime",
        "pf", "pv_split", "root", "slot", "slot_atom", "use")

    # NOTE(review): the "metadata_keys = [" opening line is not
    # visible in this copy; the list items follow.
        "CHOST", "COUNTER", "DEPEND", "EAPI",
        "INHERITED", "IUSE", "KEYWORDS",
        "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
        "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]

    def __init__(self, **kwargs):
        Task.__init__(self, **kwargs)
        self.root = self.root_config.root
        # Wrap metadata so that attribute mirrors stay in sync; see
        # _PackageMetadataWrapper.
        self.metadata = _PackageMetadataWrapper(self, self.metadata)
        self.cp = portage.cpv_getkey(self.cpv)
        # NOTE(review): the lines deriving the local "slot" value are
        # not visible in this copy.
        # Avoid an InvalidAtom exception when creating slot_atom.
        # This package instance will be masked due to empty SLOT.
        self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
        self.category, self.pf = portage.catsplit(self.cpv)
        self.cpv_split = portage.catpkgsplit(self.cpv)
        self.pv_split = self.cpv_split[1:]

    # NOTE(review): the nested "class _use(...)" header is not visible
    # in this copy; its body follows.
        __slots__ = ("__weakref__", "enabled")

        def __init__(self, use):
            # Frozen set of enabled USE flags.
            self.enabled = frozenset(use)

    class _iuse(object):
        # Wrapper around the IUSE token list with enabled/disabled
        # views and a lazily-built matching regex.
        __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")

        def __init__(self, tokens, iuse_implicit):
            self.tokens = tuple(tokens)
            self.iuse_implicit = iuse_implicit
            # NOTE(review): the loop that classifies tokens into
            # enabled/disabled/other is not visible in this copy;
            # only two of its append lines follow.
                enabled.append(x[1:])
                disabled.append(x[1:])
            self.enabled = frozenset(enabled)
            self.disabled = frozenset(disabled)
            self.all = frozenset(chain(enabled, disabled, other))

        def __getattribute__(self, name):
            # Lazily build and cache the IUSE-matching regex on first
            # access of the "regex" attribute.
            # NOTE(review): the guard/try lines preceding this return
            # are not visible in this copy.
            return object.__getattribute__(self, "regex")
            except AttributeError:
                all = object.__getattribute__(self, "all")
                iuse_implicit = object.__getattribute__(self, "iuse_implicit")
                # Escape anything except ".*" which is supposed
                # to pass through from _get_implicit_iuse()
                regex = (re.escape(x) for x in chain(all, iuse_implicit))
                regex = "^(%s)$" % "|".join(regex)
                regex = regex.replace("\\.\\*", ".*")
                self.regex = re.compile(regex)
            return object.__getattribute__(self, name)

    def _get_hash_key(self):
        hash_key = getattr(self, "_hash_key", None)
        if hash_key is None:
            if self.operation is None:
                self.operation = "merge"
                if self.onlydeps or self.installed:
                    self.operation = "nomerge"
            # NOTE(review): the "self._hash_key =" assignment line is
            # not visible in this copy.
            (self.type_name, self.root, self.cpv, self.operation)
        return self._hash_key

    # Rich comparisons order same-cp packages by version.
    # NOTE(review): the "return True"/"return False" bodies of the
    # comparison methods are not visible in this copy.
    def __lt__(self, other):
        if other.cp != self.cp:
        if portage.pkgcmp(self.pv_split, other.pv_split) < 0:

    def __le__(self, other):
        if other.cp != self.cp:
        if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:

    def __gt__(self, other):
        if other.cp != self.cp:
        if portage.pkgcmp(self.pv_split, other.pv_split) > 0:

    def __ge__(self, other):
        if other.cp != self.cp:
        if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
# Every metadata key that may be requested for a Package: the auxdb
# keys minus unused placeholders and the obsolete CDEPEND, plus the
# keys Package itself requires.
_all_metadata_keys = set(x for x in portage.auxdbkeys \
    if not x.startswith("UNUSED_"))
_all_metadata_keys.discard("CDEPEND")
_all_metadata_keys.update(Package.metadata_keys)

from portage.cache.mappings import slot_dict_class
# Slot-based dict class used as the Package metadata container.
_PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
    """
    Detect metadata updates and synchronize Package attributes.
    """

    __slots__ = ("_pkg",)
    # Keys whose assignment also updates a mirrored Package attribute
    # via the corresponding _set_<key>() hook.
    _wrapped_keys = frozenset(
        ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])

    def __init__(self, pkg, metadata):
        _PackageMetadataWrapperBase.__init__(self)
        # NOTE(review): the "self._pkg = pkg" assignment is not
        # visible in this copy.
        self.update(metadata)

    def __setitem__(self, k, v):
        _PackageMetadataWrapperBase.__setitem__(self, k, v)
        if k in self._wrapped_keys:
            # Dispatch to _set_counter, _set_iuse, etc.
            getattr(self, "_set_" + k.lower())(k, v)

    def _set_inherited(self, k, v):
        if isinstance(v, basestring):
            v = frozenset(v.split())
        self._pkg.inherited = v

    def _set_iuse(self, k, v):
        self._pkg.iuse = self._pkg._iuse(
            v.split(), self._pkg.root_config.iuse_implicit)

    def _set_slot(self, k, v):
        # NOTE(review): the body (presumably "self._pkg.slot = v") is
        # not visible in this copy.

    def _set_use(self, k, v):
        self._pkg.use = self._pkg._use(v.split())

    def _set_counter(self, k, v):
        if isinstance(v, basestring):
            # NOTE(review): the try/long() conversion lines are not
            # visible in this copy.
        self._pkg.counter = v

    def _set__mtime_(self, k, v):
        if isinstance(v, basestring):
            # NOTE(review): the remainder of this method is not
            # visible in this copy.
class EbuildFetchonly(SlotObject):
    """
    Fetch (or pretend-fetch) the sources for a single ebuild, using a
    private build directory when RESTRICT=fetch requires pkg_nofetch.
    """

    __slots__ = ("fetch_all", "pkg", "pretend", "settings")

    # NOTE(review): the "def execute(self):" header and several of its
    # lines (pkg local, the restrict_fetch branch, return) are not
    # visible in this copy; indentation is reconstructed.
    settings = self.settings
    portdb = pkg.root_config.trees["porttree"].dbapi
    ebuild_path = portdb.findname(pkg.cpv)
    settings.setcpv(pkg)
    debug = settings.get("PORTAGE_DEBUG") == "1"
    use_cache = 1 # always true
    portage.doebuild_environment(ebuild_path, "fetch",
        settings["ROOT"], settings, debug, use_cache, portdb)
    restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
    rval = self._execute_with_builddir()
    rval = portage.doebuild(ebuild_path, "fetch",
        settings["ROOT"], settings, debug=debug,
        listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
        mydbapi=portdb, tree="porttree")
    if rval != os.EX_OK:
        msg = "Fetch failed for '%s'" % (pkg.cpv,)
        eerror(msg, phase="unpack", key=pkg.cpv)

    def _execute_with_builddir(self):
        # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
        # ensuring sane $PWD (bug #239560) and storing elog
        # messages. Use a private temp directory, in order
        # to avoid locking the main one.
        settings = self.settings
        global_tmpdir = settings["PORTAGE_TMPDIR"]
        from tempfile import mkdtemp
        # NOTE(review): the try/except/finally scaffolding around the
        # temp dir creation/cleanup and the final return are not
        # visible in this copy.
        private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
        if e.errno != portage.exception.PermissionDenied.errno:
        raise portage.exception.PermissionDenied(global_tmpdir)
        settings["PORTAGE_TMPDIR"] = private_tmpdir
        settings.backup_changes("PORTAGE_TMPDIR")
        retval = self._execute()
        # Restore the original tmpdir and remove the private one.
        settings["PORTAGE_TMPDIR"] = global_tmpdir
        settings.backup_changes("PORTAGE_TMPDIR")
        shutil.rmtree(private_tmpdir)

    # NOTE(review): the "def _execute(self):" header and its "pkg"
    # local assignment are not visible in this copy.
    settings = self.settings
    root_config = pkg.root_config
    portdb = root_config.trees["porttree"].dbapi
    ebuild_path = portdb.findname(pkg.cpv)
    debug = settings.get("PORTAGE_DEBUG") == "1"
    retval = portage.doebuild(ebuild_path, "fetch",
        self.settings["ROOT"], self.settings, debug=debug,
        listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
        mydbapi=portdb, tree="porttree")
    if retval != os.EX_OK:
        msg = "Fetch failed for '%s'" % (pkg.cpv,)
        eerror(msg, phase="unpack", key=pkg.cpv)
    # Flush any collected elog messages for this package.
    portage.elog.elog_process(self.pkg.cpv, self.settings)
class PollConstants(object):
    """
    Provides POLL* constants that are equivalent to those from the
    select module, for use by PollSelectAdapter.
    """
    names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
    # NOTE(review): the loop binding k/v (falling back to locally
    # generated values when select lacks a constant) is not visible in
    # this copy.
    locals()[k] = getattr(select, k, v)
class AsynchronousTask(SlotObject):
    """
    Subclasses override _wait() and _poll() so that calls
    to public methods can be wrapped for implementing
    hooks such as exit listener notification.

    Sublasses should call self.wait() to notify exit listeners after
    the task is complete and self.returncode has been set.
    """

    __slots__ = ("background", "cancelled", "returncode") + \
        ("_exit_listeners", "_exit_listener_stack", "_start_listeners")

    # NOTE(review): the def lines for start/_start, isAlive, poll,
    # _poll, wait and cancel are not visible in this copy; only
    # fragments of their bodies follow.
    """
    Start an asynchronous task and then return as soon as possible.
    """
    raise NotImplementedError(self)

    return self.returncode is None

    return self.returncode

    if self.returncode is None:
    return self.returncode

    return self.returncode

    self.cancelled = True

    def addStartListener(self, f):
        """
        The function will be called with one argument, a reference to self.
        """
        if self._start_listeners is None:
            self._start_listeners = []
        self._start_listeners.append(f)

    def removeStartListener(self, f):
        if self._start_listeners is None:
            # NOTE(review): early "return" not visible in this copy.
        self._start_listeners.remove(f)

    def _start_hook(self):
        # Fire registered start listeners exactly once.
        if self._start_listeners is not None:
            start_listeners = self._start_listeners
            self._start_listeners = None
            for f in start_listeners:
                # NOTE(review): the "f(self)" call line is not visible
                # in this copy.

    def addExitListener(self, f):
        """
        The function will be called with one argument, a reference to self.
        """
        if self._exit_listeners is None:
            self._exit_listeners = []
        self._exit_listeners.append(f)

    def removeExitListener(self, f):
        if self._exit_listeners is None:
            if self._exit_listener_stack is not None:
                self._exit_listener_stack.remove(f)
            # NOTE(review): early "return" not visible in this copy.
        self._exit_listeners.remove(f)

    def _wait_hook(self):
        """
        Call this method after the task completes, just before returning
        the returncode from wait() or poll(). This hook is
        used to trigger exit listeners when the returncode first
        becomes available.
        """
        if self.returncode is not None and \
            self._exit_listeners is not None:

            # This prevents recursion, in case one of the
            # exit handlers triggers this method again by
            # calling wait(). Use a stack that gives
            # removeExitListener() an opportunity to consume
            # listeners from the stack, before they can get
            # called below. This is necessary because a call
            # to one exit listener may result in a call to
            # removeExitListener() for another listener on
            # the stack. That listener needs to be removed
            # from the stack since it would be inconsistent
            # to call it after it has been been passed into
            # removeExitListener().
            self._exit_listener_stack = self._exit_listeners
            self._exit_listeners = None

            self._exit_listener_stack.reverse()
            while self._exit_listener_stack:
                self._exit_listener_stack.pop()(self)
class AbstractPollTask(AsynchronousTask):
    """Base for tasks driven by the scheduler's poll() loop."""

    # NOTE(review): the continuation tuple of __slots__ and the
    # _bufsize assignment are not visible in this copy.
    __slots__ = ("scheduler",) + \

    _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
    # NOTE(review): the continuation of this expression is not visible
    # in this copy.
    _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \

    def _unregister(self):
        raise NotImplementedError(self)

    def _unregister_if_appropriate(self, event):
        if self._registered:
            if event & self._exceptional_events:
                # NOTE(review): the error-handling body is not visible
                # in this copy.
            elif event & PollConstants.POLLHUP:
                # NOTE(review): the hangup-handling body is not
                # visible in this copy.
class PipeReader(AbstractPollTask):
    """
    Reads output from one or more files and saves it in memory,
    for retrieval via the getvalue() method. This is driven by
    the scheduler's poll() loop, so it runs entirely within the
    main thread.
    """

    __slots__ = ("input_files",) + \
        ("_read_data", "_reg_ids")

    # NOTE(review): the def lines for _start, isAlive, cancel, wait,
    # getvalue and close are not visible in this copy; only fragments
    # of their bodies follow.
    self._reg_ids = set()
    self._read_data = []
    for k, f in self.input_files.iteritems():
        # Put every input fd into non-blocking mode before
        # registering it with the scheduler.
        fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
            fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
        self._reg_ids.add(self.scheduler.register(f.fileno(),
            self._registered_events, self._output_handler))
    self._registered = True

    return self._registered

    if self.returncode is None:
    self.cancelled = True

    if self.returncode is not None:
        return self.returncode
    if self._registered:
        self.scheduler.schedule(self._reg_ids)
    self.returncode = os.EX_OK
    return self.returncode

    """Retrieve the entire contents"""
    if sys.hexversion >= 0x3000000:
        return bytes().join(self._read_data)
    return "".join(self._read_data)

    """Free the memory buffer."""
    self._read_data = None

    def _output_handler(self, fd, event):
        # Append any readable data from the triggering fd to the
        # in-memory buffer.
        if event & PollConstants.POLLIN:
            # Find the input file that triggered the event.
            for f in self.input_files.itervalues():
                if fd == f.fileno():
                    # NOTE(review): "break" is not visible here.
            buf = array.array('B')
            # NOTE(review): the try/except around the non-blocking
            # read is not visible in this copy.
            buf.fromfile(f, self._bufsize)
            self._read_data.append(buf.tostring())

        self._unregister_if_appropriate(event)
        return self._registered

    def _unregister(self):
        """
        Unregister from the scheduler and close open files.
        """
        self._registered = False

        if self._reg_ids is not None:
            for reg_id in self._reg_ids:
                self.scheduler.unregister(reg_id)
            self._reg_ids = None

        if self.input_files is not None:
            for f in self.input_files.itervalues():
                # NOTE(review): "f.close()" is not visible here.
            self.input_files = None
class CompositeTask(AsynchronousTask):
    """Task that delegates to a chain of inner tasks, one at a time."""

    __slots__ = ("scheduler",) + ("_current_task",)

    # NOTE(review): the def lines for isAlive, cancel, _poll and _wait
    # are not visible in this copy; only fragments of their bodies
    # follow.
    return self._current_task is not None

    self.cancelled = True
    if self._current_task is not None:
        self._current_task.cancel()

    """
    This does a loop calling self._current_task.poll()
    repeatedly as long as the value of self._current_task
    keeps changing. It calls poll() a maximum of one time
    for a given self._current_task instance. This is useful
    since calling poll() on a task can trigger advance to
    the next task could eventually lead to the returncode
    being set in cases when polling only a single task would
    not have the same effect.
    """
    task = self._current_task
    if task is None or task is prev:
        # don't poll the same task more than once
    return self.returncode

    task = self._current_task
    # don't wait for the same task more than once
    # Before the task.wait() method returned, an exit
    # listener should have set self._current_task to either
    # a different task or None. Something is wrong.
    raise AssertionError("self._current_task has not " + \
        "changed since calling wait", self, task)
    return self.returncode

    def _assert_current(self, task):
        """
        Raises an AssertionError if the given task is not the
        same one as self._current_task. This can be useful
        for detecting bugs.
        """
        if task is not self._current_task:
            raise AssertionError("Unrecognized task: %s" % (task,))

    def _default_exit(self, task):
        """
        Calls _assert_current() on the given task and then sets the
        composite returncode attribute if task.returncode != os.EX_OK.
        If the task failed then self._current_task will be set to None.
        Subclasses can use this as a generic task exit callback.

        @returns: The task.returncode attribute.
        """
        self._assert_current(task)
        if task.returncode != os.EX_OK:
            self.returncode = task.returncode
            self._current_task = None
        return task.returncode

    def _final_exit(self, task):
        """
        Assumes that task is the final task of this composite task.
        Calls _default_exit() and sets self.returncode to the task's
        returncode and sets self._current_task to None.
        """
        self._default_exit(task)
        self._current_task = None
        self.returncode = task.returncode
        return self.returncode

    def _default_final_exit(self, task):
        """
        This calls _final_exit() and then wait().

        Subclasses can use this as a generic final task exit callback.
        """
        self._final_exit(task)
        # NOTE(review): "return self.wait()" is not visible in this
        # copy.

    def _start_task(self, task, exit_handler):
        """
        Register exit handler for the given task, set it
        as self._current_task, and call task.start().

        Subclasses can use this as a generic way to start
        a task.
        """
        task.addExitListener(exit_handler)
        self._current_task = task
        # NOTE(review): "task.start()" is not visible in this copy.
class TaskSequence(CompositeTask):
    """
    A collection of tasks that executes sequentially. Each task
    must have a addExitListener() method that can be used as
    a means to trigger movement from one task to the next.
    """

    __slots__ = ("_task_queue",)

    def __init__(self, **kwargs):
        AsynchronousTask.__init__(self, **kwargs)
        self._task_queue = deque()

    def add(self, task):
        self._task_queue.append(task)

    # NOTE(review): the def lines for _start and cancel are not
    # visible in this copy; their body fragments follow.
    self._start_next_task()

    self._task_queue.clear()
    CompositeTask.cancel(self)

    def _start_next_task(self):
        # Pop from the left so tasks run in insertion order.
        self._start_task(self._task_queue.popleft(),
            self._task_exit_handler)

    def _task_exit_handler(self, task):
        if self._default_exit(task) != os.EX_OK:
            # NOTE(review): the failure body (presumably self.wait())
            # is not visible in this copy.
        elif self._task_queue:
            self._start_next_task()
        # NOTE(review): the "else:" line preceding the final exit is
        # not visible in this copy.
            self._final_exit(task)
class SubProcess(AbstractPollTask):
    """Task wrapping a forked child process monitored via the scheduler."""

    __slots__ = ("pid",) + \
        ("_files", "_reg_id")

    # A file descriptor is required for the scheduler to monitor changes from
    # inside a poll() loop. When logging is not enabled, create a pipe just to
    # serve this purpose alone.
    # NOTE(review): the _dummy_pipe_fd assignment and the def lines
    # for poll, cancel, isAlive and wait are not visible in this copy;
    # only fragments of their bodies follow.
    if self.returncode is not None:
        return self.returncode
    if self.pid is None:
        return self.returncode
    if self._registered:
        return self.returncode
    # Reap the child without blocking; on waitpid failure other than
    # ECHILD, re-raise; otherwise treat the child as exited with 1.
    retval = os.waitpid(self.pid, os.WNOHANG)
    if e.errno != errno.ECHILD:
    retval = (self.pid, 1)
    if retval == (0, 0):
    self._set_returncode(retval)
    return self.returncode

    os.kill(self.pid, signal.SIGTERM)
    if e.errno != errno.ESRCH:
    self.cancelled = True
    if self.pid is not None:
    return self.returncode

    return self.pid is not None and \
        self.returncode is None

    if self.returncode is not None:
        return self.returncode
    if self._registered:
        self.scheduler.schedule(self._reg_id)
    if self.returncode is not None:
        return self.returncode
    wait_retval = os.waitpid(self.pid, 0)
    if e.errno != errno.ECHILD:
    self._set_returncode((self.pid, 1))
    self._set_returncode(wait_retval)
    return self.returncode

    def _unregister(self):
        """
        Unregister from the scheduler and close open files.
        """
        self._registered = False

        if self._reg_id is not None:
            self.scheduler.unregister(self._reg_id)
            # NOTE(review): "self._reg_id = None" is not visible here.

        if self._files is not None:
            for f in self._files.itervalues():
                # NOTE(review): "f.close()" and the _files reset are
                # not visible in this copy.

    def _set_returncode(self, wait_retval):
        # Convert a waitpid() status tuple into a shell-style
        # returncode on self.returncode.
        retval = wait_retval[1]
        if retval != os.EX_OK:
            # NOTE(review): the signal-vs-exit-status branch lines
            # ("if retval & 0xff:" / "else:") are not visible in this
            # copy; both alternatives follow.
            retval = (retval & 0xff) << 8
            retval = retval >> 8
        self.returncode = retval
class SpawnProcess(SubProcess):
    """
    Constructor keyword args are passed into portage.process.spawn().
    The required "args" keyword argument will be passed as the first
    argument to spawn().
    """

    # Attributes forwarded as keyword args into the spawn call.
    _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
        "uid", "gid", "groups", "umask", "logfile",
        "path_lookup", "pre_exec")

    # NOTE(review): the continuation of __slots__ and the
    # "def _start(self):" header are not visible in this copy.
    __slots__ = ("args",) + \

    _file_names = ("log", "process", "stdout")
    _files_dict = slot_dict_class(_file_names, prefix="")

    # _start body fragments follow (several lines, including the
    # fd_pipes default, flush calls, logfile branching and the "files"
    # local, are not visible; indentation is reconstructed).
    if self.fd_pipes is None:
        # NOTE(review): default fd_pipes assignment elided here.
    fd_pipes = self.fd_pipes
    fd_pipes.setdefault(0, sys.stdin.fileno())
    fd_pipes.setdefault(1, sys.stdout.fileno())
    fd_pipes.setdefault(2, sys.stderr.fileno())

    # flush any pending output
    for fd in fd_pipes.itervalues():
        if fd == sys.stdout.fileno():
            # NOTE(review): flush call elided here.
        if fd == sys.stderr.fileno():
            # NOTE(review): flush call elided here.

    logfile = self.logfile
    self._files = self._files_dict()
    master_fd, slave_fd = self._pipe(fd_pipes)
    # Keep the read end non-blocking so the poll loop never stalls.
    fcntl.fcntl(master_fd, fcntl.F_SETFL,
        fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

    fd_pipes_orig = fd_pipes.copy()

    # TODO: Use job control functions like tcsetpgrp() to control
    # access to stdin. Until then, use /dev/null so that any
    # attempts to read from stdin will immediately return EOF
    # instead of blocking indefinitely.
    null_input = open('/dev/null', 'rb')
    fd_pipes[0] = null_input.fileno()
    # NOTE(review): the alternative branch restoring the original
    # stdin fd follows.
    fd_pipes[0] = fd_pipes_orig[0]

    files.process = os.fdopen(master_fd, 'rb')
    if logfile is not None:
        # Route child stdout/stderr through the pty/pipe and tee it
        # into the log file.
        fd_pipes[1] = slave_fd
        fd_pipes[2] = slave_fd
        files.log = open(logfile, mode='ab')
        # NOTE(review): the keyword continuation of this call is not
        # visible in this copy.
        portage.util.apply_secpass_permissions(logfile,
            uid=portage.portage_uid, gid=portage.portage_gid,
        if not self.background:
            files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
        output_handler = self._output_handler

    # Create a dummy pipe so the scheduler can monitor
    # the process from inside a poll() loop.
    fd_pipes[self._dummy_pipe_fd] = slave_fd
    fd_pipes[1] = slave_fd
    fd_pipes[2] = slave_fd
    output_handler = self._dummy_handler

    # Collect spawn kwargs from the matching attributes.
    for k in self._spawn_kwarg_names:
        v = getattr(self, k)
    kwargs["fd_pipes"] = fd_pipes
    kwargs["returnpid"] = True
    kwargs.pop("logfile", None)

    self._reg_id = self.scheduler.register(files.process.fileno(),
        self._registered_events, output_handler)
    self._registered = True

    retval = self._spawn(self.args, **kwargs)

    if null_input is not None:
        # NOTE(review): "null_input.close()" is not visible here.

    if isinstance(retval, int):
        # An int return value signals a spawn failure.
        self.returncode = retval

    self.pid = retval[0]
    portage.process.spawned_pids.remove(self.pid)

    def _pipe(self, fd_pipes):
        """
        @type fd_pipes: dict
        @param fd_pipes: pipes from which to copy terminal size if desired.
        """
        # NOTE(review): the body (presumably "return os.pipe()") is
        # not visible in this copy.

    def _spawn(self, args, **kwargs):
        return portage.process.spawn(args, **kwargs)

    def _output_handler(self, fd, event):
        # Copy available child output into the log and, unless
        # backgrounded, to the saved stdout.
        if event & PollConstants.POLLIN:
            # NOTE(review): the "files" local and the try/except
            # around the non-blocking read are not visible here.
            buf = array.array('B')
            buf.fromfile(files.process, self._bufsize)
            if not self.background:
                buf.tofile(files.stdout)
                files.stdout.flush()
            buf.tofile(files.log)

        self._unregister_if_appropriate(event)
        return self._registered

    def _dummy_handler(self, fd, event):
        """
        This method is mainly interested in detecting EOF, since
        the only purpose of the pipe is to allow the scheduler to
        monitor the process from inside a poll() loop.
        """
        if event & PollConstants.POLLIN:
            buf = array.array('B')
            # NOTE(review): the try/except around the read is not
            # visible in this copy.
            buf.fromfile(self._files.process, self._bufsize)

        self._unregister_if_appropriate(event)
        return self._registered
class MiscFunctionsProcess(SpawnProcess):
    """
    Spawns misc-functions.sh with an existing ebuild environment.
    """

    __slots__ = ("commands", "phase", "pkg", "settings")

    # NOTE(review): the "def _start(self):" header and a few of its
    # lines are not visible in this copy; its body follows.
    settings = self.settings
    settings.pop("EBUILD_PHASE", None)
    portage_bin_path = settings["PORTAGE_BIN_PATH"]
    misc_sh_binary = os.path.join(portage_bin_path,
        os.path.basename(portage.const.MISC_SH_BINARY))

    self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
    self.logfile = settings.get("PORTAGE_LOG_FILE")

    # Remove any stale exit status file before spawning.
    portage._doebuild_exit_status_unlink(
        settings.get("EBUILD_EXIT_STATUS_FILE"))

    SpawnProcess._start(self)

    def _spawn(self, args, **kwargs):
        # Run misc-functions.sh through portage.spawn() with the full
        # ebuild environment.
        settings = self.settings
        debug = settings.get("PORTAGE_DEBUG") == "1"
        return portage.spawn(" ".join(args), settings,
            debug=debug, **kwargs)

    def _set_returncode(self, wait_retval):
        SpawnProcess._set_returncode(self, wait_retval)
        # Translate the raw exit status through the ebuild exit status
        # file check.
        self.returncode = portage._doebuild_exit_status_check_and_log(
            self.settings, self.phase, self.returncode)
class EbuildFetcher(SpawnProcess):
    """Spawns `ebuild <path> fetch` to download a package's sources."""

    # NOTE(review): the continuation of __slots__ and the
    # "def _start(self):" header are not visible in this copy.
    __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \

    # _start body follows (some lines, including the phase selection
    # and conditional guards, are not visible; indentation is
    # reconstructed).
    root_config = self.pkg.root_config
    portdb = root_config.trees["porttree"].dbapi
    ebuild_path = portdb.findname(self.pkg.cpv)
    settings = self.config_pool.allocate()
    settings.setcpv(self.pkg)

    # In prefetch mode, logging goes to emerge-fetch.log and the builddir
    # should not be touched since otherwise it could interfere with
    # another instance of the same cpv concurrently being built for a
    # different $ROOT (currently, builds only cooperate with prefetchers
    # that are spawned for the same $ROOT).
    if not self.prefetch:
        self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
        self._build_dir.lock()
        self._build_dir.clean_log()
        portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
        if self.logfile is None:
            self.logfile = settings.get("PORTAGE_LOG_FILE")

    # If any incremental variables have been overridden
    # via the environment, those values need to be passed
    # along here so that they are correctly considered by
    # the config instance in the subproccess.
    fetch_env = os.environ.copy()

    nocolor = settings.get("NOCOLOR")
    if nocolor is not None:
        fetch_env["NOCOLOR"] = nocolor

    fetch_env["PORTAGE_NICENESS"] = "0"
    # NOTE(review): the conditional guarding this parallel-fetch
    # setting, and the "phase" assignment used below, are not visible
    # in this copy.
    fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"

    ebuild_binary = os.path.join(
        settings["PORTAGE_BIN_PATH"], "ebuild")

    fetch_args = [ebuild_binary, ebuild_path, phase]
    debug = settings.get("PORTAGE_DEBUG") == "1"
    # NOTE(review): the "if debug:" line is not visible here.
        fetch_args.append("--debug")

    self.args = fetch_args
    self.env = fetch_env
    SpawnProcess._start(self)

    def _pipe(self, fd_pipes):
        """When appropriate, use a pty so that fetcher progress bars,
        like wget has, will work properly."""
        if self.background or not sys.stdout.isatty():
            # When the output only goes to a log file,
            # there's no point in creating a pty.
            # NOTE(review): the plain-pipe return is not visible here.
        stdout_pipe = fd_pipes.get(1)
        got_pty, master_fd, slave_fd = \
            portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
        return (master_fd, slave_fd)

    def _set_returncode(self, wait_retval):
        SpawnProcess._set_returncode(self, wait_retval)
        # Collect elog messages that might have been
        # created by the pkg_nofetch phase.
        if self._build_dir is not None:
            # Skip elog messages for prefetch, in order to avoid duplicates.
            if not self.prefetch and self.returncode != os.EX_OK:
                # NOTE(review): the "elog_out = None" initialization
                # and a background guard are not visible here.
                if self.logfile is not None:
                    elog_out = open(self.logfile, 'a')
                msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
                if self.logfile is not None:
                    msg += ", Log file:"
                eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
                if self.logfile is not None:
                    eerror(" '%s'" % (self.logfile,),
                        phase="unpack", key=self.pkg.cpv, out=elog_out)
                if elog_out is not None:
                    # NOTE(review): "elog_out.close()" is not visible
                    # here.
            if not self.prefetch:
                portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
                features = self._build_dir.settings.features
                if self.returncode == os.EX_OK:
                    self._build_dir.clean_log()
            self._build_dir.unlock()
            self.config_pool.deallocate(self._build_dir.settings)
            self._build_dir = None
# Manages locking and log cleanup for a package's PORTAGE_BUILDDIR.
# The lock() method also locks the parent category directory briefly so
# that concurrent emerge processes do not race on directory creation.
# NOTE(review): this excerpt elides several original lines (docstring
# delimiters, try/except scaffolding in lock()/unlock(), the unlock()
# def line around 2652, and rmdir cleanup around 2660-2673) -- the
# visible lines are annotated as-is; confirm control flow in full source.
2592 class EbuildBuildDir(SlotObject):
2594 __slots__ = ("dir_path", "pkg", "settings",
2595 "locked", "_catdir", "_lock_obj")
2597 def __init__(self, **kwargs):
2598 SlotObject.__init__(self, **kwargs)
# lock(): acquire the build dir lock, computing dir_path on demand.
2603 This raises an AlreadyLocked exception if lock() is called
2604 while a lock is already held. In order to avoid this, call
2605 unlock() or check whether the "locked" attribute is True
2606 or False before calling lock().
2608 if self._lock_obj is not None:
2609 raise self.AlreadyLocked((self._lock_obj,))
2611 dir_path = self.dir_path
2612 if dir_path is None:
# Derive PORTAGE_BUILDDIR by running doebuild_environment for the
# "setup" phase against the package's ebuild.
2613 root_config = self.pkg.root_config
2614 portdb = root_config.trees["porttree"].dbapi
2615 ebuild_path = portdb.findname(self.pkg.cpv)
2616 settings = self.settings
2617 settings.setcpv(self.pkg)
2618 debug = settings.get("PORTAGE_DEBUG") == "1"
2619 use_cache = 1 # always true
2620 portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2621 self.settings, debug, use_cache, portdb)
2622 dir_path = self.settings["PORTAGE_BUILDDIR"]
2624 catdir = os.path.dirname(dir_path)
2625 self._catdir = catdir
# Ensure the grandparent exists, then lock the category dir while
# creating it, so two processes can't race on the same category.
2627 portage.util.ensure_dirs(os.path.dirname(catdir),
2628 gid=portage.portage_gid,
2632 catdir_lock = portage.locks.lockdir(catdir)
2633 portage.util.ensure_dirs(catdir,
2634 gid=portage.portage_gid,
2636 self._lock_obj = portage.locks.lockdir(dir_path)
2638 self.locked = self._lock_obj is not None
2639 if catdir_lock is not None:
2640 portage.locks.unlockdir(catdir_lock)
2642 def clean_log(self):
2643 """Discard existing log."""
2644 settings = self.settings
# Remove the log id marker and the build log itself; missing files are
# presumably tolerated by elided exception handling (line 2647 ff.).
2646 for x in ('.logid', 'temp/build.log'):
2648 os.unlink(os.path.join(settings["PORTAGE_BUILDDIR"], x))
# unlock(): release the build dir lock and try to prune the (possibly
# empty) category directory, tolerating benign rmdir errors.
2653 if self._lock_obj is None:
2656 portage.locks.unlockdir(self._lock_obj)
2657 self._lock_obj = None
2660 catdir = self._catdir
2663 catdir_lock = portage.locks.lockdir(catdir)
# ENOENT/ENOTEMPTY/EEXIST are the expected benign outcomes of removing a
# category dir that is gone, still in use, or being recreated.
2669 if e.errno not in (errno.ENOENT,
2670 errno.ENOTEMPTY, errno.EEXIST):
2673 portage.locks.unlockdir(catdir_lock)
# Raised when lock() is invoked while a lock is already held.
2675 class AlreadyLocked(portage.exception.PortageException):
# CompositeTask that drives a full from-source build of one ebuild:
# (optional) prefetch wait -> fetch -> build phases -> optional binary
# packaging -> merge. Each stage hands off to the next via exit callbacks.
# NOTE(review): this excerpt elides many original lines (method def lines
# for _start and _merge, branch bodies, try/finally scaffolding); the
# visible lines are annotated as-is -- confirm control flow in the full
# source before relying on it.
2678 class EbuildBuild(CompositeTask):
2680 __slots__ = ("args_set", "config_pool", "find_blockers",
2681 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2682 "prefetcher", "settings", "world_atom") + \
2683 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
# _start (def line elided): resolve the ebuild path and either wait for a
# running prefetcher or begin fetching immediately.
2687 logger = self.logger
2690 settings = self.settings
2691 world_atom = self.world_atom
2692 root_config = pkg.root_config
2695 portdb = root_config.trees[tree].dbapi
2696 settings.setcpv(pkg)
2697 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2698 ebuild_path = portdb.findname(self.pkg.cpv)
2699 self._ebuild_path = ebuild_path
2701 prefetcher = self.prefetcher
2702 if prefetcher is None:
2704 elif not prefetcher.isAlive():
2706 elif prefetcher.poll() is None:
# Prefetcher is still running: print a hint and register an exit
# listener instead of blocking.
2708 waiting_msg = "Fetching files " + \
2709 "in the background. " + \
2710 "To view fetch progress, run `tail -f " + \
2711 "/var/log/emerge-fetch.log` in another " + \
2713 msg_prefix = colorize("GOOD", " * ")
2714 from textwrap import wrap
2715 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2716 for line in wrap(waiting_msg, 65))
2717 if not self.background:
2718 writemsg(waiting_msg, noiselevel=-1)
2720 self._current_task = prefetcher
2721 prefetcher.addExitListener(self._prefetch_exit)
2724 self._prefetch_exit(prefetcher)
# After prefetch: in pretend/fetchonly-pretend mode run the synchronous
# EbuildFetchonly; otherwise start an asynchronous EbuildFetcher.
2726 def _prefetch_exit(self, prefetcher):
2730 settings = self.settings
2733 fetcher = EbuildFetchonly(
2734 fetch_all=opts.fetch_all_uri,
2735 pkg=pkg, pretend=opts.pretend,
2737 retval = fetcher.execute()
2738 self.returncode = retval
2742 fetcher = EbuildFetcher(config_pool=self.config_pool,
2743 fetchall=opts.fetch_all_uri,
2744 fetchonly=opts.fetchonly,
2745 background=self.background,
2746 pkg=pkg, scheduler=self.scheduler)
2748 self._start_task(fetcher, self._fetch_exit)
# Fetch finished: record/remove the fetch log, then lock the build dir
# and launch the build (or stop for failure/fetchonly).
2750 def _fetch_exit(self, fetcher):
2754 fetch_failed = False
2756 fetch_failed = self._final_exit(fetcher) != os.EX_OK
2758 fetch_failed = self._default_exit(fetcher) != os.EX_OK
2760 if fetch_failed and fetcher.logfile is not None and \
2761 os.path.exists(fetcher.logfile):
2762 self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2764 if not fetch_failed and fetcher.logfile is not None:
2765 # Fetch was successful, so remove the fetch log.
2767 os.unlink(fetcher.logfile)
2771 if fetch_failed or opts.fetchonly:
2775 logger = self.logger
2777 pkg_count = self.pkg_count
2778 scheduler = self.scheduler
2779 settings = self.settings
2780 features = settings.features
2781 ebuild_path = self._ebuild_path
2782 system_set = pkg.root_config.sets["system"]
2784 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2785 self._build_dir.lock()
2787 # Cleaning is triggered before the setup
2788 # phase, in portage.doebuild().
2789 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2790 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2791 short_msg = "emerge: (%s of %s) %s Clean" % \
2792 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2793 logger.log(msg, short_msg=short_msg)
2795 #buildsyspkg: Check if we need to _force_ binary package creation
2796 self._issyspkg = "buildsyspkg" in features and \
2797 system_set.findAtomForPackage(pkg) and \
2800 if opts.buildpkg or self._issyspkg:
2802 self._buildpkg = True
2804 msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2805 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2806 short_msg = "emerge: (%s of %s) %s Compile" % \
2807 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2808 logger.log(msg, short_msg=short_msg)
2811 msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2812 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2813 short_msg = "emerge: (%s of %s) %s Compile" % \
2814 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2815 logger.log(msg, short_msg=short_msg)
2817 build = EbuildExecuter(background=self.background, pkg=pkg,
2818 scheduler=scheduler, settings=settings)
2819 self._start_task(build, self._build_exit)
# Flush elog messages and release the build dir lock.
2821 def _unlock_builddir(self):
2822 portage.elog.elog_process(self.pkg.cpv, self.settings)
2823 self._build_dir.unlock()
# Build finished: either pack a binary package (buildpkg/buildsyspkg) or
# proceed straight to merge.
2825 def _build_exit(self, build):
2826 if self._default_exit(build) != os.EX_OK:
2827 self._unlock_builddir()
2832 buildpkg = self._buildpkg
2835 self._final_exit(build)
2840 msg = ">>> This is a system package, " + \
2841 "let's pack a rescue tarball.\n"
2843 log_path = self.settings.get("PORTAGE_LOG_FILE")
2844 if log_path is not None:
2845 log_file = open(log_path, 'a')
2851 if not self.background:
2852 portage.writemsg_stdout(msg, noiselevel=-1)
2854 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2855 scheduler=self.scheduler, settings=self.settings)
2857 self._start_task(packager, self._buildpkg_exit)
2859 def _buildpkg_exit(self, packager):
2861 Released build dir lock when there is a failure or
2862 when in buildpkgonly mode. Otherwise, the lock will
2863 be released when merge() is called.
2866 if self._default_exit(packager) != os.EX_OK:
2867 self._unlock_builddir()
2871 if self.opts.buildpkgonly:
2872 # Need to call "clean" phase for buildpkgonly mode
2873 portage.elog.elog_process(self.pkg.cpv, self.settings)
2875 clean_phase = EbuildPhase(background=self.background,
2876 pkg=self.pkg, phase=phase,
2877 scheduler=self.scheduler, settings=self.settings,
2879 self._start_task(clean_phase, self._clean_exit)
2882 # Continue holding the builddir lock until
2883 # after the package has been installed.
2884 self._current_task = None
2885 self.returncode = packager.returncode
2888 def _clean_exit(self, clean_phase):
2889 if self._final_exit(clean_phase) != os.EX_OK or \
2890 self.opts.buildpkgonly:
2891 self._unlock_builddir()
# _merge (def line elided): install the built package via EbuildMerge,
# then unlock the build dir.
2896 Install the package and then clean up and release locks.
2897 Only call this after the build has completed successfully
2898 and neither fetchonly nor buildpkgonly mode are enabled.
2901 find_blockers = self.find_blockers
2902 ldpath_mtimes = self.ldpath_mtimes
2903 logger = self.logger
2905 pkg_count = self.pkg_count
2906 settings = self.settings
2907 world_atom = self.world_atom
2908 ebuild_path = self._ebuild_path
2911 merge = EbuildMerge(find_blockers=self.find_blockers,
2912 ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2913 pkg_count=pkg_count, pkg_path=ebuild_path,
2914 scheduler=self.scheduler,
2915 settings=settings, tree=tree, world_atom=world_atom)
2917 msg = " === (%s of %s) Merging (%s::%s)" % \
2918 (pkg_count.curval, pkg_count.maxval,
2919 pkg.cpv, ebuild_path)
2920 short_msg = "emerge: (%s of %s) %s Merge" % \
2921 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2922 logger.log(msg, short_msg=short_msg)
2925 rval = merge.execute()
2927 self._unlock_builddir()
# Runs the ordered ebuild phases for a source build: clean -> setup ->
# unpack -> (prepare, configure, compile, test, install). Setup and
# unpack go through the scheduler's dedicated queues; the rest run as a
# TaskSequence.
# NOTE(review): excerpt elides several lines (the _live_eclasses member
# list, the _start def line, branch bodies); annotations describe only
# what is visible -- confirm in full source.
2931 class EbuildExecuter(CompositeTask):
2933 __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2935 _phases = ("prepare", "configure", "compile", "test", "install")
# Set of eclasses identifying "live" (VCS) ebuilds; members elided here.
2937 _live_eclasses = frozenset([
# _start (def line elided): begin with a "clean" phase.
2947 self._tree = "porttree"
2950 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2951 scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2952 self._start_task(clean_phase, self._clean_phase_exit)
# After clean: prepare build dirs (initializes PORTAGE_LOG_FILE) and
# schedule the "setup" phase via the scheduler's setup queue.
2954 def _clean_phase_exit(self, clean_phase):
2956 if self._default_exit(clean_phase) != os.EX_OK:
2961 scheduler = self.scheduler
2962 settings = self.settings
2965 # This initializes PORTAGE_LOG_FILE.
2966 portage.prepare_build_dirs(pkg.root, settings, cleanup)
2968 setup_phase = EbuildPhase(background=self.background,
2969 pkg=pkg, phase="setup", scheduler=scheduler,
2970 settings=settings, tree=self._tree)
2972 setup_phase.addExitListener(self._setup_exit)
2973 self._current_task = setup_phase
2974 self.scheduler.scheduleSetup(setup_phase)
# After setup: run "unpack", serialized through the scheduler for live
# ebuilds to avoid concurrent $DISTDIR checkouts.
2976 def _setup_exit(self, setup_phase):
2978 if self._default_exit(setup_phase) != os.EX_OK:
2982 unpack_phase = EbuildPhase(background=self.background,
2983 pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2984 settings=self.settings, tree=self._tree)
2986 if self._live_eclasses.intersection(self.pkg.inherited):
2987 # Serialize $DISTDIR access for live ebuilds since
2988 # otherwise they can interfere with eachother.
2990 unpack_phase.addExitListener(self._unpack_exit)
2991 self._current_task = unpack_phase
2992 self.scheduler.scheduleUnpack(unpack_phase)
2995 self._start_task(unpack_phase, self._unpack_exit)
# After unpack: queue the remaining phases; EAPI 0/1 skip src_prepare
# and src_configure (slicing line elided around 3010).
2997 def _unpack_exit(self, unpack_phase):
2999 if self._default_exit(unpack_phase) != os.EX_OK:
3003 ebuild_phases = TaskSequence(scheduler=self.scheduler)
3006 phases = self._phases
3007 eapi = pkg.metadata["EAPI"]
3008 if eapi in ("0", "1"):
3009 # skip src_prepare and src_configure
3012 for phase in phases:
3013 ebuild_phases.add(EbuildPhase(background=self.background,
3014 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3015 settings=self.settings, tree=self._tree))
3017 self._start_task(ebuild_phases, self._default_final_exit)
# Asynchronous "depend" phase runner: spawns the ebuild in a subprocess
# with an extra pipe fd, collects the metadata it prints, and hands the
# parsed key/value pairs to metadata_callback on success.
# NOTE(review): excerpt elides many lines (the _start def line, eapi=None
# initialization, try/except around the fd flush, _metadata_fd constant,
# the _registered_events setup, and parts of _output_handler); annotations
# cover only the visible lines.
3019 class EbuildMetadataPhase(SubProcess):
3022 Asynchronous interface for the ebuild "depend" phase which is
3023 used to extract metadata from the ebuild.
3026 __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3027 "ebuild_mtime", "portdb", "repo_path", "settings") + \
3030 _file_names = ("ebuild",)
3031 _files_dict = slot_dict_class(_file_names, prefix="")
# _start (def line elided): determine EAPI up front when the relevant
# features are enabled, short-circuiting unsupported EAPIs without
# spawning bash at all.
3035 settings = self.settings
3036 settings.setcpv(self.cpv)
3037 ebuild_path = self.ebuild_path
3040 if 'parse-eapi-glep-55' in settings.features:
3041 pf, eapi = portage._split_ebuild_name_glep55(
3042 os.path.basename(ebuild_path))
3043 if eapi is None and \
3044 'parse-eapi-ebuild-head' in settings.features:
3045 eapi = portage._parse_eapi_ebuild_head(codecs.open(ebuild_path,
3046 mode='r', encoding='utf_8', errors='replace'))
3048 if eapi is not None:
3049 if not portage.eapi_is_supported(eapi):
# Unsupported EAPI: report it via the callback and finish successfully
# without running the depend phase.
3050 self.metadata_callback(self.cpv, self.ebuild_path,
3051 self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
3052 self.returncode = os.EX_OK
3056 settings.configdict['pkg']['EAPI'] = eapi
3058 debug = settings.get("PORTAGE_DEBUG") == "1"
# Build the fd map for the child, defaulting stdio to our own fds.
3062 if self.fd_pipes is not None:
3063 fd_pipes = self.fd_pipes.copy()
3067 fd_pipes.setdefault(0, sys.stdin.fileno())
3068 fd_pipes.setdefault(1, sys.stdout.fileno())
3069 fd_pipes.setdefault(2, sys.stderr.fileno())
3071 # flush any pending output
3072 for fd in fd_pipes.itervalues():
3073 if fd == sys.stdout.fileno():
3075 if fd == sys.stderr.fileno():
3078 fd_pipes_orig = fd_pipes.copy()
3079 self._files = self._files_dict()
# Non-blocking pipe: the child writes metadata to slave_fd; we read the
# master end from the event loop.
3082 master_fd, slave_fd = os.pipe()
3083 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3084 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3086 fd_pipes[self._metadata_fd] = slave_fd
3088 self._raw_metadata = []
3089 files.ebuild = os.fdopen(master_fd, 'r')
3090 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3091 self._registered_events, self._output_handler)
3092 self._registered = True
# returnpid=True makes doebuild return the spawned pid list instead of
# waiting for completion.
3094 retval = portage.doebuild(ebuild_path, "depend",
3095 settings["ROOT"], settings, debug,
3096 mydbapi=self.portdb, tree="porttree",
3097 fd_pipes=fd_pipes, returnpid=True)
3101 if isinstance(retval, int):
3102 # doebuild failed before spawning
3104 self.returncode = retval
# Take ownership of the child pid; remove it from portage's global
# spawned_pids list since this object will wait on it.
3108 self.pid = retval[0]
3109 portage.process.spawned_pids.remove(self.pid)
# Event-loop callback: accumulate metadata output; an empty read means
# EOF (handling elided around 3116-3118).
3111 def _output_handler(self, fd, event):
3113 if event & PollConstants.POLLIN:
3114 self._raw_metadata.append(self._files.ebuild.read())
3115 if not self._raw_metadata[-1]:
3119 self._unregister_if_appropriate(event)
3120 return self._registered
3122 def _set_returncode(self, wait_retval):
3123 SubProcess._set_returncode(self, wait_retval)
3124 if self.returncode == os.EX_OK:
3125 metadata_lines = "".join(self._raw_metadata).splitlines()
3126 if len(portage.auxdbkeys) != len(metadata_lines):
3127 # Don't trust bash's returncode if the
3128 # number of lines is incorrect.
# One output line per auxdb key, in order; pair them up lazily.
3131 metadata = izip(portage.auxdbkeys, metadata_lines)
3132 self.metadata_callback(self.cpv, self.ebuild_path,
3133 self.repo_path, metadata, self.ebuild_mtime)
# SpawnProcess subclass that runs a single ebuild phase via
# portage.doebuild, writing output to PORTAGE_LOG_FILE except during the
# clean phases.
# NOTE(review): excerpt elides the _start def line and the tail of
# _spawn/_set_returncode bodies; annotations cover visible lines only.
3135 class EbuildProcess(SpawnProcess):
3137 __slots__ = ("phase", "pkg", "settings", "tree")
# _start (def line elided): pick the log file, then defer to the parent.
3140 # Don't open the log file during the clean phase since the
3141 # open file can result in an nfs lock on $T/build.log which
3142 # prevents the clean phase from removing $T.
3143 if self.phase not in ("clean", "cleanrm"):
3144 self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3145 SpawnProcess._start(self)
# Prefer a pty (with terminal size copied from our stdout) so that
# progress output from the phase renders correctly.
3147 def _pipe(self, fd_pipes):
3148 stdout_pipe = fd_pipes.get(1)
3149 got_pty, master_fd, slave_fd = \
3150 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3151 return (master_fd, slave_fd)
# Delegate the actual spawn to portage.doebuild for this phase/tree.
3153 def _spawn(self, args, **kwargs):
3155 root_config = self.pkg.root_config
3157 mydbapi = root_config.trees[tree].dbapi
3158 settings = self.settings
3159 ebuild_path = settings["EBUILD"]
3160 debug = settings.get("PORTAGE_DEBUG") == "1"
3162 rval = portage.doebuild(ebuild_path, self.phase,
3163 root_config.root, settings, debug,
3164 mydbapi=mydbapi, tree=tree, **kwargs)
3168 def _set_returncode(self, wait_retval):
3169 SpawnProcess._set_returncode(self, wait_retval)
# Cross-check the exit status file written by the ebuild environment
# (skipped for clean phases, which remove $T).
3171 if self.phase not in ("clean", "cleanrm"):
3172 self.returncode = portage._doebuild_exit_status_check_and_log(
3173 self.settings, self.phase, self.returncode)
# FEATURES=test-fail-continue: a failing test phase is downgraded to
# success so the build can proceed.
3175 if self.phase == "test" and self.returncode != os.EX_OK and \
3176 "test-fail-continue" in self.settings.features:
3177 self.returncode = os.EX_OK
3179 portage._post_phase_userpriv_perms(self.settings)
# CompositeTask wrapping one EbuildProcess phase plus the post-phase misc
# commands (die hooks, qa checks, etc.) registered for that phase.
# NOTE(review): excerpt elides the _start def line and several branch
# bodies (log_file handling, wait() calls); annotations cover visible
# lines only.
3181 class EbuildPhase(CompositeTask):
3183 __slots__ = ("background", "pkg", "phase",
3184 "scheduler", "settings", "tree")
3186 _post_phase_cmds = portage._post_phase_cmds
# _start (def line elided): launch the underlying EbuildProcess.
3190 ebuild_process = EbuildProcess(background=self.background,
3191 pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3192 settings=self.settings, tree=self.tree)
3194 self._start_task(ebuild_process, self._ebuild_exit)
3196 def _ebuild_exit(self, ebuild_process):
# For the install phase, run the build-log QA check, sending its output
# to the log file when running in the background.
3198 if self.phase == "install":
3200 log_path = self.settings.get("PORTAGE_LOG_FILE")
3202 if self.background and log_path is not None:
3203 log_file = open(log_path, 'a')
3206 portage._check_build_log(self.settings, out=out)
3208 if log_file is not None:
3211 if self._default_exit(ebuild_process) != os.EX_OK:
3215 settings = self.settings
3217 if self.phase == "install":
# Fix ownership/CHOST issues in the staged image before merging.
3218 portage._post_src_install_chost_fix(settings)
3219 portage._post_src_install_uid_fix(settings)
# Run any post-phase commands configured for this phase; otherwise the
# phase is complete with the process's returncode.
3221 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3222 if post_phase_cmds is not None:
3223 post_phase = MiscFunctionsProcess(background=self.background,
3224 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3225 scheduler=self.scheduler, settings=settings)
3226 self._start_task(post_phase, self._post_phase_exit)
3229 self.returncode = ebuild_process.returncode
3230 self._current_task = None
3233 def _post_phase_exit(self, post_phase):
3234 if self._final_exit(post_phase) != os.EX_OK:
3235 writemsg("!!! post %s failed; exiting.\n" % self.phase,
3237 self._current_task = None
# Runs the "package" phase to build a .tbz2 from the installed image,
# writing to a pid-suffixed temp file and injecting it into the bintree
# on success.
# NOTE(review): excerpt elides the _start/__init__ def lines and the
# try/finally around EbuildProcess._start; annotations cover visible
# lines only.
3241 class EbuildBinpkg(EbuildProcess):
3243 This assumes that src_install() has successfully completed.
3245 __slots__ = ("_binpkg_tmpfile",)
# Constructor/start (def line elided): force phase="package" on the
# porttree.
3248 self.phase = "package"
3249 self.tree = "porttree"
3251 root_config = pkg.root_config
3252 portdb = root_config.trees["porttree"].dbapi
3253 bintree = root_config.trees["bintree"]
3254 ebuild_path = portdb.findname(self.pkg.cpv)
3255 settings = self.settings
3256 debug = settings.get("PORTAGE_DEBUG") == "1"
# Guard against a same-named file already present in the bintree, then
# stage the package into a temp file unique to this pid.
3258 bintree.prevent_collision(pkg.cpv)
3259 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3260 pkg.cpv + ".tbz2." + str(os.getpid()))
3261 self._binpkg_tmpfile = binpkg_tmpfile
3262 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3263 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3266 EbuildProcess._start(self)
# Remove the tmpfile setting once the phase has been started so it does
# not leak into later phases.
3268 settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3270 def _set_returncode(self, wait_retval):
3271 EbuildProcess._set_returncode(self, wait_retval)
3274 bintree = pkg.root_config.trees["bintree"]
3275 binpkg_tmpfile = self._binpkg_tmpfile
# On success, atomically move the temp file into the bintree and update
# its metadata index.
3276 if self.returncode == os.EX_OK:
3277 bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
# Synchronous wrapper around portage.merge() for a freshly built image;
# on success records the package in the world file (via world_atom) and
# logs completion.
# NOTE(review): excerpt elides the execute() def line and its tail (the
# return of retval, _log_success call site); annotations cover visible
# lines only.
3279 class EbuildMerge(SlotObject):
3281 __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3282 "pkg", "pkg_count", "pkg_path", "pretend",
3283 "scheduler", "settings", "tree", "world_atom")
# execute (def line elided): merge the image dir into the live root.
3286 root_config = self.pkg.root_config
3287 settings = self.settings
3288 retval = portage.merge(settings["CATEGORY"],
3289 settings["PF"], settings["D"],
3290 os.path.join(settings["PORTAGE_BUILDDIR"],
3291 "build-info"), root_config.root, settings,
3292 myebuild=settings["EBUILD"],
3293 mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3294 vartree=root_config.trees["vartree"],
3295 prev_mtimes=self.ldpath_mtimes,
3296 scheduler=self.scheduler,
3297 blockers=self.find_blockers)
3299 if retval == os.EX_OK:
# Record the package in the world set if appropriate.
3300 self.world_atom(self.pkg)
# Log the post-merge summary, including the post-build clean notice
# unless FEATURES=noclean is set.
3305 def _log_success(self):
3307 pkg_count = self.pkg_count
3308 pkg_path = self.pkg_path
3309 logger = self.logger
3310 if "noclean" not in self.settings.features:
3311 short_msg = "emerge: (%s of %s) %s Clean Post" % \
3312 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3313 logger.log((" === (%s of %s) " + \
3314 "Post-Build Cleaning (%s::%s)") % \
3315 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3316 short_msg=short_msg)
3317 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3318 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# AsynchronousTask that unmerges one installed package synchronously in
# _start, translating UninstallFailure into a returncode.
# NOTE(review): excerpt elides the _start def line, the try: line, the
# wait() call, and the log-file writing tail of _writemsg_level;
# annotations cover visible lines only.
3320 class PackageUninstall(AsynchronousTask):
3322 __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
# _start (def line elided): perform the unmerge; clean_world=0 and
# clean_delay=0 because the caller manages world-file updates and pacing.
3326 unmerge(self.pkg.root_config, self.opts, "unmerge",
3327 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3328 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3329 writemsg_level=self._writemsg_level)
3330 except UninstallFailure, e:
3331 self.returncode = e.status
3333 self.returncode = os.EX_OK
# Route unmerge output either to the console or to PORTAGE_LOG_FILE,
# suppressing sub-warning messages when running in the background.
3336 def _writemsg_level(self, msg, level=0, noiselevel=0):
3338 log_path = self.settings.get("PORTAGE_LOG_FILE")
3339 background = self.background
3341 if log_path is None:
3342 if not (background and level < logging.WARNING):
3343 portage.util.writemsg_level(msg,
3344 level=level, noiselevel=noiselevel)
3347 portage.util.writemsg_level(msg,
3348 level=level, noiselevel=noiselevel)
3350 f = open(log_path, 'a')
# CompositeTask installing a binary package: optional prefetch wait ->
# fetch (--getbinpkg) -> digest verify -> clean -> unpack xpak metadata ->
# setup phase -> extract image -> merge. Mirrors EbuildBuild's callback
# chain for the binary case.
# NOTE(review): excerpt elides many lines (method def lines for _start
# and _merge, branch bodies, try/finally scaffolding, file .close()
# calls); annotations describe only the visible lines -- confirm control
# flow against the full source.
3356 class Binpkg(CompositeTask):
3358 __slots__ = ("find_blockers",
3359 "ldpath_mtimes", "logger", "opts",
3360 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3361 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3362 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
# Console + log-file output helper (log write tail elided).
3364 def _writemsg_level(self, msg, level=0, noiselevel=0):
3366 if not self.background:
3367 portage.util.writemsg_level(msg,
3368 level=level, noiselevel=noiselevel)
3370 log_path = self.settings.get("PORTAGE_LOG_FILE")
3371 if log_path is not None:
3372 f = open(log_path, 'a')
# _start (def line elided): compute build-dir paths from
# PORTAGE_TMPDIR and either wait for the prefetcher or proceed.
3381 settings = self.settings
3382 settings.setcpv(pkg)
3383 self._tree = "bintree"
3384 self._bintree = self.pkg.root_config.trees[self._tree]
3385 self._verify = not self.opts.pretend
3387 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3388 "portage", pkg.category, pkg.pf)
3389 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3390 pkg=pkg, settings=settings)
3391 self._image_dir = os.path.join(dir_path, "image")
3392 self._infloc = os.path.join(dir_path, "build-info")
3393 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3394 settings["EBUILD"] = self._ebuild_path
3395 debug = settings.get("PORTAGE_DEBUG") == "1"
3396 portage.doebuild_environment(self._ebuild_path, "setup",
3397 settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3398 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3400 # The prefetcher has already completed or it
3401 # could be running now. If it's running now,
3402 # wait for it to complete since it holds
3403 # a lock on the file being fetched. The
3404 # portage.locks functions are only designed
3405 # to work between separate processes. Since
3406 # the lock is held by the current process,
3407 # use the scheduler and fetcher methods to
3408 # synchronize with the fetcher.
3409 prefetcher = self.prefetcher
3410 if prefetcher is None:
3412 elif not prefetcher.isAlive():
3414 elif prefetcher.poll() is None:
3416 waiting_msg = ("Fetching '%s' " + \
3417 "in the background. " + \
3418 "To view fetch progress, run `tail -f " + \
3419 "/var/log/emerge-fetch.log` in another " + \
3420 "terminal.") % prefetcher.pkg_path
3421 msg_prefix = colorize("GOOD", " * ")
3422 from textwrap import wrap
3423 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3424 for line in wrap(waiting_msg, 65))
3425 if not self.background:
3426 writemsg(waiting_msg, noiselevel=-1)
3428 self._current_task = prefetcher
3429 prefetcher.addExitListener(self._prefetch_exit)
3432 self._prefetch_exit(prefetcher)
# After prefetch: lock the build dir (unless pretend/fetchonly) and
# start a BinpkgFetcher when the package is remote.
3434 def _prefetch_exit(self, prefetcher):
3437 pkg_count = self.pkg_count
3438 if not (self.opts.pretend or self.opts.fetchonly):
3439 self._build_dir.lock()
3440 # If necessary, discard old log so that we don't
3442 self._build_dir.clean_log()
3443 # Initialze PORTAGE_LOG_FILE.
3444 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3445 fetcher = BinpkgFetcher(background=self.background,
3446 logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3447 pretend=self.opts.pretend, scheduler=self.scheduler)
3448 pkg_path = fetcher.pkg_path
3449 self._pkg_path = pkg_path
3451 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3453 msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3454 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3455 short_msg = "emerge: (%s of %s) %s Fetch" % \
3456 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3457 self.logger.log(msg, short_msg=short_msg)
3458 self._start_task(fetcher, self._fetcher_exit)
3461 self._fetcher_exit(fetcher)
# Fetch done: remember whether we actually downloaded, then verify the
# digest unless running in pretend mode.
3463 def _fetcher_exit(self, fetcher):
3465 # The fetcher only has a returncode when
3466 # --getbinpkg is enabled.
3467 if fetcher.returncode is not None:
3468 self._fetched_pkg = True
3469 if self._default_exit(fetcher) != os.EX_OK:
3470 self._unlock_builddir()
3474 if self.opts.pretend:
3475 self._current_task = None
3476 self.returncode = os.EX_OK
3484 logfile = self.settings.get("PORTAGE_LOG_FILE")
3485 verifier = BinpkgVerifier(background=self.background,
3486 logfile=logfile, pkg=self.pkg)
3487 self._start_task(verifier, self._verifier_exit)
3490 self._verifier_exit(verifier)
# Verified: inject a freshly fetched package into the local bintree,
# stop here for fetchonly, otherwise run a "clean" phase before unpack.
3492 def _verifier_exit(self, verifier):
3493 if verifier is not None and \
3494 self._default_exit(verifier) != os.EX_OK:
3495 self._unlock_builddir()
3499 logger = self.logger
3501 pkg_count = self.pkg_count
3502 pkg_path = self._pkg_path
3504 if self._fetched_pkg:
3505 self._bintree.inject(pkg.cpv, filename=pkg_path)
3507 if self.opts.fetchonly:
3508 self._current_task = None
3509 self.returncode = os.EX_OK
3513 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3514 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3515 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3516 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3517 logger.log(msg, short_msg=short_msg)
3520 settings = self.settings
3521 ebuild_phase = EbuildPhase(background=self.background,
3522 pkg=pkg, phase=phase, scheduler=self.scheduler,
3523 settings=settings, tree=self._tree)
3525 self._start_task(ebuild_phase, self._clean_exit)
# Clean done: recreate the build dirs, unpack xpak metadata into
# build-info (synthesizing any missing CATEGORY/PF entries), record the
# package md5, and schedule the "setup" phase.
3527 def _clean_exit(self, clean_phase):
3528 if self._default_exit(clean_phase) != os.EX_OK:
3529 self._unlock_builddir()
3533 dir_path = self._build_dir.dir_path
3535 infloc = self._infloc
3537 pkg_path = self._pkg_path
3540 for mydir in (dir_path, self._image_dir, infloc):
3541 portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3542 gid=portage.data.portage_gid, mode=dir_mode)
3544 # This initializes PORTAGE_LOG_FILE.
3545 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3546 self._writemsg_level(">>> Extracting info\n")
3548 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3549 check_missing_metadata = ("CATEGORY", "PF")
3550 missing_metadata = set()
3551 for k in check_missing_metadata:
3552 v = pkg_xpak.getfile(k)
3554 missing_metadata.add(k)
3556 pkg_xpak.unpackinfo(infloc)
3557 for k in missing_metadata:
# Write fallback values for metadata keys absent from the xpak
# (value-derivation lines elided around 3558-3564).
3565 f = open(os.path.join(infloc, k), 'wb')
3571 # Store the md5sum in the vdb.
3572 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3574 f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3578 # This gives bashrc users an opportunity to do various things
3579 # such as remove binary packages after they're installed.
3580 settings = self.settings
3581 settings.setcpv(self.pkg)
3582 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3583 settings.backup_changes("PORTAGE_BINPKG_FILE")
3586 setup_phase = EbuildPhase(background=self.background,
3587 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3588 settings=settings, tree=self._tree)
# Setup goes through the scheduler's dedicated setup queue.
3590 setup_phase.addExitListener(self._setup_exit)
3591 self._current_task = setup_phase
3592 self.scheduler.scheduleSetup(setup_phase)
# Setup done: extract the package image asynchronously.
3594 def _setup_exit(self, setup_phase):
3595 if self._default_exit(setup_phase) != os.EX_OK:
3596 self._unlock_builddir()
3600 extractor = BinpkgExtractorAsync(background=self.background,
3601 image_dir=self._image_dir,
3602 pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3603 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3604 self._start_task(extractor, self._extractor_exit)
3606 def _extractor_exit(self, extractor):
3607 if self._final_exit(extractor) != os.EX_OK:
3608 self._unlock_builddir()
3609 writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
# Flush elog and release the lock (no-op in pretend/fetchonly mode,
# where the lock was never taken).
3613 def _unlock_builddir(self):
3614 if self.opts.pretend or self.opts.fetchonly:
3616 portage.elog.elog_process(self.pkg.cpv, self.settings)
3617 self._build_dir.unlock()
# _merge (def line elided): run EbuildMerge over the extracted image,
# exposing PORTAGE_BINPKG_FILE to bashrc hooks for the duration.
3621 # This gives bashrc users an opportunity to do various things
3622 # such as remove binary packages after they're installed.
3623 settings = self.settings
3624 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3625 settings.backup_changes("PORTAGE_BINPKG_FILE")
3627 merge = EbuildMerge(find_blockers=self.find_blockers,
3628 ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3629 pkg=self.pkg, pkg_count=self.pkg_count,
3630 pkg_path=self._pkg_path, scheduler=self.scheduler,
3631 settings=settings, tree=self._tree, world_atom=self.world_atom)
3634 retval = merge.execute()
3636 settings.pop("PORTAGE_BINPKG_FILE", None)
3637 self._unlock_builddir()
# SpawnProcess that downloads one binary package with the configured
# FETCHCOMMAND/RESUMECOMMAND, supports distlocks on the target file, and
# syncs the local mtime to the remote index afterwards.
# NOTE(review): excerpt elides several lines (the _start def line,
# resume-flag computation, try/except around unlink/stat/utime, and the
# lock()/unlock() def lines); annotations cover visible lines only.
3640 class BinpkgFetcher(SpawnProcess):
3642 __slots__ = ("pkg", "pretend",
3643 "locked", "pkg_path", "_lock_obj")
3645 def __init__(self, **kwargs):
3646 SpawnProcess.__init__(self, **kwargs)
# Destination path comes from the bintree layout for this cpv.
3648 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
# _start (def line elided): build the fetch command and spawn it.
3656 pretend = self.pretend
3657 bintree = pkg.root_config.trees["bintree"]
3658 settings = bintree.settings
3659 use_locks = "distlocks" in settings.features
3660 pkg_path = self.pkg_path
3663 portage.util.ensure_dirs(os.path.dirname(pkg_path))
# A partial download listed in bintree.invalids may be resumed; any
# other pre-existing file is removed first (unlink elided).
3666 exists = os.path.exists(pkg_path)
3667 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3668 if not (pretend or resume):
3669 # Remove existing file or broken symlink.
# Compute the download URI: prefer the remote index PATH entry, fall
# back to cpv-based naming, else derive from PORTAGE_BINHOST.
3675 # urljoin doesn't work correctly with
3676 # unrecognized protocols like sftp
3677 if bintree._remote_has_index:
3678 rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3680 rel_uri = pkg.cpv + ".tbz2"
3681 uri = bintree._remote_base_uri.rstrip("/") + \
3682 "/" + rel_uri.lstrip("/")
3684 uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3685 "/" + pkg.pf + ".tbz2"
# Pretend mode: just print the URI and finish.
3688 portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3689 self.returncode = os.EX_OK
# Select a protocol-specific fetch/resume command when configured,
# otherwise the generic one.
3693 protocol = urlparse.urlparse(uri)[0]
3694 fcmd_prefix = "FETCHCOMMAND"
3696 fcmd_prefix = "RESUMECOMMAND"
3697 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3699 fcmd = settings.get(fcmd_prefix)
3702 "DISTDIR" : os.path.dirname(pkg_path),
3704 "FILE" : os.path.basename(pkg_path)
3707 fetch_env = dict(settings.iteritems())
3708 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3709 for x in shlex.split(fcmd)]
3711 if self.fd_pipes is None:
3713 fd_pipes = self.fd_pipes
3715 # Redirect all output to stdout since some fetchers like
3716 # wget pollute stderr (if portage detects a problem then it
3717 # can send it's own message to stderr).
3718 fd_pipes.setdefault(0, sys.stdin.fileno())
3719 fd_pipes.setdefault(1, sys.stdout.fileno())
3720 fd_pipes.setdefault(2, sys.stdout.fileno())
3722 self.args = fetch_args
3723 self.env = fetch_env
3724 SpawnProcess._start(self)
3726 def _set_returncode(self, wait_retval):
3727 SpawnProcess._set_returncode(self, wait_retval)
3728 if self.returncode == os.EX_OK:
3729 # If possible, update the mtime to match the remote package if
3730 # the fetcher didn't already do it automatically.
3731 bintree = self.pkg.root_config.trees["bintree"]
3732 if bintree._remote_has_index:
3733 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3734 if remote_mtime is not None:
3736 remote_mtime = long(remote_mtime)
3741 local_mtime = long(os.stat(self.pkg_path).st_mtime)
3745 if remote_mtime != local_mtime:
3747 os.utime(self.pkg_path,
3748 (remote_mtime, remote_mtime))
# lock (def line elided): take a distlock on the package file.
3757 This raises an AlreadyLocked exception if lock() is called
3758 while a lock is already held. In order to avoid this, call
3759 unlock() or check whether the "locked" attribute is True
3760 or False before calling lock().
3762 if self._lock_obj is not None:
3763 raise self.AlreadyLocked((self._lock_obj,))
3765 self._lock_obj = portage.locks.lockfile(
3766 self.pkg_path, wantnewlockfile=1)
# Raised when lock() is invoked while a lock is already held.
3769 class AlreadyLocked(portage.exception.PortageException):
# unlock (def line elided): release the distlock if held.
3773 if self._lock_obj is None:
3775 portage.locks.unlockfile(self._lock_obj)
3776 self._lock_obj = None
3779 class BinpkgVerifier(AsynchronousTask):
3780 __slots__ = ("logfile", "pkg",)
3784 Note: Unlike a normal AsynchronousTask.start() method,
3785 this one does all work is synchronously. The returncode
3786 attribute will be set before it returns.
3790 root_config = pkg.root_config
3791 bintree = root_config.trees["bintree"]
3793 stdout_orig = sys.stdout
3794 stderr_orig = sys.stderr
3796 if self.background and self.logfile is not None:
3797 log_file = open(self.logfile, 'a')
3799 if log_file is not None:
3800 sys.stdout = log_file
3801 sys.stderr = log_file
3803 bintree.digestCheck(pkg)
3804 except portage.exception.FileNotFound:
3805 writemsg("!!! Fetching Binary failed " + \
3806 "for '%s'\n" % pkg.cpv, noiselevel=-1)
3808 except portage.exception.DigestException, e:
3809 writemsg("\n!!! Digest verification failed:\n",
3811 writemsg("!!! %s\n" % e.value[0],
3813 writemsg("!!! Reason: %s\n" % e.value[1],
3815 writemsg("!!! Got: %s\n" % e.value[2],
3817 writemsg("!!! Expected: %s\n" % e.value[3],
3820 if rval != os.EX_OK:
3821 pkg_path = bintree.getname(pkg.cpv)
3822 head, tail = os.path.split(pkg_path)
3823 temp_filename = portage._checksum_failure_temp_file(head, tail)
3824 writemsg("File renamed to '%s'\n" % (temp_filename,),
3827 sys.stdout = stdout_orig
3828 sys.stderr = stderr_orig
3829 if log_file is not None:
3832 self.returncode = rval
3835 class BinpkgPrefetcher(CompositeTask):
3837 __slots__ = ("pkg",) + \
3838 ("pkg_path", "_bintree",)
3841 self._bintree = self.pkg.root_config.trees["bintree"]
3842 fetcher = BinpkgFetcher(background=self.background,
3843 logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3844 scheduler=self.scheduler)
3845 self.pkg_path = fetcher.pkg_path
3846 self._start_task(fetcher, self._fetcher_exit)
3848 def _fetcher_exit(self, fetcher):
3850 if self._default_exit(fetcher) != os.EX_OK:
3854 verifier = BinpkgVerifier(background=self.background,
3855 logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3856 self._start_task(verifier, self._verifier_exit)
3858 def _verifier_exit(self, verifier):
3859 if self._default_exit(verifier) != os.EX_OK:
3863 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3865 self._current_task = None
3866 self.returncode = os.EX_OK
class BinpkgExtractorAsync(SpawnProcess):
	"""Asynchronously unpack a binary package archive into image_dir
	via a bash pipeline (bzip2 decompress piped into tar)."""

	__slots__ = ("image_dir", "pkg", "pkg_path")

	_shell_binary = portage.const.BASH_BINARY

	def _start(self):
		# tar -xp preserves permissions in the extracted image; the
		# paths are shell-quoted since they pass through bash -c.
		pipeline = "bzip2 -dqc -- %s | tar -xp -C %s -f -" % (
			portage._shell_quote(self.pkg_path),
			portage._shell_quote(self.image_dir))
		self.args = [self._shell_binary, "-c", pipeline]
		self.env = self.pkg.root_config.settings.environ()
		SpawnProcess._start(self)
3884 class MergeListItem(CompositeTask):
3887 TODO: For parallel scheduling, everything here needs asynchronous
3888 execution support (start, poll, and wait methods).
3891 __slots__ = ("args_set",
3892 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3893 "find_blockers", "logger", "mtimedb", "pkg",
3894 "pkg_count", "pkg_to_replace", "prefetcher",
3895 "settings", "statusMessage", "world_atom") + \
3901 build_opts = self.build_opts
3904 # uninstall, executed by self.merge()
3905 self.returncode = os.EX_OK
3909 args_set = self.args_set
3910 find_blockers = self.find_blockers
3911 logger = self.logger
3912 mtimedb = self.mtimedb
3913 pkg_count = self.pkg_count
3914 scheduler = self.scheduler
3915 settings = self.settings
3916 world_atom = self.world_atom
3917 ldpath_mtimes = mtimedb["ldpath"]
3919 action_desc = "Emerging"
3921 if pkg.type_name == "binary":
3922 action_desc += " binary"
3924 if build_opts.fetchonly:
3925 action_desc = "Fetching"
3927 msg = "%s (%s of %s) %s" % \
3929 colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3930 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3931 colorize("GOOD", pkg.cpv))
3933 portdb = pkg.root_config.trees["porttree"].dbapi
3934 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3935 if portdir_repo_name:
3936 pkg_repo_name = pkg.metadata.get("repository")
3937 if pkg_repo_name != portdir_repo_name:
3938 if not pkg_repo_name:
3939 pkg_repo_name = "unknown repo"
3940 msg += " from %s" % pkg_repo_name
3943 msg += " %s %s" % (preposition, pkg.root)
3945 if not build_opts.pretend:
3946 self.statusMessage(msg)
3947 logger.log(" >>> emerge (%s of %s) %s to %s" % \
3948 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3950 if pkg.type_name == "ebuild":
3952 build = EbuildBuild(args_set=args_set,
3953 background=self.background,
3954 config_pool=self.config_pool,
3955 find_blockers=find_blockers,
3956 ldpath_mtimes=ldpath_mtimes, logger=logger,
3957 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3958 prefetcher=self.prefetcher, scheduler=scheduler,
3959 settings=settings, world_atom=world_atom)
3961 self._install_task = build
3962 self._start_task(build, self._default_final_exit)
3965 elif pkg.type_name == "binary":
3967 binpkg = Binpkg(background=self.background,
3968 find_blockers=find_blockers,
3969 ldpath_mtimes=ldpath_mtimes, logger=logger,
3970 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3971 prefetcher=self.prefetcher, settings=settings,
3972 scheduler=scheduler, world_atom=world_atom)
3974 self._install_task = binpkg
3975 self._start_task(binpkg, self._default_final_exit)
3979 self._install_task.poll()
3980 return self.returncode
3983 self._install_task.wait()
3984 return self.returncode
3989 build_opts = self.build_opts
3990 find_blockers = self.find_blockers
3991 logger = self.logger
3992 mtimedb = self.mtimedb
3993 pkg_count = self.pkg_count
3994 prefetcher = self.prefetcher
3995 scheduler = self.scheduler
3996 settings = self.settings
3997 world_atom = self.world_atom
3998 ldpath_mtimes = mtimedb["ldpath"]
4001 if not (build_opts.buildpkgonly or \
4002 build_opts.fetchonly or build_opts.pretend):
4004 uninstall = PackageUninstall(background=self.background,
4005 ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
4006 pkg=pkg, scheduler=scheduler, settings=settings)
4009 retval = uninstall.wait()
4010 if retval != os.EX_OK:
4014 if build_opts.fetchonly or \
4015 build_opts.buildpkgonly:
4016 return self.returncode
4018 retval = self._install_task.install()
4021 class PackageMerge(AsynchronousTask):
4023 TODO: Implement asynchronous merge so that the scheduler can
4024 run while a merge is executing.
4027 __slots__ = ("merge",)
4031 pkg = self.merge.pkg
4032 pkg_count = self.merge.pkg_count
4035 action_desc = "Uninstalling"
4036 preposition = "from"
4039 action_desc = "Installing"
4041 counter_str = "(%s of %s) " % \
4042 (colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
4043 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)))
4048 colorize("GOOD", pkg.cpv))
4051 msg += " %s %s" % (preposition, pkg.root)
4053 if not self.merge.build_opts.fetchonly and \
4054 not self.merge.build_opts.pretend and \
4055 not self.merge.build_opts.buildpkgonly:
4056 self.merge.statusMessage(msg)
4058 self.returncode = self.merge.merge()
class DependencyArg(object):
	"""Base class for command-line dependency arguments (atoms,
	packages, sets), pairing the raw argument string with the
	root configuration it applies to."""

	def __init__(self, arg=None, root_config=None):
		self.arg = arg
		self.root_config = root_config

	def __str__(self):
		return str(self.arg)
class AtomArg(DependencyArg):
	"""Argument wrapping a single dependency atom."""

	def __init__(self, atom=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		self.atom = atom
		# Normalize plain strings into Atom instances.
		if not isinstance(self.atom, portage.dep.Atom):
			self.atom = portage.dep.Atom(self.atom)
		# Uniform interface with SetArg: a tuple of member atoms.
		self.set = (self.atom,)
class PackageArg(DependencyArg):
	"""Argument that refers to one specific Package instance."""

	def __init__(self, package=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		self.package = package
		# An "=" atom pins the exact cpv of the given package.
		exact_atom = portage.dep.Atom("=" + package.cpv)
		self.atom = exact_atom
		self.set = (exact_atom,)
class SetArg(DependencyArg):
	"""Argument naming a package set (e.g. @world); the set name is the
	raw argument with the set prefix stripped."""

	def __init__(self, set=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		self.set = set
		self.name = self.arg[len(SETPREFIX):]
class Dependency(SlotObject):
	"""One dependency edge in the graph: parent requires atom (or a
	blocker) within root, at a given depth from the arguments."""

	__slots__ = ("atom", "blocker", "depth",
		"parent", "onlydeps", "priority", "root")

	def __init__(self, **kwargs):
		SlotObject.__init__(self, **kwargs)
		# Fill in defaults for attributes the caller left unset.
		if self.priority is None:
			self.priority = DepPriority()
		if self.depth is None:
			self.depth = 0
4100 class BlockerCache(portage.cache.mappings.MutableMapping):
4101 """This caches blockers of installed packages so that dep_check does not
4102 have to be done for every single installed package on every invocation of
4103 emerge. The cache is invalidated whenever it is detected that something
4104 has changed that might alter the results of dep_check() calls:
4105 1) the set of installed packages (including COUNTER) has changed
4106 2) the old-style virtuals have changed
4109 # Number of uncached packages to trigger cache update, since
4110 # it's wasteful to update it for every vdb change.
4111 _cache_threshold = 5
4113 class BlockerData(object):
4115 __slots__ = ("__weakref__", "atoms", "counter")
4117 def __init__(self, counter, atoms):
4118 self.counter = counter
4121 def __init__(self, myroot, vardb):
4123 self._virtuals = vardb.settings.getvirtuals()
4124 self._cache_filename = os.path.join(myroot,
4125 portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4126 self._cache_version = "1"
4127 self._cache_data = None
4128 self._modified = set()
4133 f = open(self._cache_filename, mode='rb')
4134 mypickle = pickle.Unpickler(f)
4136 mypickle.find_global = None
4137 except AttributeError:
4138 # TODO: If py3k, override Unpickler.find_class().
4140 self._cache_data = mypickle.load()
4143 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4144 if isinstance(e, pickle.UnpicklingError):
4145 writemsg("!!! Error loading '%s': %s\n" % \
4146 (self._cache_filename, str(e)), noiselevel=-1)
4149 cache_valid = self._cache_data and \
4150 isinstance(self._cache_data, dict) and \
4151 self._cache_data.get("version") == self._cache_version and \
4152 isinstance(self._cache_data.get("blockers"), dict)
4154 # Validate all the atoms and counters so that
4155 # corruption is detected as soon as possible.
4156 invalid_items = set()
4157 for k, v in self._cache_data["blockers"].iteritems():
4158 if not isinstance(k, basestring):
4159 invalid_items.add(k)
4162 if portage.catpkgsplit(k) is None:
4163 invalid_items.add(k)
4165 except portage.exception.InvalidData:
4166 invalid_items.add(k)
4168 if not isinstance(v, tuple) or \
4170 invalid_items.add(k)
4173 if not isinstance(counter, (int, long)):
4174 invalid_items.add(k)
4176 if not isinstance(atoms, (list, tuple)):
4177 invalid_items.add(k)
4179 invalid_atom = False
4181 if not isinstance(atom, basestring):
4184 if atom[:1] != "!" or \
4185 not portage.isvalidatom(
4186 atom, allow_blockers=True):
4190 invalid_items.add(k)
4193 for k in invalid_items:
4194 del self._cache_data["blockers"][k]
4195 if not self._cache_data["blockers"]:
4199 self._cache_data = {"version":self._cache_version}
4200 self._cache_data["blockers"] = {}
4201 self._cache_data["virtuals"] = self._virtuals
4202 self._modified.clear()
4205 """If the current user has permission and the internal blocker cache
4206 been updated, save it to disk and mark it unmodified. This is called
4207 by emerge after it has proccessed blockers for all installed packages.
4208 Currently, the cache is only written if the user has superuser
4209 privileges (since that's required to obtain a lock), but all users
4210 have read access and benefit from faster blocker lookups (as long as
4211 the entire cache is still valid). The cache is stored as a pickled
4212 dict object with the following format:
4216 "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4217 "virtuals" : vardb.settings.getvirtuals()
4220 if len(self._modified) >= self._cache_threshold and \
4223 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4224 pickle.dump(self._cache_data, f, protocol=2)
4226 portage.util.apply_secpass_permissions(
4227 self._cache_filename, gid=portage.portage_gid, mode=0644)
4228 except (IOError, OSError), e:
4230 self._modified.clear()
4232 def __setitem__(self, cpv, blocker_data):
4234 Update the cache and mark it as modified for a future call to
4237 @param cpv: Package for which to cache blockers.
4239 @param blocker_data: An object with counter and atoms attributes.
4240 @type blocker_data: BlockerData
4242 self._cache_data["blockers"][cpv] = \
4243 (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4244 self._modified.add(cpv)
4247 if self._cache_data is None:
4248 # triggered by python-trace
4250 return iter(self._cache_data["blockers"])
4252 def __delitem__(self, cpv):
4253 del self._cache_data["blockers"][cpv]
4255 def __getitem__(self, cpv):
4258 @returns: An object with counter and atoms attributes.
4260 return self.BlockerData(*self._cache_data["blockers"][cpv])
4262 class BlockerDB(object):
4264 def __init__(self, root_config):
4265 self._root_config = root_config
4266 self._vartree = root_config.trees["vartree"]
4267 self._portdb = root_config.trees["porttree"].dbapi
4269 self._dep_check_trees = None
4270 self._fake_vartree = None
4272 def _get_fake_vartree(self, acquire_lock=0):
4273 fake_vartree = self._fake_vartree
4274 if fake_vartree is None:
4275 fake_vartree = FakeVartree(self._root_config,
4276 acquire_lock=acquire_lock)
4277 self._fake_vartree = fake_vartree
4278 self._dep_check_trees = { self._vartree.root : {
4279 "porttree" : fake_vartree,
4280 "vartree" : fake_vartree,
4283 fake_vartree.sync(acquire_lock=acquire_lock)
4286 def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4287 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4288 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4289 settings = self._vartree.settings
4290 stale_cache = set(blocker_cache)
4291 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4292 dep_check_trees = self._dep_check_trees
4293 vardb = fake_vartree.dbapi
4294 installed_pkgs = list(vardb)
4296 for inst_pkg in installed_pkgs:
4297 stale_cache.discard(inst_pkg.cpv)
4298 cached_blockers = blocker_cache.get(inst_pkg.cpv)
4299 if cached_blockers is not None and \
4300 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4301 cached_blockers = None
4302 if cached_blockers is not None:
4303 blocker_atoms = cached_blockers.atoms
4305 # Use aux_get() to trigger FakeVartree global
4306 # updates on *DEPEND when appropriate.
4307 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4309 portage.dep._dep_check_strict = False
4310 success, atoms = portage.dep_check(depstr,
4311 vardb, settings, myuse=inst_pkg.use.enabled,
4312 trees=dep_check_trees, myroot=inst_pkg.root)
4314 portage.dep._dep_check_strict = True
4316 pkg_location = os.path.join(inst_pkg.root,
4317 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4318 portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4319 (pkg_location, atoms), noiselevel=-1)
4322 blocker_atoms = [atom for atom in atoms \
4323 if atom.startswith("!")]
4324 blocker_atoms.sort()
4325 counter = long(inst_pkg.metadata["COUNTER"])
4326 blocker_cache[inst_pkg.cpv] = \
4327 blocker_cache.BlockerData(counter, blocker_atoms)
4328 for cpv in stale_cache:
4329 del blocker_cache[cpv]
4330 blocker_cache.flush()
4332 blocker_parents = digraph()
4334 for pkg in installed_pkgs:
4335 for blocker_atom in blocker_cache[pkg.cpv].atoms:
4336 blocker_atom = blocker_atom.lstrip("!")
4337 blocker_atoms.append(blocker_atom)
4338 blocker_parents.add(blocker_atom, pkg)
4340 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4341 blocking_pkgs = set()
4342 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4343 blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4345 # Check for blockers in the other direction.
4346 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4348 portage.dep._dep_check_strict = False
4349 success, atoms = portage.dep_check(depstr,
4350 vardb, settings, myuse=new_pkg.use.enabled,
4351 trees=dep_check_trees, myroot=new_pkg.root)
4353 portage.dep._dep_check_strict = True
4355 # We should never get this far with invalid deps.
4356 show_invalid_depstring_notice(new_pkg, depstr, atoms)
4359 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4362 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4363 for inst_pkg in installed_pkgs:
4365 blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4366 except (portage.exception.InvalidDependString, StopIteration):
4368 blocking_pkgs.add(inst_pkg)
4370 return blocking_pkgs
def show_invalid_depstring_notice(parent_node, depstring, error_msg):
	"""Emit a fatal error describing an invalid/corrupt *DEPEND string.

	parent_node is a (type_name, root, cpv, operation) tuple. Installed
	packages ("nomerge") get uninstall/reinstall advice, while packages
	being merged get a report-to-maintainer message.
	"""
	msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
		"\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
	p_type, p_root, p_key, p_status = parent_node
	msg = []
	if p_status == "nomerge":
		category, pf = portage.catsplit(p_key)
		pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
		msg.append("Portage is unable to process the dependencies of the ")
		msg.append("'%s' package. " % p_key)
		msg.append("In order to correct this problem, the package ")
		msg.append("should be uninstalled, reinstalled, or upgraded. ")
		msg.append("As a temporary workaround, the --nodeps option can ")
		msg.append("be used to ignore all dependencies. For reference, ")
		msg.append("the problematic dependencies can be found in the ")
		msg.append("*DEPEND files located in '%s/'." % pkg_location)
	else:
		msg.append("This package can not be installed. ")
		msg.append("Please notify the '%s' package maintainer " % p_key)
		msg.append("about this problem.")

	# Wrap the advice to 72 columns; msg1 retains its own formatting.
	msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
	writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4397 class PackageVirtualDbapi(portage.dbapi):
4399 A dbapi-like interface class that represents the state of the installed
4400 package database as new packages are installed, replacing any packages
4401 that previously existed in the same slot. The main difference between
4402 this class and fakedbapi is that this one uses Package instances
4403 internally (passed in via cpv_inject() and cpv_remove() calls).
4405 def __init__(self, settings):
4406 portage.dbapi.__init__(self)
4407 self.settings = settings
4408 self._match_cache = {}
4414 Remove all packages.
4418 self._cp_map.clear()
4419 self._cpv_map.clear()
4422 obj = PackageVirtualDbapi(self.settings)
4423 obj._match_cache = self._match_cache.copy()
4424 obj._cp_map = self._cp_map.copy()
4425 for k, v in obj._cp_map.iteritems():
4426 obj._cp_map[k] = v[:]
4427 obj._cpv_map = self._cpv_map.copy()
4431 return self._cpv_map.itervalues()
4433 def __contains__(self, item):
4434 existing = self._cpv_map.get(item.cpv)
4435 if existing is not None and \
4440 def get(self, item, default=None):
4441 cpv = getattr(item, "cpv", None)
4445 type_name, root, cpv, operation = item
4447 existing = self._cpv_map.get(cpv)
4448 if existing is not None and \
4453 def match_pkgs(self, atom):
4454 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4456 def _clear_cache(self):
4457 if self._categories is not None:
4458 self._categories = None
4459 if self._match_cache:
4460 self._match_cache = {}
4462 def match(self, origdep, use_cache=1):
4463 result = self._match_cache.get(origdep)
4464 if result is not None:
4466 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4467 self._match_cache[origdep] = result
4470 def cpv_exists(self, cpv):
4471 return cpv in self._cpv_map
4473 def cp_list(self, mycp, use_cache=1):
4474 cachelist = self._match_cache.get(mycp)
4475 # cp_list() doesn't expand old-style virtuals
4476 if cachelist and cachelist[0].startswith(mycp):
4478 cpv_list = self._cp_map.get(mycp)
4479 if cpv_list is None:
4482 cpv_list = [pkg.cpv for pkg in cpv_list]
4483 self._cpv_sort_ascending(cpv_list)
4484 if not (not cpv_list and mycp.startswith("virtual/")):
4485 self._match_cache[mycp] = cpv_list
4489 return list(self._cp_map)
4492 return list(self._cpv_map)
4494 def cpv_inject(self, pkg):
4495 cp_list = self._cp_map.get(pkg.cp)
4498 self._cp_map[pkg.cp] = cp_list
4499 e_pkg = self._cpv_map.get(pkg.cpv)
4500 if e_pkg is not None:
4503 self.cpv_remove(e_pkg)
4504 for e_pkg in cp_list:
4505 if e_pkg.slot_atom == pkg.slot_atom:
4508 self.cpv_remove(e_pkg)
4511 self._cpv_map[pkg.cpv] = pkg
4514 def cpv_remove(self, pkg):
4515 old_pkg = self._cpv_map.get(pkg.cpv)
4518 self._cp_map[pkg.cp].remove(pkg)
4519 del self._cpv_map[pkg.cpv]
4522 def aux_get(self, cpv, wants):
4523 metadata = self._cpv_map[cpv].metadata
4524 return [metadata.get(x, "") for x in wants]
4526 def aux_update(self, cpv, values):
4527 self._cpv_map[cpv].metadata.update(values)
4530 class depgraph(object):
4532 pkg_tree_map = RootConfig.pkg_tree_map
4534 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4536 def __init__(self, settings, trees, myopts, myparams, spinner):
4537 self.settings = settings
4538 self.target_root = settings["ROOT"]
4539 self.myopts = myopts
4540 self.myparams = myparams
4542 if settings.get("PORTAGE_DEBUG", "") == "1":
4544 self.spinner = spinner
4545 self._running_root = trees["/"]["root_config"]
4546 self._opts_no_restart = Scheduler._opts_no_restart
4547 self.pkgsettings = {}
4548 # Maps slot atom to package for each Package added to the graph.
4549 self._slot_pkg_map = {}
4550 # Maps nodes to the reasons they were selected for reinstallation.
4551 self._reinstall_nodes = {}
4554 self._trees_orig = trees
4556 # Contains a filtered view of preferred packages that are selected
4557 # from available repositories.
4558 self._filtered_trees = {}
4559 # Contains installed packages and new packages that have been added
4561 self._graph_trees = {}
4562 # All Package instances
4563 self._pkg_cache = {}
4564 for myroot in trees:
4565 self.trees[myroot] = {}
4566 # Create a RootConfig instance that references
4567 # the FakeVartree instead of the real one.
4568 self.roots[myroot] = RootConfig(
4569 trees[myroot]["vartree"].settings,
4571 trees[myroot]["root_config"].setconfig)
4572 for tree in ("porttree", "bintree"):
4573 self.trees[myroot][tree] = trees[myroot][tree]
4574 self.trees[myroot]["vartree"] = \
4575 FakeVartree(trees[myroot]["root_config"],
4576 pkg_cache=self._pkg_cache)
4577 self.pkgsettings[myroot] = portage.config(
4578 clone=self.trees[myroot]["vartree"].settings)
4579 self._slot_pkg_map[myroot] = {}
4580 vardb = self.trees[myroot]["vartree"].dbapi
4581 preload_installed_pkgs = "--nodeps" not in self.myopts and \
4582 "--buildpkgonly" not in self.myopts
4583 # This fakedbapi instance will model the state that the vdb will
4584 # have after new packages have been installed.
4585 fakedb = PackageVirtualDbapi(vardb.settings)
4586 if preload_installed_pkgs:
4588 self.spinner.update()
4589 # This triggers metadata updates via FakeVartree.
4590 vardb.aux_get(pkg.cpv, [])
4591 fakedb.cpv_inject(pkg)
4593 # Now that the vardb state is cached in our FakeVartree,
4594 # we won't be needing the real vartree cache for awhile.
4595 # To make some room on the heap, clear the vardbapi
4597 trees[myroot]["vartree"].dbapi._clear_cache()
4600 self.mydbapi[myroot] = fakedb
4603 graph_tree.dbapi = fakedb
4604 self._graph_trees[myroot] = {}
4605 self._filtered_trees[myroot] = {}
4606 # Substitute the graph tree for the vartree in dep_check() since we
4607 # want atom selections to be consistent with package selections
4608 # have already been made.
4609 self._graph_trees[myroot]["porttree"] = graph_tree
4610 self._graph_trees[myroot]["vartree"] = graph_tree
4611 def filtered_tree():
4613 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4614 self._filtered_trees[myroot]["porttree"] = filtered_tree
4616 # Passing in graph_tree as the vartree here could lead to better
4617 # atom selections in some cases by causing atoms for packages that
4618 # have been added to the graph to be preferred over other choices.
4619 # However, it can trigger atom selections that result in
4620 # unresolvable direct circular dependencies. For example, this
4621 # happens with gwydion-dylan which depends on either itself or
4622 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4623 # gwydion-dylan-bin needs to be selected in order to avoid a
4624 # an unresolvable direct circular dependency.
4626 # To solve the problem described above, pass in "graph_db" so that
4627 # packages that have been added to the graph are distinguishable
4628 # from other available packages and installed packages. Also, pass
4629 # the parent package into self._select_atoms() calls so that
4630 # unresolvable direct circular dependencies can be detected and
4631 # avoided when possible.
4632 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4633 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4636 portdb = self.trees[myroot]["porttree"].dbapi
4637 bindb = self.trees[myroot]["bintree"].dbapi
4638 vardb = self.trees[myroot]["vartree"].dbapi
4639 # (db, pkg_type, built, installed, db_keys)
4640 if "--usepkgonly" not in self.myopts:
4641 db_keys = list(portdb._aux_cache_keys)
4642 dbs.append((portdb, "ebuild", False, False, db_keys))
4643 if "--usepkg" in self.myopts:
4644 db_keys = list(bindb._aux_cache_keys)
4645 dbs.append((bindb, "binary", True, False, db_keys))
4646 db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4647 dbs.append((vardb, "installed", True, True, db_keys))
4648 self._filtered_trees[myroot]["dbs"] = dbs
4649 if "--usepkg" in self.myopts:
4650 self.trees[myroot]["bintree"].populate(
4651 "--getbinpkg" in self.myopts,
4652 "--getbinpkgonly" in self.myopts)
4655 self.digraph=portage.digraph()
4656 # contains all sets added to the graph
4658 # contains atoms given as arguments
4659 self._sets["args"] = InternalPackageSet()
4660 # contains all atoms from all sets added to the graph, including
4661 # atoms given as arguments
4662 self._set_atoms = InternalPackageSet()
4663 self._atom_arg_map = {}
4664 # contains all nodes pulled in by self._set_atoms
4665 self._set_nodes = set()
4666 # Contains only Blocker -> Uninstall edges
4667 self._blocker_uninstalls = digraph()
4668 # Contains only Package -> Blocker edges
4669 self._blocker_parents = digraph()
4670 # Contains only irrelevant Package -> Blocker edges
4671 self._irrelevant_blockers = digraph()
4672 # Contains only unsolvable Package -> Blocker edges
4673 self._unsolvable_blockers = digraph()
4674 # Contains all Blocker -> Blocked Package edges
4675 self._blocked_pkgs = digraph()
4676 # Contains world packages that have been protected from
4677 # uninstallation but may not have been added to the graph
4678 # if the graph is not complete yet.
4679 self._blocked_world_pkgs = {}
4680 self._slot_collision_info = {}
4681 # Slot collision nodes are not allowed to block other packages since
4682 # blocker validation is only able to account for one package per slot.
4683 self._slot_collision_nodes = set()
4684 self._parent_atoms = {}
4685 self._slot_conflict_parent_atoms = set()
4686 self._serialized_tasks_cache = None
4687 self._scheduler_graph = None
4688 self._displayed_list = None
4689 self._pprovided_args = []
4690 self._missing_args = []
4691 self._masked_installed = set()
4692 self._unsatisfied_deps_for_display = []
4693 self._unsatisfied_blockers_for_display = None
4694 self._circular_deps_for_display = None
4695 self._dep_stack = []
4696 self._unsatisfied_deps = []
4697 self._initially_unsatisfied_deps = []
4698 self._ignored_deps = []
4699 self._required_set_names = set(["system", "world"])
4700 self._select_atoms = self._select_atoms_highest_available
4701 self._select_package = self._select_pkg_highest_available
4702 self._highest_pkg_cache = {}
4704 def _show_slot_collision_notice(self):
4705 """Show an informational message advising the user to mask one of the
4706 the packages. In some cases it may be possible to resolve this
4707 automatically, but support for backtracking (removal nodes that have
4708 already been selected) will be required in order to handle all possible
4712 if not self._slot_collision_info:
4715 self._show_merge_list()
4718 msg.append("\n!!! Multiple package instances within a single " + \
4719 "package slot have been pulled\n")
4720 msg.append("!!! into the dependency graph, resulting" + \
4721 " in a slot conflict:\n\n")
4723 # Max number of parents shown, to avoid flooding the display.
4725 explanation_columns = 70
4727 for (slot_atom, root), slot_nodes \
4728 in self._slot_collision_info.iteritems():
4729 msg.append(str(slot_atom))
4732 for node in slot_nodes:
4734 msg.append(str(node))
4735 parent_atoms = self._parent_atoms.get(node)
4738 # Prefer conflict atoms over others.
4739 for parent_atom in parent_atoms:
4740 if len(pruned_list) >= max_parents:
4742 if parent_atom in self._slot_conflict_parent_atoms:
4743 pruned_list.add(parent_atom)
4745 # If this package was pulled in by conflict atoms then
4746 # show those alone since those are the most interesting.
4748 # When generating the pruned list, prefer instances
4749 # of DependencyArg over instances of Package.
4750 for parent_atom in parent_atoms:
4751 if len(pruned_list) >= max_parents:
4753 parent, atom = parent_atom
4754 if isinstance(parent, DependencyArg):
4755 pruned_list.add(parent_atom)
4756 # Prefer Packages instances that themselves have been
4757 # pulled into collision slots.
4758 for parent_atom in parent_atoms:
4759 if len(pruned_list) >= max_parents:
4761 parent, atom = parent_atom
4762 if isinstance(parent, Package) and \
4763 (parent.slot_atom, parent.root) \
4764 in self._slot_collision_info:
4765 pruned_list.add(parent_atom)
4766 for parent_atom in parent_atoms:
4767 if len(pruned_list) >= max_parents:
4769 pruned_list.add(parent_atom)
4770 omitted_parents = len(parent_atoms) - len(pruned_list)
4771 parent_atoms = pruned_list
4772 msg.append(" pulled in by\n")
4773 for parent_atom in parent_atoms:
4774 parent, atom = parent_atom
4775 msg.append(2*indent)
4776 if isinstance(parent,
4777 (PackageArg, AtomArg)):
4778 # For PackageArg and AtomArg types, it's
4779 # redundant to display the atom attribute.
4780 msg.append(str(parent))
4782 # Display the specific atom from SetArg or
4784 msg.append("%s required by %s" % (atom, parent))
4787 msg.append(2*indent)
4788 msg.append("(and %d more)\n" % omitted_parents)
4790 msg.append(" (no parents)\n")
4792 explanation = self._slot_conflict_explanation(slot_nodes)
4795 msg.append(indent + "Explanation:\n\n")
4796 for line in textwrap.wrap(explanation, explanation_columns):
4797 msg.append(2*indent + line + "\n")
4800 sys.stderr.write("".join(msg))
4803 explanations_for_all = explanations == len(self._slot_collision_info)
4805 if explanations_for_all or "--quiet" in self.myopts:
4809 msg.append("It may be possible to solve this problem ")
4810 msg.append("by using package.mask to prevent one of ")
4811 msg.append("those packages from being selected. ")
4812 msg.append("However, it is also possible that conflicting ")
4813 msg.append("dependencies exist such that they are impossible to ")
4814 msg.append("satisfy simultaneously. If such a conflict exists in ")
4815 msg.append("the dependencies of two different packages, then those ")
4816 msg.append("packages can not be installed simultaneously.")
4818 from formatter import AbstractFormatter, DumbWriter
4819 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4821 f.add_flowing_data(x)
4825 msg.append("For more information, see MASKED PACKAGES ")
4826 msg.append("section in the emerge man page or refer ")
4827 msg.append("to the Gentoo Handbook.")
4829 f.add_flowing_data(x)
# Produce a human-readable explanation string for a two-package slot
# conflict caused by USE deps, or None (elided here) when no suggestion
# is possible.  NOTE(review): several original lines are elided from this
# view (returns, `matched_node` init); comments describe only what is visible.
4833 def _slot_conflict_explanation(self, slot_nodes):
4835 When a slot conflict occurs due to USE deps, there are a few
4836 different cases to consider:
4838 1) New USE are correctly set but --newuse wasn't requested so an
4839 installed package with incorrect USE happened to get pulled
4840 into graph before the new one.
4842 2) New USE are incorrectly set but an installed package has correct
4843 USE so it got pulled into the graph, and a new instance also got
4844 pulled in due to --newuse or an upgrade.
4846 3) Multiple USE deps exist that can't be satisfied simultaneously,
4847 and multiple package instances got pulled into the same slot to
4848 satisfy the conflicting deps.
4850 Currently, explanations and suggested courses of action are generated
4851 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
# Suggestions are only generated for conflicts between exactly two packages.
4854 if len(slot_nodes) != 2:
4855 # Suggestions are only implemented for
4856 # conflicts between two packages.
4859 all_conflict_atoms = self._slot_conflict_parent_atoms
# Classify the two nodes: one matched by conflict atoms, one not.
4861 matched_atoms = None
4862 unmatched_node = None
4863 for node in slot_nodes:
4864 parent_atoms = self._parent_atoms.get(node)
4865 if not parent_atoms:
4866 # Normally, there are always parent atoms. If there are
4867 # none then something unexpected is happening and there's
4868 # currently no suggestion for this case.
4870 conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4871 for parent_atom in conflict_atoms:
4872 parent, atom = parent_atom
4874 # Suggestions are currently only implemented for cases
4875 # in which all conflict atoms have USE deps.
4878 if matched_node is not None:
4879 # If conflict atoms match multiple nodes
4880 # then there's no suggestion.
4883 matched_atoms = conflict_atoms
4885 if unmatched_node is not None:
4886 # Neither node is matched by conflict atoms, and
4887 # there is no suggestion for this case.
4889 unmatched_node = node
4891 if matched_node is None or unmatched_node is None:
4892 # This shouldn't happen.
# Case 1: same version, only the USE differs -> suggest --newuse.
4895 if unmatched_node.installed and not matched_node.installed and \
4896 unmatched_node.cpv == matched_node.cpv:
4897 # If the conflicting packages are the same version then
4898 # --newuse should be all that's needed. If they are different
4899 # versions then there's some other problem.
4900 return "New USE are correctly set, but --newuse wasn't" + \
4901 " requested, so an installed package with incorrect USE " + \
4902 "happened to get pulled into the dependency graph. " + \
4903 "In order to solve " + \
4904 "this, either specify the --newuse option or explicitly " + \
4905 " reinstall '%s'." % matched_node.slot_atom
# Case 2: installed package matched, new one didn't -> suggest adjusting USE.
4907 if matched_node.installed and not unmatched_node.installed:
4908 atoms = sorted(set(atom for parent, atom in matched_atoms))
4909 explanation = ("New USE for '%s' are incorrectly set. " + \
4910 "In order to solve this, adjust USE to satisfy '%s'") % \
4911 (matched_node.slot_atom, atoms[0])
# Join the remaining atoms as "..., 'a', 'b' and 'c'".
4913 for atom in atoms[1:-1]:
4914 explanation += ", '%s'" % (atom,)
4917 explanation += " and '%s'" % (atoms[-1],)
# Identify the specific parent atoms that are responsible for each slot
# conflict: an atom is "conflicting" if it matches only a subset of the
# packages pulled into one slot.  Populates self._slot_conflict_parent_atoms.
4923 def _process_slot_conflicts(self):
4925 Process slot conflict data to identify specific atoms which
4926 lead to conflict. These atoms only match a subset of the
4927 packages that have been pulled into a given slot.
4929 for (slot_atom, root), slot_nodes \
4930 in self._slot_collision_info.iteritems():
# Collect the union of all parent atoms over every package in the slot.
4932 all_parent_atoms = set()
4933 for pkg in slot_nodes:
4934 parent_atoms = self._parent_atoms.get(pkg)
4935 if not parent_atoms:
4937 all_parent_atoms.update(parent_atoms)
# For each package, test the parent atoms it does NOT already carry;
# any that match this package too are recorded, the rest are conflict atoms.
4939 for pkg in slot_nodes:
4940 parent_atoms = self._parent_atoms.get(pkg)
4941 if parent_atoms is None:
4942 parent_atoms = set()
4943 self._parent_atoms[pkg] = parent_atoms
4944 for parent_atom in all_parent_atoms:
4945 if parent_atom in parent_atoms:
4947 # Use package set for matching since it will match via
4948 # PROVIDE when necessary, while match_from_list does not.
4949 parent, atom = parent_atom
4950 atom_set = InternalPackageSet(
4951 initial_atoms=(atom,))
4952 if atom_set.findAtomForPackage(pkg):
4953 parent_atoms.add(parent_atom)
4955 self._slot_conflict_parent_atoms.add(parent_atom)
# NOTE(review): the return statements for this method are elided from this
# view; only the flag-difference computation is visible.
4957 def _reinstall_for_flags(self, forced_flags,
4958 orig_use, orig_iuse, cur_use, cur_iuse):
4959 """Return a set of flags that trigger reinstallation, or None if there
4960 are no such flags."""
# --newuse: any IUSE change (minus forced flags) or any enabled-flag change.
4961 if "--newuse" in self.myopts:
4962 flags = set(orig_iuse.symmetric_difference(
4963 cur_iuse).difference(forced_flags))
4964 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4965 cur_iuse.intersection(cur_use)))
# --reinstall=changed-use: only flags whose enabled state actually changed.
4968 elif "changed-use" == self.myopts.get("--reinstall"):
4969 flags = orig_iuse.intersection(orig_use).symmetric_difference(
4970 cur_iuse.intersection(cur_use))
# Drain self._dep_stack, dispatching each entry to _add_pkg_deps (for
# Package instances) or _add_dep (for Dependency instances).  The loop
# header and success/failure returns are elided from this view.
4975 def _create_graph(self, allow_unsatisfied=False):
4976 dep_stack = self._dep_stack
4978 self.spinner.update()
4979 dep = dep_stack.pop()
4980 if isinstance(dep, Package):
4981 if not self._add_pkg_deps(dep,
4982 allow_unsatisfied=allow_unsatisfied):
4985 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
# Resolve a single Dependency: handle blockers, select a matching package,
# record unsatisfied deps, and hand the chosen package to _add_pkg.
# NOTE(review): many branches (blocker test, failure returns) are elided
# from this view.
4989 def _add_dep(self, dep, allow_unsatisfied=False):
4990 debug = "--debug" in self.myopts
4991 buildpkgonly = "--buildpkgonly" in self.myopts
4992 nodeps = "--nodeps" in self.myopts
4993 empty = "empty" in self.myparams
4994 deep = "deep" in self.myparams
# --update only affects direct (depth <= 1) dependencies.
4995 update = "--update" in self.myopts and dep.depth <= 1
# Blocker handling (the `dep.blocker` test itself is elided here).
4997 if not buildpkgonly and \
4999 dep.parent not in self._slot_collision_nodes:
5000 if dep.parent.onlydeps:
5001 # It's safe to ignore blockers if the
5002 # parent is an --onlydeps node.
5004 # The blocker applies to the root where
5005 # the parent is or will be installed.
5006 blocker = Blocker(atom=dep.atom,
5007 eapi=dep.parent.metadata["EAPI"],
5008 root=dep.parent.root)
5009 self._blocker_parents.add(blocker, dep.parent)
5011 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
5012 onlydeps=dep.onlydeps)
# No package found: optional deps are dropped silently, others are
# queued for later retry or for the unsatisfied-deps display.
5014 if dep.priority.optional:
5015 # This could be an unecessary build-time dep
5016 # pulled in by --with-bdeps=y.
5018 if allow_unsatisfied:
5019 self._unsatisfied_deps.append(dep)
5021 self._unsatisfied_deps_for_display.append(
5022 ((dep.root, dep.atom), {"myparent":dep.parent}))
5024 # In some cases, dep_check will return deps that shouldn't
5025 # be proccessed any further, so they are identified and
5026 # discarded here. Try to discard as few as possible since
5027 # discarded dependencies reduce the amount of information
5028 # available for optimization of merge order.
5029 if dep.priority.satisfied and \
5030 not dep_pkg.installed and \
5031 not (existing_node or empty or deep or update):
5033 if dep.root == self.target_root:
5035 myarg = self._iter_atoms_for_pkg(dep_pkg).next()
5036 except StopIteration:
5038 except portage.exception.InvalidDependString:
5039 if not dep_pkg.installed:
5040 # This shouldn't happen since the package
5041 # should have been masked.
# Satisfied deps that don't need processing are remembered so that
# merge-order optimization can still see them.
5044 self._ignored_deps.append(dep)
5047 if not self._add_pkg(dep_pkg, dep):
# Add a selected package to the dependency graph, detecting slot
# collisions, reusing existing nodes when possible, and queuing the
# package for recursive dependency processing.  NOTE(review): numerous
# interior lines (returns, try:, priority kwargs) are elided from this view.
5051 def _add_pkg(self, pkg, dep):
5058 myparent = dep.parent
5059 priority = dep.priority
5061 if priority is None:
5062 priority = DepPriority()
5064 Fills the digraph with nodes comprised of packages to merge.
5065 mybigkey is the package spec of the package to merge.
5066 myparent is the package depending on mybigkey ( or None )
5067 addme = Should we add this package to the digraph or are we just looking at it's deps?
5068 Think --onlydeps, we need to ignore packages in that case.
5071 #IUSE-aware emerge -> USE DEP aware depgraph
5072 #"no downgrade" emerge
5074 # Ensure that the dependencies of the same package
5075 # are never processed more than once.
5076 previously_added = pkg in self.digraph
5078 # select the correct /var database that we'll be checking against
5079 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5080 pkgsettings = self.pkgsettings[pkg.root]
5085 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5086 except portage.exception.InvalidDependString, e:
5087 if not pkg.installed:
5088 show_invalid_depstring_notice(
5089 pkg, pkg.metadata["PROVIDE"], str(e))
5093 if not pkg.onlydeps:
5094 if not pkg.installed and \
5095 "empty" not in self.myparams and \
5096 vardbapi.match(pkg.slot_atom):
5097 # Increase the priority of dependencies on packages that
5098 # are being rebuilt. This optimizes merge order so that
5099 # dependencies are rebuilt/updated as soon as possible,
5100 # which is needed especially when emerge is called by
5101 # revdep-rebuild since dependencies may be affected by ABI
5102 # breakage that has rendered them useless. Don't adjust
5103 # priority here when in "empty" mode since all packages
5104 # are being merged in that case.
5105 priority.rebuild = True
# Is another package already occupying this slot in the graph?
5107 existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5108 slot_collision = False
5110 existing_node_matches = pkg.cpv == existing_node.cpv
5111 if existing_node_matches and \
5112 pkg != existing_node and \
5113 dep.atom is not None:
5114 # Use package set for matching since it will match via
5115 # PROVIDE when necessary, while match_from_list does not.
5116 atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5117 if not atom_set.findAtomForPackage(existing_node):
5118 existing_node_matches = False
5119 if existing_node_matches:
5120 # The existing node can be reused.
5122 for parent_atom in arg_atoms:
5123 parent, atom = parent_atom
5124 self.digraph.add(existing_node, parent,
5126 self._add_parent_atom(existing_node, parent_atom)
5127 # If a direct circular dependency is not an unsatisfied
5128 # buildtime dependency then drop it here since otherwise
5129 # it can skew the merge order calculation in an unwanted
5131 if existing_node != myparent or \
5132 (priority.buildtime and not priority.satisfied):
5133 self.digraph.addnode(existing_node, myparent,
5135 if dep.atom is not None and dep.parent is not None:
5136 self._add_parent_atom(existing_node,
5137 (dep.parent, dep.atom))
5141 # A slot collision has occurred. Sometimes this coincides
5142 # with unresolvable blockers, so the slot collision will be
5143 # shown later if there are no unresolvable blockers.
5144 self._add_slot_conflict(pkg)
5145 slot_collision = True
5148 # Now add this node to the graph so that self.display()
5149 # can show use flags and --tree portage.output. This node is
5150 # only being partially added to the graph. It must not be
5151 # allowed to interfere with the other nodes that have been
5152 # added. Do not overwrite data for existing nodes in
5153 # self.mydbapi since that data will be used for blocker
5155 # Even though the graph is now invalid, continue to process
5156 # dependencies so that things like --fetchonly can still
5157 # function despite collisions.
5159 elif not previously_added:
5160 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5161 self.mydbapi[pkg.root].cpv_inject(pkg)
5162 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5164 if not pkg.installed:
5165 # Allow this package to satisfy old-style virtuals in case it
5166 # doesn't already. Any pre-existing providers will be preferred
5169 pkgsettings.setinst(pkg.cpv, pkg.metadata)
5170 # For consistency, also update the global virtuals.
5171 settings = self.roots[pkg.root].settings
5173 settings.setinst(pkg.cpv, pkg.metadata)
5175 except portage.exception.InvalidDependString, e:
5176 show_invalid_depstring_notice(
5177 pkg, pkg.metadata["PROVIDE"], str(e))
5182 self._set_nodes.add(pkg)
5184 # Do this even when addme is False (--onlydeps) so that the
5185 # parent/child relationship is always known in case
5186 # self._show_slot_collision_notice() needs to be called later.
5187 self.digraph.add(pkg, myparent, priority=priority)
5188 if dep.atom is not None and dep.parent is not None:
5189 self._add_parent_atom(pkg, (dep.parent, dep.atom))
# Packages matched directly by command-line args get edges from the args.
5192 for parent_atom in arg_atoms:
5193 parent, atom = parent_atom
5194 self.digraph.add(pkg, parent, priority=priority)
5195 self._add_parent_atom(pkg, parent_atom)
5197 """ This section determines whether we go deeper into dependencies or not.
5198 We want to go deeper on a few occasions:
5199 Installing package A, we need to make sure package A's deps are met.
5200 emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
5201 If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
5203 dep_stack = self._dep_stack
5204 if "recurse" not in self.myparams:
# Installed packages' deps go to _ignored_deps unless "deep" was requested.
5206 elif pkg.installed and \
5207 "deep" not in self.myparams:
5208 dep_stack = self._ignored_deps
5210 self.spinner.update()
5215 if not previously_added:
5216 dep_stack.append(pkg)
5219 def _add_parent_atom(self, pkg, parent_atom):
5220 parent_atoms = self._parent_atoms.get(pkg)
5221 if parent_atoms is None:
5222 parent_atoms = set()
5223 self._parent_atoms[pkg] = parent_atoms
5224 parent_atoms.add(parent_atom)
# Record pkg as a slot-collision node and add both it and the package
# currently occupying its slot to self._slot_collision_info.
# NOTE(review): the `slot_nodes` initialization line is elided from this view.
5226 def _add_slot_conflict(self, pkg):
5227 self._slot_collision_nodes.add(pkg)
# Conflicts are keyed by (slot atom, root) so each slot is tracked per root.
5228 slot_key = (pkg.slot_atom, pkg.root)
5229 slot_nodes = self._slot_collision_info.get(slot_key)
5230 if slot_nodes is None:
# Also include the package that already occupies this slot in the graph.
5232 slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5233 self._slot_collision_info[slot_key] = slot_nodes
# Extract DEPEND/RDEPEND/PDEPEND from pkg's metadata, adjust them for the
# current options (--buildpkgonly, --with-bdeps, --root-deps, removal),
# select atoms, and feed each as a Dependency to _add_dep.  NOTE(review):
# many interior lines (loop headers, returns, error paths) are elided
# from this view.
5236 def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5238 mytype = pkg.type_name
5241 metadata = pkg.metadata
5242 myuse = pkg.use.enabled
5244 depth = pkg.depth + 1
5245 removal_action = "remove" in self.myparams
5248 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5250 edepend[k] = metadata[k]
# --buildpkgonly without deep/empty: runtime deps are irrelevant.
5252 if not pkg.built and \
5253 "--buildpkgonly" in self.myopts and \
5254 "deep" not in self.myparams and \
5255 "empty" not in self.myparams:
5256 edepend["RDEPEND"] = ""
5257 edepend["PDEPEND"] = ""
5258 bdeps_optional = False
5260 if pkg.built and not removal_action:
5261 if self.myopts.get("--with-bdeps", "n") == "y":
5262 # Pull in build time deps as requested, but marked them as
5263 # "optional" since they are not strictly required. This allows
5264 # more freedom in the merge order calculation for solving
5265 # circular dependencies. Don't convert to PDEPEND since that
5266 # could make --with-bdeps=y less effective if it is used to
5267 # adjust merge order to prevent built_with_use() calls from
5269 bdeps_optional = True
5271 # built packages do not have build time dependencies.
5272 edepend["DEPEND"] = ""
5274 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5275 edepend["DEPEND"] = ""
# Cross-root (ROOT != /) handling of where build deps are resolved.
5278 if self.target_root != "/":
5279 if "--root-deps" in self.myopts:
5281 if "--rdeps-only" in self.myopts:
5283 edepend["DEPEND"] = ""
# (root, depstring, priority) triples processed below; build-time deps
# get an "optional" priority when bdeps_optional is set.
5286 (bdeps_root, edepend["DEPEND"],
5287 self._priority(buildtime=(not bdeps_optional),
5288 optional=bdeps_optional)),
5289 (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5290 (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5293 debug = "--debug" in self.myopts
# Installed packages get lenient depstring parsing.
5294 strict = mytype != "installed"
5296 for dep_root, dep_string, dep_priority in deps:
5301 print "Parent: ", jbigkey
5302 print "Depstring:", dep_string
5303 print "Priority:", dep_priority
5304 vardb = self.roots[dep_root].trees["vartree"].dbapi
5306 selected_atoms = self._select_atoms(dep_root,
5307 dep_string, myuse=myuse, parent=pkg, strict=strict,
5308 priority=dep_priority)
5309 except portage.exception.InvalidDependString, e:
5310 show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5313 print "Candidates:", selected_atoms
5315 for atom in selected_atoms:
5318 atom = portage.dep.Atom(atom)
5320 mypriority = dep_priority.copy()
# A dep already satisfied by an installed package is marked as such,
# which relaxes merge-order constraints.
5321 if not atom.blocker and vardb.match(atom):
5322 mypriority.satisfied = True
5324 if not self._add_dep(Dependency(atom=atom,
5325 blocker=atom.blocker, depth=depth, parent=pkg,
5326 priority=mypriority, root=dep_root),
5327 allow_unsatisfied=allow_unsatisfied):
5330 except portage.exception.InvalidAtom, e:
5331 show_invalid_depstring_notice(
5332 pkg, dep_string, str(e))
5334 if not pkg.installed:
5338 print "Exiting...", jbigkey
5339 except portage.exception.AmbiguousPackageName, e:
5341 portage.writemsg("\n\n!!! An atom in the dependencies " + \
5342 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5344 portage.writemsg(" %s\n" % cpv, noiselevel=-1)
5345 portage.writemsg("\n", noiselevel=-1)
5346 if mytype == "binary":
5348 "!!! This binary package cannot be installed: '%s'\n" % \
5349 mykey, noiselevel=-1)
5350 elif mytype == "ebuild":
5351 portdb = self.roots[myroot].trees["porttree"].dbapi
5352 myebuild, mylocation = portdb.findname2(mykey)
5353 portage.writemsg("!!! This ebuild cannot be installed: " + \
5354 "'%s'\n" % myebuild, noiselevel=-1)
5355 portage.writemsg("!!! Please notify the package maintainer " + \
5356 "that atoms must be fully-qualified.\n", noiselevel=-1)
# Build a dependency-priority object of the class appropriate for the
# current operation (unmerge vs. merge).  NOTE(review): the `else:` line
# is elided from this view.
5360 def _priority(self, **kwargs):
5361 if "remove" in self.myparams:
5362 priority_constructor = UnmergeDepPriority
5364 priority_constructor = DepPriority
5365 return priority_constructor(**kwargs)
# Expand a category-less atom into the list of fully-qualified atoms whose
# category actually contains a matching package in any configured db.
# NOTE(review): the `categories` accumulation and return are elided here.
5367 def _dep_expand(self, root_config, atom_without_category):
5369 @param root_config: a root config instance
5370 @type root_config: RootConfig
5371 @param atom_without_category: an atom without a category component
5372 @type atom_without_category: String
5374 @returns: a list of atoms containing categories (possibly empty)
# Parse the package-name part by temporarily inserting a "null" category.
5376 null_cp = portage.dep_getkey(insert_category_into_atom(
5377 atom_without_category, "null"))
5378 cat, atom_pn = portage.catsplit(null_cp)
5380 dbs = self._filtered_trees[root_config.root]["dbs"]
# Probe every category of every db for a package with this name.
5382 for db, pkg_type, built, installed, db_keys in dbs:
5383 for cat in db.categories:
5384 if db.cp_list("%s/%s" % (cat, atom_pn)):
5388 for cat in categories:
5389 deps.append(insert_category_into_atom(
5390 atom_without_category, cat))
# Return whether any configured db for `root` provides a (new-style
# virtual) package for atom_cp.  NOTE(review): the result variable and
# return lines are elided from this view.
5393 def _have_new_virt(self, root, atom_cp):
5395 for db, pkg_type, built, installed, db_keys in \
5396 self._filtered_trees[root]["dbs"]:
5397 if db.cp_list(atom_cp):
# Yield the command-line argument atoms that select this package,
# skipping atoms superseded by a new-style virtual or by a visible
# higher-slot package.  NOTE(review): yield/continue lines are elided
# from this view.
5402 def _iter_atoms_for_pkg(self, pkg):
5403 # TODO: add multiple $ROOT support
5404 if pkg.root != self.target_root:
5406 atom_arg_map = self._atom_arg_map
5407 root_config = self.roots[pkg.root]
5408 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5409 atom_cp = portage.dep_getkey(atom)
# Old-style virtual matches are dropped when a new-style virtual exists.
5410 if atom_cp != pkg.cp and \
5411 self._have_new_virt(pkg.root, atom_cp):
5413 visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5414 visible_pkgs.reverse() # descending order
5416 for visible_pkg in visible_pkgs:
5417 if visible_pkg.cp != atom_cp:
5419 if pkg >= visible_pkg:
5420 # This is descending order, and we're not
5421 # interested in any versions <= pkg given.
# A visible package in a different slot supersedes pkg for this atom.
5423 if pkg.slot_atom != visible_pkg.slot_atom:
5424 higher_slot = visible_pkg
5426 if higher_slot is not None:
5428 for arg in atom_arg_map[(atom, pkg.root)]:
5429 if isinstance(arg, PackageArg) and \
# Translate the command-line arguments (.tbz2 files, .ebuild paths,
# filesystem paths, sets, and atoms) into Arg objects, seed the dependency
# graph with them, and return (success, favorites).  NOTE(review): a large
# number of interior lines (loop headers, try:, returns, else branches)
# are elided from this view; comments below only describe visible code.
5434 def select_files(self, myfiles):
5435 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
5436 appropriate depgraph and return a favorite list."""
5437 debug = "--debug" in self.myopts
5438 root_config = self.roots[self.target_root]
5439 sets = root_config.sets
5440 getSetAtoms = root_config.setconfig.getSetAtoms
5442 myroot = self.target_root
5443 dbs = self._filtered_trees[myroot]["dbs"]
5444 vardb = self.trees[myroot]["vartree"].dbapi
5445 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5446 portdb = self.trees[myroot]["porttree"].dbapi
5447 bindb = self.trees[myroot]["bintree"].dbapi
5448 pkgsettings = self.pkgsettings[myroot]
5450 onlydeps = "--onlydeps" in self.myopts
# --- Binary package (.tbz2) arguments ---
5453 ext = os.path.splitext(x)[1]
5455 if not os.path.exists(x):
# Fall back to PKGDIR/All and PKGDIR when the path doesn't exist as given.
5457 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5458 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5459 elif os.path.exists(
5460 os.path.join(pkgsettings["PKGDIR"], x)):
5461 x = os.path.join(pkgsettings["PKGDIR"], x)
5463 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5464 print "!!! Please ensure the tbz2 exists as specified.\n"
5465 return 0, myfavorites
5466 mytbz2=portage.xpak.tbz2(x)
5467 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
# The tbz2 must live at the canonical PKGDIR location for this key.
5468 if os.path.realpath(x) != \
5469 os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5470 print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5471 return 0, myfavorites
5472 db_keys = list(bindb._aux_cache_keys)
5473 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5474 pkg = Package(type_name="binary", root_config=root_config,
5475 cpv=mykey, built=True, metadata=metadata,
5477 self._pkg_cache[pkg] = pkg
5478 args.append(PackageArg(arg=x, package=pkg,
5479 root_config=root_config))
# --- Ebuild file arguments ---
5480 elif ext==".ebuild":
5481 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5482 pkgdir = os.path.dirname(ebuild_path)
5483 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5484 cp = pkgdir[len(tree_root)+1:]
5485 e = portage.exception.PackageNotFound(
5486 ("%s is not in a valid portage tree " + \
5487 "hierarchy or does not exist") % x)
5488 if not portage.isvalidatom(cp):
5490 cat = portage.catsplit(cp)[0]
# Strip the ".ebuild" suffix (7 chars) to recover category/pkg-version.
5491 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5492 if not portage.isvalidatom("="+mykey):
5494 ebuild_path = portdb.findname(mykey)
5496 if ebuild_path != os.path.join(os.path.realpath(tree_root),
5497 cp, os.path.basename(ebuild_path)):
5498 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5499 return 0, myfavorites
5500 if mykey not in portdb.xmatch(
5501 "match-visible", portage.dep_getkey(mykey)):
5502 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5503 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5504 print colorize("BAD", "*** page for details.")
5505 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5508 raise portage.exception.PackageNotFound(
5509 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5510 db_keys = list(portdb._aux_cache_keys)
5511 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5512 pkg = Package(type_name="ebuild", root_config=root_config,
5513 cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5514 pkgsettings.setcpv(pkg)
5515 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5516 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
5517 self._pkg_cache[pkg] = pkg
5518 args.append(PackageArg(arg=x, package=pkg,
5519 root_config=root_config))
# --- Absolute filesystem path arguments (owner lookup) ---
5520 elif x.startswith(os.path.sep):
5521 if not x.startswith(myroot):
5522 portage.writemsg(("\n\n!!! '%s' does not start with" + \
5523 " $ROOT.\n") % x, noiselevel=-1)
5525 # Queue these up since it's most efficient to handle
5526 # multiple files in a single iter_owners() call.
5527 lookup_owners.append(x)
# --- Set and atom arguments ---
5529 if x in ("system", "world"):
5531 if x.startswith(SETPREFIX):
5532 s = x[len(SETPREFIX):]
5534 raise portage.exception.PackageSetNotFound(s)
5537 # Recursively expand sets so that containment tests in
5538 # self._get_parent_sets() properly match atoms in nested
5539 # sets (like if world contains system).
5540 expanded_set = InternalPackageSet(
5541 initial_atoms=getSetAtoms(s))
5542 self._sets[s] = expanded_set
5543 args.append(SetArg(arg=x, set=expanded_set,
5544 root_config=root_config))
5546 if not is_valid_package_atom(x):
5547 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5549 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5550 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5552 # Don't expand categories or old-style virtuals here unless
5553 # necessary. Expansion of old-style virtuals here causes at
5554 # least the following problems:
5555 # 1) It's more difficult to determine which set(s) an atom
5556 # came from, if any.
5557 # 2) It takes away freedom from the resolver to choose other
5558 # possible expansions when necessary.
5560 args.append(AtomArg(arg=x, atom=x,
5561 root_config=root_config))
5563 expanded_atoms = self._dep_expand(root_config, x)
5564 installed_cp_set = set()
5565 for atom in expanded_atoms:
5566 atom_cp = portage.dep_getkey(atom)
5567 if vardb.cp_list(atom_cp):
5568 installed_cp_set.add(atom_cp)
# Ambiguity is resolved in favor of the single installed category, if any.
5569 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5570 installed_cp = iter(installed_cp_set).next()
5571 expanded_atoms = [atom for atom in expanded_atoms \
5572 if portage.dep_getkey(atom) == installed_cp]
5574 if len(expanded_atoms) > 1:
5577 ambiguous_package_name(x, expanded_atoms, root_config,
5578 self.spinner, self.myopts)
5579 return False, myfavorites
5581 atom = expanded_atoms[0]
5583 null_atom = insert_category_into_atom(x, "null")
5584 null_cp = portage.dep_getkey(null_atom)
5585 cat, atom_pn = portage.catsplit(null_cp)
5586 virts_p = root_config.settings.get_virts_p().get(atom_pn)
5588 # Allow the depgraph to choose which virtual.
5589 atom = insert_category_into_atom(x, "virtual")
5591 atom = insert_category_into_atom(x, "null")
5593 args.append(AtomArg(arg=x, atom=atom,
5594 root_config=root_config))
# --- Resolve queued filesystem paths to owning packages ---
5598 search_for_multiple = False
5599 if len(lookup_owners) > 1:
5600 search_for_multiple = True
5602 for x in lookup_owners:
5603 if not search_for_multiple and os.path.isdir(x):
5604 search_for_multiple = True
5605 relative_paths.append(x[len(myroot):])
5608 for pkg, relative_path in \
5609 real_vardb._owners.iter_owners(relative_paths):
5610 owners.add(pkg.mycpv)
5611 if not search_for_multiple:
5615 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5616 "by any package.\n") % lookup_owners[0], noiselevel=-1)
5620 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5622 # portage now masks packages with missing slot, but it's
5623 # possible that one was installed by an older version
5624 atom = portage.cpv_getkey(cpv)
5626 atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5627 args.append(AtomArg(arg=atom, atom=atom,
5628 root_config=root_config))
5630 if "--update" in self.myopts:
5631 # In some cases, the greedy slots behavior can pull in a slot that
5632 # the user would want to uninstall due to it being blocked by a
5633 # newer version in a different slot. Therefore, it's necessary to
5634 # detect and discard any that should be uninstalled. Each time
5635 # that arguments are updated, package selections are repeated in
5636 # order to ensure consistency with the current arguments:
5638 # 1) Initialize args
5639 # 2) Select packages and generate initial greedy atoms
5640 # 3) Update args with greedy atoms
5641 # 4) Select packages and generate greedy atoms again, while
5642 # accounting for any blockers between selected packages
5643 # 5) Update args with revised greedy atoms
5645 self._set_args(args)
5648 greedy_args.append(arg)
5649 if not isinstance(arg, AtomArg):
5651 for atom in self._greedy_slots(arg.root_config, arg.atom):
5653 AtomArg(arg=arg.arg, atom=atom,
5654 root_config=arg.root_config))
5656 self._set_args(greedy_args)
5659 # Revise greedy atoms, accounting for any blockers
5660 # between selected packages.
5661 revised_greedy_args = []
5663 revised_greedy_args.append(arg)
5664 if not isinstance(arg, AtomArg):
5666 for atom in self._greedy_slots(arg.root_config, arg.atom,
5667 blocker_lookahead=True):
5668 revised_greedy_args.append(
5669 AtomArg(arg=arg.arg, atom=atom,
5670 root_config=arg.root_config))
5671 args = revised_greedy_args
5672 del revised_greedy_args
5674 self._set_args(args)
# Favorites: atoms for atom/package args, set names for set args.
5676 myfavorites = set(myfavorites)
5678 if isinstance(arg, (AtomArg, PackageArg)):
5679 myfavorites.add(arg.atom)
5680 elif isinstance(arg, SetArg):
5681 myfavorites.add(arg.arg)
5682 myfavorites = list(myfavorites)
5684 pprovideddict = pkgsettings.pprovideddict
5686 portage.writemsg("\n", noiselevel=-1)
5687 # Order needs to be preserved since a feature of --nodeps
5688 # is to allow the user to force a specific merge order.
# --- Seed the graph with root packages selected by the args ---
5692 for atom in arg.set:
5693 self.spinner.update()
5694 dep = Dependency(atom=atom, onlydeps=onlydeps,
5695 root=myroot, parent=arg)
5696 atom_cp = portage.dep_getkey(atom)
5698 pprovided = pprovideddict.get(portage.dep_getkey(atom))
5699 if pprovided and portage.match_from_list(atom, pprovided):
5700 # A provided package has been specified on the command line.
5701 self._pprovided_args.append((arg, atom))
5703 if isinstance(arg, PackageArg):
5704 if not self._add_pkg(arg.package, dep) or \
5705 not self._create_graph():
5706 sys.stderr.write(("\n\n!!! Problem resolving " + \
5707 "dependencies for %s\n") % arg.arg)
5708 return 0, myfavorites
5711 portage.writemsg(" Arg: %s\n Atom: %s\n" % \
5712 (arg, atom), noiselevel=-1)
5713 pkg, existing_node = self._select_package(
5714 myroot, atom, onlydeps=onlydeps)
5716 if not (isinstance(arg, SetArg) and \
5717 arg.name in ("system", "world")):
5718 self._unsatisfied_deps_for_display.append(
5719 ((myroot, atom), {}))
5720 return 0, myfavorites
5721 self._missing_args.append((arg, atom))
5723 if atom_cp != pkg.cp:
5724 # For old-style virtuals, we need to repeat the
5725 # package.provided check against the selected package.
5726 expanded_atom = atom.replace(atom_cp, pkg.cp)
5727 pprovided = pprovideddict.get(pkg.cp)
5729 portage.match_from_list(expanded_atom, pprovided):
5730 # A provided package has been
5731 # specified on the command line.
5732 self._pprovided_args.append((arg, atom))
5734 if pkg.installed and "selective" not in self.myparams:
5735 self._unsatisfied_deps_for_display.append(
5736 ((myroot, atom), {}))
5737 # Previous behavior was to bail out in this case, but
5738 # since the dep is satisfied by the installed package,
5739 # it's more friendly to continue building the graph
5740 # and just show a warning message. Therefore, only bail
5741 # out here if the atom is not from either the system or
5743 if not (isinstance(arg, SetArg) and \
5744 arg.name in ("system", "world")):
5745 return 0, myfavorites
5747 # Add the selected package to the graph as soon as possible
5748 # so that later dep_check() calls can use it as feedback
5749 # for making more consistent atom selections.
5750 if not self._add_pkg(pkg, dep):
5751 if isinstance(arg, SetArg):
5752 sys.stderr.write(("\n\n!!! Problem resolving " + \
5753 "dependencies for %s from %s\n") % \
5756 sys.stderr.write(("\n\n!!! Problem resolving " + \
5757 "dependencies for %s\n") % atom)
5758 return 0, myfavorites
5760 except portage.exception.MissingSignature, e:
5761 portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5762 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5763 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5764 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5765 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5766 return 0, myfavorites
5767 except portage.exception.InvalidSignature, e:
5768 portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5769 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5770 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5771 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5772 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5773 return 0, myfavorites
5774 except SystemExit, e:
5775 raise # Needed else can't exit
5776 except Exception, e:
5777 print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5778 print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5781 # Now that the root packages have been added to the graph,
5782 # process the dependencies.
5783 if not self._create_graph():
5784 return 0, myfavorites
# --usepkgonly sanity check: every merge node must be a binary package.
5787 if "--usepkgonly" in self.myopts:
5788 for xs in self.digraph.all_nodes():
5789 if not isinstance(xs, Package):
5791 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5795 print "Missing binary for:",xs[2]
5799 except self._unknown_internal_error:
5800 return False, myfavorites
5802 # We're true here unless we are missing binaries.
5803 return (not missing,myfavorites)
# Rebuild the "args" package set and the atom->arg map from the given
# Arg objects, then invalidate caches that depend on argument selection.
# NOTE(review): several loop headers and appends are elided from this view.
5805 def _set_args(self, args):
5807 Create the "args" package set from atoms and packages given as
5808 arguments. This method can be called multiple times if necessary.
5809 The package selection cache is automatically invalidated, since
5810 arguments influence package selections.
5812 args_set = self._sets["args"]
5815 if not isinstance(arg, (AtomArg, PackageArg)):
5818 if atom in args_set:
# Rebuild the flattened union of all set atoms.
5822 self._set_atoms.clear()
5823 self._set_atoms.update(chain(*self._sets.itervalues()))
5824 atom_arg_map = self._atom_arg_map
5825 atom_arg_map.clear()
5827 for atom in arg.set:
5828 atom_key = (atom, arg.root_config.root)
5829 refs = atom_arg_map.get(atom_key)
5832 atom_arg_map[atom_key] = refs
5836 # Invalidate the package selection cache, since
5837 # arguments influence package selections.
5838 self._highest_pkg_cache.clear()
5839 for trees in self._filtered_trees.itervalues():
5840 trees["porttree"].dbapi._clear_cache()
# NOTE(review): excerpt with gaps (embedded line numbers skip values);
# some statements — e.g. the loop headers that populate `slots` per-slot
# and initialize `greedy_pkgs`/`blockers` — are not visible here.
5842 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
# Docstring body (quote lines fall in a listing gap):
5844 Return a list of slot atoms corresponding to installed slots that
5845 differ from the slot of the highest visible match. When
5846 blocker_lookahead is True, slot atoms that would trigger a blocker
5847 conflict are automatically discarded, potentially allowing automatic
5848 uninstallation of older slots when appropriate.
5850 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5851 if highest_pkg is None:
5853 vardb = root_config.trees["vartree"].dbapi
# Collect the SLOTs of installed packages matching the atom.
5855 for cpv in vardb.match(atom):
5856 # don't mix new virtuals with old virtuals
5857 if portage.cpv_getkey(cpv) == highest_pkg.cp:
5858 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5860 slots.add(highest_pkg.metadata["SLOT"])
# The slot of the highest match itself is not "greedy".
5864 slots.remove(highest_pkg.metadata["SLOT"])
# For each remaining slot, select the best visible package in that slot;
# only keep it if it is a lower version of the same cp.
5867 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5868 pkg, in_graph = self._select_package(root_config.root, slot_atom)
5869 if pkg is not None and \
5870 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5871 greedy_pkgs.append(pkg)
5874 if not blocker_lookahead:
5875 return [pkg.slot_atom for pkg in greedy_pkgs]
# Lookahead: gather each candidate's blocker atoms from its *DEPEND.
5878 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5879 for pkg in greedy_pkgs + [highest_pkg]:
5880 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5882 atoms = self._select_atoms(
5883 pkg.root, dep_str, pkg.use.enabled,
5884 parent=pkg, strict=True)
5885 except portage.exception.InvalidDependString:
5887 blocker_atoms = (x for x in atoms if x.blocker)
5888 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5890 if highest_pkg not in blockers:
5893 # filter packages with invalid deps
5894 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5896 # filter packages that conflict with highest_pkg
5897 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5898 (blockers[highest_pkg].findAtomForPackage(pkg) or \
5899 blockers[pkg].findAtomForPackage(highest_pkg))]
5904 # If two packages conflict, discard the lower version.
5905 discard_pkgs = set()
5906 greedy_pkgs.sort(reverse=True)
5907 for i in xrange(len(greedy_pkgs) - 1):
5908 pkg1 = greedy_pkgs[i]
5909 if pkg1 in discard_pkgs:
5911 for j in xrange(i + 1, len(greedy_pkgs)):
5912 pkg2 = greedy_pkgs[j]
5913 if pkg2 in discard_pkgs:
5915 if blockers[pkg1].findAtomForPackage(pkg2) or \
5916 blockers[pkg2].findAtomForPackage(pkg1):
5918 discard_pkgs.add(pkg2)
5920 return [pkg.slot_atom for pkg in greedy_pkgs \
5921 if pkg not in discard_pkgs]
# Thin wrapper: same as _select_atoms_highest_available but forces the
# graph-backed trees so atom selection prefers packages already chosen.
5923 def _select_atoms_from_graph(self, *pargs, **kwargs):
# Docstring body (quote lines fall in a listing gap):
5925 Prefer atoms matching packages that have already been
5926 added to the graph or those that are installed and have
5927 not been scheduled for replacement.
5929 kwargs["trees"] = self._graph_trees
5930 return self._select_atoms_highest_available(*pargs, **kwargs)
# NOTE(review): excerpt with gaps — e.g. the `if trees is None:` guard
# presumably preceding line 5938 and the try/finally around dep_check are
# not visible here.
5932 def _select_atoms_highest_available(self, root, depstring,
5933 myuse=None, parent=None, strict=True, trees=None, priority=None):
5934 """This will raise InvalidDependString if necessary. If trees is
5935 None then self._filtered_trees is used."""
5936 pkgsettings = self.pkgsettings[root]
5938 trees = self._filtered_trees
5939 if not getattr(priority, "buildtime", False):
5940 # The parent should only be passed to dep_check() for buildtime
5941 # dependencies since that's the only case when it's appropriate
5942 # to trigger the circular dependency avoidance code which uses it.
5943 # It's important not to trigger the same circular dependency
5944 # avoidance code for runtime dependencies since it's not needed
5945 # and it can promote an incorrect package choice.
5949 if parent is not None:
# dep_check() reads the parent out of the trees mapping.
5950 trees[root]["parent"] = parent
# Temporarily relax global strictness around the dep_check() call,
# then restore it (and pop "parent") afterwards.
5952 portage.dep._dep_check_strict = False
5953 mycheck = portage.dep_check(depstring, None,
5954 pkgsettings, myuse=myuse,
5955 myroot=root, trees=trees)
5957 if parent is not None:
5958 trees[root].pop("parent")
5959 portage.dep._dep_check_strict = True
# On failure mycheck[1] carries the error message; on success the atoms.
5961 raise portage.exception.InvalidDependString(mycheck[1])
5962 selected_atoms = mycheck[1]
5963 return selected_atoms
# Diagnose and print why `atom` could not be satisfied at `root`:
# masked candidates, missing/incorrect USE flags, or no ebuilds at all,
# followed by the parent chain that pulled the dependency in.
# NOTE(review): excerpt with gaps (embedded line numbers skip values);
# initializations of `missing_use`, `mreasons`, `changes`, `msg`, `node`
# fall in the gaps.
5965 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5966 atom = portage.dep.Atom(atom)
5967 atom_set = InternalPackageSet(initial_atoms=(atom,))
# Search without USE deps (but keeping any slot) so that candidates
# failing only on USE can still be reported with USE-change hints.
5968 atom_without_use = atom
5970 atom_without_use = portage.dep.remove_slot(atom)
5972 atom_without_use += ":" + atom.slot
5973 atom_without_use = portage.dep.Atom(atom_without_use)
5974 xinfo = '"%s"' % atom
5977 # Discard null/ from failed cpv_expand category expansion.
5978 xinfo = xinfo.replace("null/", "")
5979 masked_packages = []
5981 masked_pkg_instances = set()
5982 missing_licenses = []
5983 have_eapi_mask = False
5984 pkgsettings = self.pkgsettings[root]
5985 implicit_iuse = pkgsettings._get_implicit_iuse()
5986 root_config = self.roots[root]
5987 portdb = self.roots[root].trees["porttree"].dbapi
5988 dbs = self._filtered_trees[root]["dbs"]
5989 for db, pkg_type, built, installed, db_keys in dbs:
5993 if hasattr(db, "xmatch"):
5994 cpv_list = db.xmatch("match-all", atom_without_use)
5996 cpv_list = db.match(atom_without_use)
5999 for cpv in cpv_list:
6000 metadata, mreasons = get_mask_info(root_config, cpv,
6001 pkgsettings, db, pkg_type, built, installed, db_keys)
6002 if metadata is not None:
6003 pkg = Package(built=built, cpv=cpv,
6004 installed=installed, metadata=metadata,
6005 root_config=root_config)
6006 if pkg.cp != atom.cp:
6007 # A cpv can be returned from dbapi.match() as an
6008 # old-style virtual match even in cases when the
6009 # package does not actually PROVIDE the virtual.
6010 # Filter out any such false matches here.
6011 if not atom_set.findAtomForPackage(pkg):
6014 masked_pkg_instances.add(pkg)
6016 missing_use.append(pkg)
6019 masked_packages.append(
6020 (root_config, pkgsettings, cpv, metadata, mreasons))
# For each USE-failing candidate, work out whether the atom requires
# flags missing from IUSE, or flags that merely need toggling.
6022 missing_use_reasons = []
6023 missing_iuse_reasons = []
6024 for pkg in missing_use:
6025 use = pkg.use.enabled
6026 iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
6027 iuse_re = re.compile("^(%s)$" % "|".join(iuse))
6029 for x in atom.use.required:
6030 if iuse_re.match(x) is None:
6031 missing_iuse.append(x)
6034 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
6035 missing_iuse_reasons.append((pkg, mreasons))
6037 need_enable = sorted(atom.use.enabled.difference(use))
6038 need_disable = sorted(atom.use.disabled.intersection(use))
6039 if need_enable or need_disable:
6041 changes.extend(colorize("red", "+" + x) \
6042 for x in need_enable)
6043 changes.extend(colorize("blue", "-" + x) \
6044 for x in need_disable)
6045 mreasons.append("Change USE: %s" % " ".join(changes))
6046 missing_use_reasons.append((pkg, mreasons))
# Only unmasked candidates are worth suggesting USE changes for.
6048 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6049 in missing_use_reasons if pkg not in masked_pkg_instances]
6051 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6052 in missing_iuse_reasons if pkg not in masked_pkg_instances]
6054 show_missing_use = False
6055 if unmasked_use_reasons:
6056 # Only show the latest version.
6057 show_missing_use = unmasked_use_reasons[:1]
6058 elif unmasked_iuse_reasons:
6059 if missing_use_reasons:
6060 # All packages with required IUSE are masked,
6061 # so display a normal masking message.
6064 show_missing_use = unmasked_iuse_reasons
6066 if show_missing_use:
6067 print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6068 print "!!! One of the following packages is required to complete your request:"
6069 for pkg, mreasons in show_missing_use:
6070 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6072 elif masked_packages:
6074 colorize("BAD", "All ebuilds that could satisfy ") + \
6075 colorize("INFORM", xinfo) + \
6076 colorize("BAD", " have been masked.")
6077 print "!!! One of the following masked packages is required to complete your request:"
6078 have_eapi_mask = show_masked_packages(masked_packages)
# EAPI masks get an extra hint to upgrade portage itself.
6081 msg = ("The current version of portage supports " + \
6082 "EAPI '%s'. You must upgrade to a newer version" + \
6083 " of portage before EAPI masked packages can" + \
6084 " be installed.") % portage.const.EAPI
6085 from textwrap import wrap
6086 for line in wrap(msg, 75):
6091 print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6093 # Show parent nodes and the argument that pulled them in.
6094 traversed_nodes = set()
6097 while node is not None:
6098 traversed_nodes.add(node)
6099 msg.append('(dependency required by "%s" [%s])' % \
6100 (colorize('INFORM', str(node.cpv)), node.type_name))
6101 # When traversing to parents, prefer arguments over packages
6102 # since arguments are root nodes. Never traverse the same
6103 # package twice, in order to prevent an infinite loop.
6104 selected_parent = None
6105 for parent in self.digraph.parent_nodes(node):
6106 if isinstance(parent, DependencyArg):
6107 msg.append('(dependency required by "%s" [argument])' % \
6108 (colorize('INFORM', str(parent))))
# An argument terminates the walk (no parent selected).
6109 selected_parent = None
6111 if parent not in traversed_nodes:
6112 selected_parent = parent
6113 node = selected_parent
# Cached front-end for _select_pkg_highest_available_imp(); memoizes on
# (root, atom, onlydeps). NOTE(review): excerpt with gaps — the early
# return for a cache hit and the unpacking of `ret` into (pkg, existing)
# fall in the gaps.
6119 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6120 cache_key = (root, atom, onlydeps)
6121 ret = self._highest_pkg_cache.get(cache_key)
6124 if pkg and not existing:
6125 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6126 if existing and existing == pkg:
6127 # Update the cache to reflect that the
6128 # package has been added to the graph.
6130 self._highest_pkg_cache[cache_key] = ret
6132 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6133 self._highest_pkg_cache[cache_key] = ret
# Track visibility of the selected package for later reporting;
# installed packages with missing keywords are not injected.
6136 settings = pkg.root_config.settings
6137 if visible(settings, pkg) and not (pkg.installed and \
6138 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6139 pkg.root_config.visible_pkgs.cpv_inject(pkg)
# Core package-selection routine: scan the filtered dbs (ebuild/binary/
# installed) for the best visible match of `atom`, honoring --usepkgonly,
# --noreplace, --newuse/--reinstall and the "selective"/"empty" params.
# Returns (best_pkg_or_None, existing_graph_node_or_None).
# NOTE(review): excerpt with gaps (embedded line numbers skip values);
# several guards, `continue`/`break` statements and else-branches are not
# visible, so control flow below is partial.
6142 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6143 root_config = self.roots[root]
6144 pkgsettings = self.pkgsettings[root]
6145 dbs = self._filtered_trees[root]["dbs"]
6146 vardb = self.roots[root].trees["vartree"].dbapi
6147 portdb = self.roots[root].trees["porttree"].dbapi
6148 # List of acceptable packages, ordered by type preference.
6149 matched_packages = []
6150 highest_version = None
6151 if not isinstance(atom, portage.dep.Atom):
6152 atom = portage.dep.Atom(atom)
6154 atom_set = InternalPackageSet(initial_atoms=(atom,))
6155 existing_node = None
6157 usepkgonly = "--usepkgonly" in self.myopts
6158 empty = "empty" in self.myparams
6159 selective = "selective" in self.myparams
6161 noreplace = "--noreplace" in self.myopts
6162 # Behavior of the "selective" parameter depends on
6163 # whether or not a package matches an argument atom.
6164 # If an installed package provides an old-style
6165 # virtual that is no longer provided by an available
6166 # package, the installed package may match an argument
6167 # atom even though none of the available packages do.
6168 # Therefore, "selective" logic does not consider
6169 # whether or not an installed package matches an
6170 # argument atom. It only considers whether or not
6171 # available packages match argument atoms, which is
6172 # represented by the found_available_arg flag.
6173 found_available_arg = False
# Two passes: first prefer nodes already in the graph, then fresh ones.
6174 for find_existing_node in True, False:
6177 for db, pkg_type, built, installed, db_keys in dbs:
6180 if installed and not find_existing_node:
6181 want_reinstall = reinstall or empty or \
6182 (found_available_arg and not selective)
6183 if want_reinstall and matched_packages:
6185 if hasattr(db, "xmatch"):
6186 cpv_list = db.xmatch("match-all", atom)
6188 cpv_list = db.match(atom)
6190 # USE=multislot can make an installed package appear as if
6191 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6192 # won't do any good as long as USE=multislot is enabled since
6193 # the newly built package still won't have the expected slot.
6194 # Therefore, assume that such SLOT dependencies are already
6195 # satisfied rather than forcing a rebuild.
6196 if installed and not cpv_list and atom.slot:
6197 for cpv in db.match(atom.cp):
6198 slot_available = False
6199 for other_db, other_type, other_built, \
6200 other_installed, other_keys in dbs:
6203 other_db.aux_get(cpv, ["SLOT"])[0]:
6204 slot_available = True
6208 if not slot_available:
6210 inst_pkg = self._pkg(cpv, "installed",
6211 root_config, installed=installed)
6212 # Remove the slot from the atom and verify that
6213 # the package matches the resulting atom.
6214 atom_without_slot = portage.dep.remove_slot(atom)
6216 atom_without_slot += str(atom.use)
6217 atom_without_slot = portage.dep.Atom(atom_without_slot)
6218 if portage.match_from_list(
6219 atom_without_slot, [inst_pkg]):
6220 cpv_list = [inst_pkg.cpv]
6225 pkg_status = "merge"
6226 if installed or onlydeps:
6227 pkg_status = "nomerge"
6230 for cpv in cpv_list:
6231 # Make --noreplace take precedence over --newuse.
6232 if not installed and noreplace and \
6233 cpv in vardb.match(atom):
6234 # If the installed version is masked, it may
6235 # be necessary to look at lower versions,
6236 # in case there is a visible downgrade.
6238 reinstall_for_flags = None
6239 cache_key = (pkg_type, root, cpv, pkg_status)
6240 calculated_use = True
6241 pkg = self._pkg_cache.get(cache_key)
6243 calculated_use = False
6245 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6248 pkg = Package(built=built, cpv=cpv,
6249 installed=installed, metadata=metadata,
6250 onlydeps=onlydeps, root_config=root_config,
6252 metadata = pkg.metadata
6254 metadata['CHOST'] = pkgsettings.get('CHOST', '')
6255 if not built and ("?" in metadata["LICENSE"] or \
6256 "?" in metadata["PROVIDE"]):
6257 # This is avoided whenever possible because
6258 # it's expensive. It only needs to be done here
6259 # if it has an effect on visibility.
6260 pkgsettings.setcpv(pkg)
6261 metadata["USE"] = pkgsettings["PORTAGE_USE"]
6262 calculated_use = True
6263 self._pkg_cache[pkg] = pkg
6265 if not installed or (built and matched_packages):
6266 # Only enforce visibility on installed packages
6267 # if there is at least one other visible package
6268 # available. By filtering installed masked packages
6269 # here, packages that have been masked since they
6270 # were installed can be automatically downgraded
6271 # to an unmasked version.
6273 if not visible(pkgsettings, pkg):
6275 except portage.exception.InvalidDependString:
6279 # Enable upgrade or downgrade to a version
6280 # with visible KEYWORDS when the installed
6281 # version is masked by KEYWORDS, but never
6282 # reinstall the same exact version only due
6283 # to a KEYWORDS mask.
6284 if built and matched_packages:
6286 different_version = None
6287 for avail_pkg in matched_packages:
6288 if not portage.dep.cpvequal(
6289 pkg.cpv, avail_pkg.cpv):
6290 different_version = avail_pkg
6292 if different_version is not None:
6295 pkgsettings._getMissingKeywords(
6296 pkg.cpv, pkg.metadata):
6299 # If the ebuild no longer exists or its
6300 # keywords have been dropped, reject built
6301 # instances (installed or binary).
6302 # If --usepkgonly is enabled, assume that
6303 # the ebuild status should be ignored.
6307 pkg.cpv, "ebuild", root_config)
6308 except portage.exception.PackageNotFound:
6311 if not visible(pkgsettings, pkg_eb):
6314 if not pkg.built and not calculated_use:
6315 # This is avoided whenever possible because
6317 pkgsettings.setcpv(pkg)
6318 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6320 if pkg.cp != atom.cp:
6321 # A cpv can be returned from dbapi.match() as an
6322 # old-style virtual match even in cases when the
6323 # package does not actually PROVIDE the virtual.
6324 # Filter out any such false matches here.
6325 if not atom_set.findAtomForPackage(pkg):
6329 if root == self.target_root:
6331 # Ebuild USE must have been calculated prior
6332 # to this point, in case atoms have USE deps.
6333 myarg = self._iter_atoms_for_pkg(pkg).next()
6334 except StopIteration:
6336 except portage.exception.InvalidDependString:
6338 # masked by corruption
6340 if not installed and myarg:
6341 found_available_arg = True
# Reject candidates whose USE state contradicts the atom's USE deps.
6343 if atom.use and not pkg.built:
6344 use = pkg.use.enabled
6345 if atom.use.enabled.difference(use):
6347 if atom.use.disabled.intersection(use):
6349 if pkg.cp == atom_cp:
6350 if highest_version is None:
6351 highest_version = pkg
6352 elif pkg > highest_version:
6353 highest_version = pkg
6354 # At this point, we've found the highest visible
6355 # match from the current repo. Any lower versions
6356 # from this repo are ignored, so the loop
6357 # will always end with a break statement below
6359 if find_existing_node:
6360 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6363 if portage.dep.match_from_list(atom, [e_pkg]):
6364 if highest_version and \
6365 e_pkg.cp == atom_cp and \
6366 e_pkg < highest_version and \
6367 e_pkg.slot_atom != highest_version.slot_atom:
6368 # There is a higher version available in a
6369 # different slot, so this existing node is
6373 matched_packages.append(e_pkg)
6374 existing_node = e_pkg
6376 # Compare built package to current config and
6377 # reject the built package if necessary.
6378 if built and not installed and \
6379 ("--newuse" in self.myopts or \
6380 "--reinstall" in self.myopts):
6381 iuses = pkg.iuse.all
6382 old_use = pkg.use.enabled
6384 pkgsettings.setcpv(myeb)
6386 pkgsettings.setcpv(pkg)
6387 now_use = pkgsettings["PORTAGE_USE"].split()
6388 forced_flags = set()
6389 forced_flags.update(pkgsettings.useforce)
6390 forced_flags.update(pkgsettings.usemask)
6392 if myeb and not usepkgonly:
6393 cur_iuse = myeb.iuse.all
6394 if self._reinstall_for_flags(forced_flags,
6398 # Compare current config to installed package
6399 # and do not reinstall if possible.
6400 if not installed and \
6401 ("--newuse" in self.myopts or \
6402 "--reinstall" in self.myopts) and \
6403 cpv in vardb.match(atom):
6404 pkgsettings.setcpv(pkg)
6405 forced_flags = set()
6406 forced_flags.update(pkgsettings.useforce)
6407 forced_flags.update(pkgsettings.usemask)
6408 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6409 old_iuse = set(filter_iuse_defaults(
6410 vardb.aux_get(cpv, ["IUSE"])[0].split()))
6411 cur_use = pkg.use.enabled
6412 cur_iuse = pkg.iuse.all
6413 reinstall_for_flags = \
6414 self._reinstall_for_flags(
6415 forced_flags, old_use, old_iuse,
6417 if reinstall_for_flags:
6421 matched_packages.append(pkg)
6422 if reinstall_for_flags:
6423 self._reinstall_nodes[pkg] = \
6427 if not matched_packages:
6430 if "--debug" in self.myopts:
6431 for pkg in matched_packages:
6432 portage.writemsg("%s %s\n" % \
6433 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6435 # Filter out any old-style virtual matches if they are
6436 # mixed with new-style virtual matches.
6437 cp = portage.dep_getkey(atom)
6438 if len(matched_packages) > 1 and \
6439 "virtual" == portage.catsplit(cp)[0]:
6440 for pkg in matched_packages:
6443 # Got a new-style virtual, so filter
6444 # out any old-style virtuals.
6445 matched_packages = [pkg for pkg in matched_packages \
6449 if len(matched_packages) > 1:
6450 bestmatch = portage.best(
6451 [pkg.cpv for pkg in matched_packages])
6452 matched_packages = [pkg for pkg in matched_packages \
6453 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6455 # ordered by type preference ("ebuild" type is the last resort)
6456 return matched_packages[-1], existing_node
# Graph-restricted counterpart of _select_pkg_highest_available: only
# matches packages already represented in self._graph_trees.
# NOTE(review): excerpt with gaps — the "no matches" fallback between
# lines 6465 and 6468 is not visible here.
6458 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
# Docstring body (quote lines fall in a listing gap):
6460 Select packages that have already been added to the graph or
6461 those that are installed and have not been scheduled for
6464 graph_db = self._graph_trees[root]["porttree"].dbapi
6465 matches = graph_db.match_pkgs(atom)
6468 pkg = matches[-1] # highest match
6469 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6470 return pkg, in_graph
# NOTE(review): excerpt with gaps (embedded line numbers skip values);
# early `return` statements, the `args` list initialization and several
# loop bodies fall in the gaps.
6472 def _complete_graph(self):
# Docstring body (quote lines fall in a listing gap):
6474 Add any deep dependencies of required sets (args, system, world) that
6475 have not been pulled into the graph yet. This ensures that the graph
6476 is consistent such that initially satisfied deep dependencies are not
6477 broken in the new graph. Initially unsatisfied dependencies are
6478 irrelevant since we only want to avoid breaking dependencies that are
6481 Since this method can consume enough time to disturb users, it is
6482 currently only enabled by the --complete-graph option.
6484 if "--buildpkgonly" in self.myopts or \
6485 "recurse" not in self.myparams:
6488 if "complete" not in self.myparams:
6489 # Skip this to avoid consuming enough time to disturb users.
6492 # Put the depgraph into a mode that causes it to only
6493 # select packages that have already been added to the
6494 # graph or those that are installed and have not been
6495 # scheduled for replacement. Also, toggle the "deep"
6496 # parameter so that all dependencies are traversed and
6498 self._select_atoms = self._select_atoms_from_graph
6499 self._select_package = self._select_pkg_from_graph
6500 already_deep = "deep" in self.myparams
6501 if not already_deep:
6502 self.myparams.add("deep")
6504 for root in self.roots:
6505 required_set_names = self._required_set_names.copy()
6506 if root == self.target_root and \
6507 (already_deep or "empty" in self.myparams):
6508 required_set_names.difference_update(self._sets)
6509 if not required_set_names and not self._ignored_deps:
6511 root_config = self.roots[root]
6512 setconfig = root_config.setconfig
6514 # Reuse existing SetArg instances when available.
6515 for arg in self.digraph.root_nodes():
6516 if not isinstance(arg, SetArg):
6518 if arg.root_config != root_config:
6520 if arg.name in required_set_names:
6522 required_set_names.remove(arg.name)
6523 # Create new SetArg instances only when necessary.
6524 for s in required_set_names:
6525 expanded_set = InternalPackageSet(
6526 initial_atoms=setconfig.getSetAtoms(s))
6527 atom = SETPREFIX + s
6528 args.append(SetArg(arg=atom, set=expanded_set,
6529 root_config=root_config))
6530 vardb = root_config.trees["vartree"].dbapi
# Push each set's atoms onto the dep stack for graph traversal.
6532 for atom in arg.set:
6533 self._dep_stack.append(
6534 Dependency(atom=atom, root=root, parent=arg))
6535 if self._ignored_deps:
6536 self._dep_stack.extend(self._ignored_deps)
6537 self._ignored_deps = []
6538 if not self._create_graph(allow_unsatisfied=True):
6540 # Check the unsatisfied deps to see if any initially satisfied deps
6541 # will become unsatisfied due to an upgrade. Initially unsatisfied
6542 # deps are irrelevant since we only want to avoid breaking deps
6543 # that are initially satisfied.
6544 while self._unsatisfied_deps:
6545 dep = self._unsatisfied_deps.pop()
6546 matches = vardb.match_pkgs(dep.atom)
6548 self._initially_unsatisfied_deps.append(dep)
6550 # A scheduled installation broke a deep dependency.
6551 # Add the installed package to the graph so that it
6552 # will be appropriately reported as a slot collision
6553 # (possibly solvable via backtracking).
6554 pkg = matches[-1] # highest match
6555 if not self._add_pkg(pkg, dep):
6557 if not self._create_graph(allow_unsatisfied=True):
# NOTE(review): excerpt with gaps — the cache-hit early return, the
# try/except around aux_get and the final return fall in the gaps.
6561 def _pkg(self, cpv, type_name, root_config, installed=False):
# Docstring body (quote lines fall in a listing gap):
6563 Get a package instance from the cache, or create a new
6564 one if necessary. Raises KeyError from aux_get if it
6565 fails for some reason (package does not exist or is
6570 operation = "nomerge"
6571 pkg = self._pkg_cache.get(
6572 (type_name, root_config.root, cpv, operation))
6574 tree_type = self.pkg_tree_map[type_name]
6575 db = root_config.trees[tree_type].dbapi
6576 db_keys = list(self._trees_orig[root_config.root][
6577 tree_type].dbapi._aux_cache_keys)
# Lazy metadata: izip pairs keys with aux_get values on demand.
6579 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6581 raise portage.exception.PackageNotFound(cpv)
6582 pkg = Package(cpv=cpv, metadata=metadata,
6583 root_config=root_config, installed=installed)
6584 if type_name == "ebuild":
# Ebuild USE/CHOST must be computed against current settings.
6585 settings = self.pkgsettings[root_config.root]
6586 settings.setcpv(pkg)
6587 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6588 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6589 self._pkg_cache[pkg] = pkg
# NOTE(review): excerpt with gaps (embedded line numbers skip values);
# several loop headers (e.g. the per-pkg iteration over vardb), `try:`
# lines, `continue`/`break` statements and else-branches are missing.
6592 def validate_blockers(self):
6593 """Remove any blockers from the digraph that do not match any of the
6594 packages within the graph. If necessary, create hard deps to ensure
6595 correct merge order such that mutually blocking packages are never
6596 installed simultaneously."""
6598 if "--buildpkgonly" in self.myopts or \
6599 "--nodeps" in self.myopts:
6602 #if "deep" in self.myparams:
6604 # Pull in blockers from all installed packages that haven't already
6605 # been pulled into the depgraph. This is not enabled by default
6606 # due to the performance penalty that is incurred by all the
6607 # additional dep_check calls that are required.
6609 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6610 for myroot in self.trees:
6611 vardb = self.trees[myroot]["vartree"].dbapi
6612 portdb = self.trees[myroot]["porttree"].dbapi
6613 pkgsettings = self.pkgsettings[myroot]
6614 final_db = self.mydbapi[myroot]
# On-disk cache of per-package blocker atoms, keyed by cpv and
# validated against the installed COUNTER.
6616 blocker_cache = BlockerCache(myroot, vardb)
6617 stale_cache = set(blocker_cache)
6620 stale_cache.discard(cpv)
6621 pkg_in_graph = self.digraph.contains(pkg)
6623 # Check for masked installed packages. Only warn about
6624 # packages that are in the graph in order to avoid warning
6625 # about those that will be automatically uninstalled during
6626 # the merge process or by --depclean.
6628 if pkg_in_graph and not visible(pkgsettings, pkg):
6629 self._masked_installed.add(pkg)
6631 blocker_atoms = None
6637 self._blocker_parents.child_nodes(pkg))
6642 self._irrelevant_blockers.child_nodes(pkg))
6645 if blockers is not None:
6646 blockers = set(str(blocker.atom) \
6647 for blocker in blockers)
6649 # If this node has any blockers, create a "nomerge"
6650 # node for it so that they can be enforced.
6651 self.spinner.update()
6652 blocker_data = blocker_cache.get(cpv)
# A COUNTER mismatch means the cached entry is for a previous
# installation of this cpv and must be discarded.
6653 if blocker_data is not None and \
6654 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6657 # If blocker data from the graph is available, use
6658 # it to validate the cache and update the cache if
6660 if blocker_data is not None and \
6661 blockers is not None:
6662 if not blockers.symmetric_difference(
6663 blocker_data.atoms):
6667 if blocker_data is None and \
6668 blockers is not None:
6669 # Re-use the blockers from the graph.
6670 blocker_atoms = sorted(blockers)
6671 counter = long(pkg.metadata["COUNTER"])
6673 blocker_cache.BlockerData(counter, blocker_atoms)
6674 blocker_cache[pkg.cpv] = blocker_data
6678 blocker_atoms = blocker_data.atoms
6680 # Use aux_get() to trigger FakeVartree global
6681 # updates on *DEPEND when appropriate.
6682 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6683 # It is crucial to pass in final_db here in order to
6684 # optimize dep_check calls by eliminating atoms via
6685 # dep_wordreduce and dep_eval calls.
6687 portage.dep._dep_check_strict = False
6689 success, atoms = portage.dep_check(depstr,
6690 final_db, pkgsettings, myuse=pkg.use.enabled,
6691 trees=self._graph_trees, myroot=myroot)
6692 except Exception, e:
6693 if isinstance(e, SystemExit):
6695 # This is helpful, for example, if a ValueError
6696 # is thrown from cpv_expand due to multiple
6697 # matches (this can happen if an atom lacks a
6699 show_invalid_depstring_notice(
6700 pkg, depstr, str(e))
6704 portage.dep._dep_check_strict = True
6706 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6707 if replacement_pkg and \
6708 replacement_pkg[0].operation == "merge":
6709 # This package is being replaced anyway, so
6710 # ignore invalid dependencies so as not to
6711 # annoy the user too much (otherwise they'd be
6712 # forced to manually unmerge it first).
6714 show_invalid_depstring_notice(pkg, depstr, atoms)
6716 blocker_atoms = [myatom for myatom in atoms \
6717 if myatom.startswith("!")]
6718 blocker_atoms.sort()
6719 counter = long(pkg.metadata["COUNTER"])
6720 blocker_cache[cpv] = \
6721 blocker_cache.BlockerData(counter, blocker_atoms)
6724 for atom in blocker_atoms:
6725 blocker = Blocker(atom=portage.dep.Atom(atom),
6726 eapi=pkg.metadata["EAPI"], root=myroot)
6727 self._blocker_parents.add(blocker, pkg)
6728 except portage.exception.InvalidAtom, e:
6729 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6730 show_invalid_depstring_notice(
6731 pkg, depstr, "Invalid Atom: %s" % (e,))
# Drop cache entries whose cpv was not seen this pass.
6733 for cpv in stale_cache:
6734 del blocker_cache[cpv]
6735 blocker_cache.flush()
6738 # Discard any "uninstall" tasks scheduled by previous calls
6739 # to this method, since those tasks may not make sense given
6740 # the current graph state.
6741 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6742 if previous_uninstall_tasks:
6743 self._blocker_uninstalls = digraph()
6744 self.digraph.difference_update(previous_uninstall_tasks)
# Second phase: resolve each collected blocker against the initial
# (installed) and final (post-merge) package sets.
6746 for blocker in self._blocker_parents.leaf_nodes():
6747 self.spinner.update()
6748 root_config = self.roots[blocker.root]
6749 virtuals = root_config.settings.getvirtuals()
6750 myroot = blocker.root
6751 initial_db = self.trees[myroot]["vartree"].dbapi
6752 final_db = self.mydbapi[myroot]
6754 provider_virtual = False
6755 if blocker.cp in virtuals and \
6756 not self._have_new_virt(blocker.root, blocker.cp):
6757 provider_virtual = True
6759 # Use this to check PROVIDE for each matched package
6761 atom_set = InternalPackageSet(
6762 initial_atoms=[blocker.atom])
6764 if provider_virtual:
# Expand an old-style virtual blocker to one atom per provider.
6766 for provider_entry in virtuals[blocker.cp]:
6768 portage.dep_getkey(provider_entry)
6769 atoms.append(blocker.atom.replace(
6770 blocker.cp, provider_cp))
6772 atoms = [blocker.atom]
6774 blocked_initial = set()
6776 for pkg in initial_db.match_pkgs(atom):
6777 if atom_set.findAtomForPackage(pkg):
6778 blocked_initial.add(pkg)
6780 blocked_final = set()
6782 for pkg in final_db.match_pkgs(atom):
6783 if atom_set.findAtomForPackage(pkg):
6784 blocked_final.add(pkg)
6786 if not blocked_initial and not blocked_final:
6787 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6788 self._blocker_parents.remove(blocker)
6789 # Discard any parents that don't have any more blockers.
6790 for pkg in parent_pkgs:
6791 self._irrelevant_blockers.add(blocker, pkg)
6792 if not self._blocker_parents.child_nodes(pkg):
6793 self._blocker_parents.remove(pkg)
6795 for parent in self._blocker_parents.parent_nodes(blocker):
6796 unresolved_blocks = False
6797 depends_on_order = set()
6798 for pkg in blocked_initial:
6799 if pkg.slot_atom == parent.slot_atom:
6800 # TODO: Support blocks within slots in cases where it
6801 # might make sense. For example, a new version might
6802 # require that the old version be uninstalled at build
6805 if parent.installed:
6806 # Two currently installed packages conflict with
6807 # each other. Ignore this case since the damage
6808 # is already done and this would be likely to
6809 # confuse users if displayed like a normal blocker.
6812 self._blocked_pkgs.add(pkg, blocker)
6814 if parent.operation == "merge":
6815 # Maybe the blocked package can be replaced or simply
6816 # unmerged to resolve this block.
6817 depends_on_order.add((pkg, parent))
6819 # None of the above blocker resolution techniques apply,
6820 # so apparently this one is unresolvable.
6821 unresolved_blocks = True
6822 for pkg in blocked_final:
6823 if pkg.slot_atom == parent.slot_atom:
6824 # TODO: Support blocks within slots.
6826 if parent.operation == "nomerge" and \
6827 pkg.operation == "nomerge":
6828 # This blocker will be handled the next time that a
6829 # merge of either package is triggered.
6832 self._blocked_pkgs.add(pkg, blocker)
6834 # Maybe the blocking package can be
6835 # unmerged to resolve this block.
6836 if parent.operation == "merge" and pkg.installed:
6837 depends_on_order.add((pkg, parent))
6839 elif parent.operation == "nomerge":
6840 depends_on_order.add((parent, pkg))
6842 # None of the above blocker resolution techniques apply,
6843 # so apparently this one is unresolvable.
6844 unresolved_blocks = True
6846 # Make sure we don't unmerge any package that has been pulled
6848 if not unresolved_blocks and depends_on_order:
6849 for inst_pkg, inst_task in depends_on_order:
6850 if self.digraph.contains(inst_pkg) and \
6851 self.digraph.parent_nodes(inst_pkg):
6852 unresolved_blocks = True
6855 if not unresolved_blocks and depends_on_order:
# Schedule explicit uninstall tasks, ordered before the merge
# that requires them via a hard dependency edge.
6856 for inst_pkg, inst_task in depends_on_order:
6857 uninst_task = Package(built=inst_pkg.built,
6858 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6859 metadata=inst_pkg.metadata,
6860 operation="uninstall",
6861 root_config=inst_pkg.root_config,
6862 type_name=inst_pkg.type_name)
6863 self._pkg_cache[uninst_task] = uninst_task
6864 # Enforce correct merge order with a hard dep.
6865 self.digraph.addnode(uninst_task, inst_task,
6866 priority=BlockerDepPriority.instance)
6867 # Count references to this blocker so that it can be
6868 # invalidated after nodes referencing it have been
6870 self._blocker_uninstalls.addnode(uninst_task, blocker)
6871 if not unresolved_blocks and not depends_on_order:
6872 self._irrelevant_blockers.add(blocker, parent)
6873 self._blocker_parents.remove_edge(blocker, parent)
6874 if not self._blocker_parents.parent_nodes(blocker):
6875 self._blocker_parents.remove(blocker)
6876 if not self._blocker_parents.child_nodes(parent):
6877 self._blocker_parents.remove(parent)
6878 if unresolved_blocks:
6879 self._unsolvable_blockers.add(blocker, parent)
6883 def _accept_blocker_conflicts(self):
6885 for x in ("--buildpkgonly", "--fetchonly",
6886 "--fetch-all-uri", "--nodeps"):
6887 if x in self.myopts:
6892 def _merge_order_bias(self, mygraph):
6894 For optimal leaf node selection, promote deep system runtime deps and
6895 order nodes from highest to lowest overall reference count.
6899 for node in mygraph.order:
6900 node_info[node] = len(mygraph.parent_nodes(node))
6901 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
6903 def cmp_merge_preference(node1, node2):
6905 if node1.operation == 'uninstall':
6906 if node2.operation == 'uninstall':
6910 if node2.operation == 'uninstall':
6911 if node1.operation == 'uninstall':
6915 node1_sys = node1 in deep_system_deps
6916 node2_sys = node2 in deep_system_deps
6917 if node1_sys != node2_sys:
6922 return node_info[node2] - node_info[node1]
6924 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
6926 def altlist(self, reversed=False):
6928 while self._serialized_tasks_cache is None:
6929 self._resolve_conflicts()
6931 self._serialized_tasks_cache, self._scheduler_graph = \
6932 self._serialize_tasks()
6933 except self._serialize_tasks_retry:
6936 retlist = self._serialized_tasks_cache[:]
6941 def schedulerGraph(self):
6943 The scheduler graph is identical to the normal one except that
6944 uninstall edges are reversed in specific cases that require
6945 conflicting packages to be temporarily installed simultaneously.
6946 This is intended for use by the Scheduler in it's parallelization
6947 logic. It ensures that temporary simultaneous installation of
6948 conflicting packages is avoided when appropriate (especially for
6949 !!atom blockers), but allowed in specific cases that require it.
6951 Note that this method calls break_refs() which alters the state of
6952 internal Package instances such that this depgraph instance should
6953 not be used to perform any more calculations.
6955 if self._scheduler_graph is None:
6957 self.break_refs(self._scheduler_graph.order)
6958 return self._scheduler_graph
6960 def break_refs(self, nodes):
6962 Take a mergelist like that returned from self.altlist() and
6963 break any references that lead back to the depgraph. This is
6964 useful if you want to hold references to packages without
6965 also holding the depgraph on the heap.
6968 if hasattr(node, "root_config"):
6969 # The FakeVartree references the _package_cache which
6970 # references the depgraph. So that Package instances don't
6971 # hold the depgraph and FakeVartree on the heap, replace
6972 # the RootConfig that references the FakeVartree with the
6973 # original RootConfig instance which references the actual
6975 node.root_config = \
6976 self._trees_orig[node.root_config.root]["root_config"]
6978 def _resolve_conflicts(self):
6979 if not self._complete_graph():
6980 raise self._unknown_internal_error()
6982 if not self.validate_blockers():
6983 raise self._unknown_internal_error()
6985 if self._slot_collision_info:
6986 self._process_slot_conflicts()
	def _serialize_tasks(self):
		# Compute a merge order (retlist) from self.digraph, together with
		# the scheduler graph used for parallelization.
		# NOTE(review): this listing appears to be missing scattered source
		# lines (loop headers, break/continue statements, small
		# assignments such as retlist/asap_nodes/prefer_asap initializers),
		# so several fragments below look orphaned.  Code tokens are
		# preserved exactly as given; only comments and docstring
		# delimiters have been added.  Reconcile against the pristine file
		# before relying on control flow here.
		if "--debug" in self.myopts:
			writemsg("\ndigraph:\n\n", noiselevel=-1)
			self.digraph.debug_print()
			writemsg("\n", noiselevel=-1)
		# Work on copies so the real depgraph is left intact.
		scheduler_graph = self.digraph.copy()
		mygraph=self.digraph.copy()
		# Prune "nomerge" root nodes if nothing depends on them, since
		# otherwise they slow down merge order calculation. Don't remove
		# non-root nodes since they help optimize merge order in some cases
		# such as revdep-rebuild.
		removed_nodes = set()
		for node in mygraph.root_nodes():
			if not isinstance(node, Package) or \
				node.installed or node.onlydeps:
				removed_nodes.add(node)
		self.spinner.update()
		mygraph.difference_update(removed_nodes)
		if not removed_nodes:
		removed_nodes.clear()
		self._merge_order_bias(mygraph)
		def cmp_circular_bias(n1, n2):
			"""
			RDEPEND is stronger than PDEPEND and this function
			measures such a strength bias within a circular
			dependency relationship.
			"""
			n1_n2_medium = n2 in mygraph.child_nodes(n1,
				ignore_priority=priority_range.ignore_medium_soft)
			n2_n1_medium = n1 in mygraph.child_nodes(n2,
				ignore_priority=priority_range.ignore_medium_soft)
			if n1_n2_medium == n2_n1_medium:
		myblocker_uninstalls = self._blocker_uninstalls.copy()
		# Contains uninstall tasks that have been scheduled to
		# occur after overlapping blockers have been installed.
		scheduled_uninstalls = set()
		# Contains any Uninstall tasks that have been ignored
		# in order to avoid the circular deps code path. These
		# correspond to blocker conflicts that could not be
		ignored_uninstall_tasks = set()
		have_uninstall_task = False
		complete = "complete" in self.myparams
		def get_nodes(**kwargs):
			"""
			Returns leaf nodes excluding Uninstall instances
			since those should be executed as late as possible.
			"""
			return [node for node in mygraph.leaf_nodes(**kwargs) \
				if isinstance(node, Package) and \
					(node.operation != "uninstall" or \
					node in scheduled_uninstalls)]
		# sys-apps/portage needs special treatment if ROOT="/"
		running_root = self._running_root.root
		from portage.const import PORTAGE_PACKAGE_ATOM
		runtime_deps = InternalPackageSet(
			initial_atoms=[PORTAGE_PACKAGE_ATOM])
		running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
			PORTAGE_PACKAGE_ATOM)
		replacement_portage = self.mydbapi[running_root].match_pkgs(
			PORTAGE_PACKAGE_ATOM)
			running_portage = running_portage[0]
			running_portage = None
		if replacement_portage:
			replacement_portage = replacement_portage[0]
			replacement_portage = None
		if replacement_portage == running_portage:
			replacement_portage = None
		if replacement_portage is not None:
			# update from running_portage to replacement_portage asap
			asap_nodes.append(replacement_portage)
		if running_portage is not None:
				portage_rdepend = self._select_atoms_highest_available(
					running_root, running_portage.metadata["RDEPEND"],
					myuse=running_portage.use.enabled,
					parent=running_portage, strict=False)
			except portage.exception.InvalidDependString, e:
				portage.writemsg("!!! Invalid RDEPEND in " + \
					"'%svar/db/pkg/%s/RDEPEND': %s\n" % \
					(running_root, running_portage.cpv, e), noiselevel=-1)
				portage_rdepend = []
			runtime_deps.update(atom for atom in portage_rdepend \
				if not atom.startswith("!"))
		def gather_deps(ignore_priority, mergeable_nodes,
			selected_nodes, node):
			"""
			Recursively gather a group of nodes that RDEPEND on
			eachother. This ensures that they are merged as a group
			and get their RDEPENDs satisfied as soon as possible.
			"""
			if node in selected_nodes:
			if node not in mergeable_nodes:
			if node == replacement_portage and \
				mygraph.child_nodes(node,
				ignore_priority=priority_range.ignore_medium_soft):
				# Make sure that portage always has all of it's
				# RDEPENDs installed first.
			selected_nodes.add(node)
			for child in mygraph.child_nodes(node,
				ignore_priority=ignore_priority):
				if not gather_deps(ignore_priority,
					mergeable_nodes, selected_nodes, child):
		def ignore_uninst_or_med(priority):
			# Treat blocker-priority edges as ignorable, delegating the
			# rest to the active priority range.
			if priority is BlockerDepPriority.instance:
			return priority_range.ignore_medium(priority)
		def ignore_uninst_or_med_soft(priority):
			if priority is BlockerDepPriority.instance:
			return priority_range.ignore_medium_soft(priority)
		tree_mode = "--tree" in self.myopts
		# Tracks whether or not the current iteration should prefer asap_nodes
		# if available. This is set to False when the previous iteration
		# failed to select any nodes. It is reset whenever nodes are
		# successfully selected.
		# Controls whether or not the current iteration should drop edges that
		# are "satisfied" by installed packages, in order to solve circular
		# dependencies. The deep runtime dependencies of installed packages are
		# not checked in this case (bug #199856), so it must be avoided
		# whenever possible.
		drop_satisfied = False
		# State of variables for successive iterations that loosen the
		# criteria for node selection.
		#
		# iteration   prefer_asap   drop_satisfied
		# If no nodes are selected on the last iteration, it is due to
		# unresolved blockers or circular dependencies.
		while not mygraph.empty():
			self.spinner.update()
			selected_nodes = None
			ignore_priority = None
			if drop_satisfied or (prefer_asap and asap_nodes):
				priority_range = DepPrioritySatisfiedRange
				priority_range = DepPriorityNormalRange
			if prefer_asap and asap_nodes:
				# ASAP nodes are merged before their soft deps. Go ahead and
				# select root nodes here if necessary, since it's typical for
				# the parent to have been removed from the graph already.
				asap_nodes = [node for node in asap_nodes \
					if mygraph.contains(node)]
				for node in asap_nodes:
					if not mygraph.child_nodes(node,
						ignore_priority=priority_range.ignore_soft):
						selected_nodes = [node]
						asap_nodes.remove(node)
			if not selected_nodes and \
				not (prefer_asap and asap_nodes):
				for i in xrange(priority_range.NONE,
					priority_range.MEDIUM_SOFT + 1):
					ignore_priority = priority_range.ignore_priority[i]
					nodes = get_nodes(ignore_priority=ignore_priority)
						# If there is a mix of uninstall nodes with other
						# types, save the uninstall nodes for later since
						# sometimes a merge node will render an uninstall
						# node unnecessary (due to occupying the same slot),
						# and we want to avoid executing a separate uninstall
						# task in that case.
							good_uninstalls = []
							with_some_uninstalls_excluded = []
								if node.operation == "uninstall":
									slot_node = self.mydbapi[node.root
										].match_pkgs(node.slot_atom)
										slot_node[0].operation == "merge":
										good_uninstalls.append(node)
									with_some_uninstalls_excluded.append(node)
								nodes = good_uninstalls
							elif with_some_uninstalls_excluded:
								nodes = with_some_uninstalls_excluded
						if ignore_priority is None and not tree_mode:
							# Greedily pop all of these nodes since no
							# relationship has been ignored. This optimization
							# destroys --tree output, so it's disabled in tree
							selected_nodes = nodes
							# For optimal merge order:
							#  * Only pop one node.
							#  * Removing a root node (node without a parent)
							#    will not produce a leaf node, so avoid it.
							#  * It's normal for a selected uninstall to be a
							#    root node, so don't check them for parents.
								if node.operation == "uninstall" or \
									mygraph.parent_nodes(node):
									selected_nodes = [node]
			if not selected_nodes:
				nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
					mergeable_nodes = set(nodes)
					if prefer_asap and asap_nodes:
					for i in xrange(priority_range.SOFT,
						priority_range.MEDIUM_SOFT + 1):
						ignore_priority = priority_range.ignore_priority[i]
							if not mygraph.parent_nodes(node):
							selected_nodes = set()
							if gather_deps(ignore_priority,
								mergeable_nodes, selected_nodes, node):
								selected_nodes = None
			if prefer_asap and asap_nodes and not selected_nodes:
				# We failed to find any asap nodes to merge, so ignore
				# them for the next iteration.
			if selected_nodes and ignore_priority is not None:
				# Try to merge ignored medium_soft deps as soon as possible
				# if they're not satisfied by installed packages.
				for node in selected_nodes:
					children = set(mygraph.child_nodes(node))
					soft = children.difference(
						mygraph.child_nodes(node,
						ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
					medium_soft = children.difference(
						mygraph.child_nodes(node,
							DepPrioritySatisfiedRange.ignore_medium_soft))
					medium_soft.difference_update(soft)
					for child in medium_soft:
						if child in selected_nodes:
						if child in asap_nodes:
						asap_nodes.append(child)
			if selected_nodes and len(selected_nodes) > 1:
				if not isinstance(selected_nodes, list):
					selected_nodes = list(selected_nodes)
				selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
			if not selected_nodes and not myblocker_uninstalls.is_empty():
				# An Uninstall task needs to be executed in order to
				# avoid conflict if possible.
					priority_range = DepPrioritySatisfiedRange
					priority_range = DepPriorityNormalRange
				mergeable_nodes = get_nodes(
					ignore_priority=ignore_uninst_or_med)
				min_parent_deps = None
				for task in myblocker_uninstalls.leaf_nodes():
					# Do some sanity checks so that system or world packages
					# don't get uninstalled inappropriately here (only really
					# necessary when --complete-graph has not been enabled).
					if task in ignored_uninstall_tasks:
					if task in scheduled_uninstalls:
						# It's been scheduled but it hasn't
						# been executed yet due to dependence
						# on installation of blocking packages.
					root_config = self.roots[task.root]
					inst_pkg = self._pkg_cache[
						("installed", task.root, task.cpv, "nomerge")]
					if self.digraph.contains(inst_pkg):
					forbid_overlap = False
					heuristic_overlap = False
					for blocker in myblocker_uninstalls.parent_nodes(task):
						# EAPI 0/1 blockers have no !!atom syntax, so overlap
						# handling is heuristic for them.
						if blocker.eapi in ("0", "1"):
							heuristic_overlap = True
						elif blocker.atom.blocker.overlap.forbid:
							forbid_overlap = True
					if forbid_overlap and running_root == task.root:
					if heuristic_overlap and running_root == task.root:
						# Never uninstall sys-apps/portage or it's essential
						# dependencies, except through replacement.
							runtime_dep_atoms = \
								list(runtime_deps.iterAtomsForPackage(task))
						except portage.exception.InvalidDependString, e:
							portage.writemsg("!!! Invalid PROVIDE in " + \
								"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
								(task.root, task.cpv, e), noiselevel=-1)
						# Don't uninstall a runtime dep if it appears
						# to be the only suitable one installed.
						vardb = root_config.trees["vartree"].dbapi
						for atom in runtime_dep_atoms:
							other_version = None
							for pkg in vardb.match_pkgs(atom):
								if pkg.cpv == task.cpv and \
									pkg.metadata["COUNTER"] == \
									task.metadata["COUNTER"]:
							if other_version is None:
					# For packages in the system set, don't take
					# any chances. If the conflict can't be resolved
					# by a normal replacement operation then abort.
						for atom in root_config.sets[
							"system"].iterAtomsForPackage(task):
					except portage.exception.InvalidDependString, e:
						portage.writemsg("!!! Invalid PROVIDE in " + \
							"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
							(task.root, task.cpv, e), noiselevel=-1)
					# Note that the world check isn't always
					# necessary since self._complete_graph() will
					# add all packages from the system and world sets to the
					# graph. This just allows unresolved conflicts to be
					# detected as early as possible, which makes it possible
					# to avoid calling self._complete_graph() when it is
					# unnecessary due to blockers triggering an abortion.
						# For packages in the world set, go ahead an uninstall
						# when necessary, as long as the atom will be satisfied
						# in the final state.
						graph_db = self.mydbapi[task.root]
							for atom in root_config.sets[
								"world"].iterAtomsForPackage(task):
								for pkg in graph_db.match_pkgs(atom):
										self._blocked_world_pkgs[inst_pkg] = atom
						except portage.exception.InvalidDependString, e:
							portage.writemsg("!!! Invalid PROVIDE in " + \
								"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
								(task.root, task.cpv, e), noiselevel=-1)
					# Check the deps of parent nodes to ensure that
					# the chosen task produces a leaf node. Maybe
					# this can be optimized some more to make the
					# best possible choice, but the current algorithm
					# is simple and should be near optimal for most
					mergeable_parent = False
					for parent in mygraph.parent_nodes(task):
						parent_deps.update(mygraph.child_nodes(parent,
							ignore_priority=priority_range.ignore_medium_soft))
						if parent in mergeable_nodes and \
							gather_deps(ignore_uninst_or_med_soft,
								mergeable_nodes, set(), parent):
							mergeable_parent = True
					if not mergeable_parent:
					parent_deps.remove(task)
					if min_parent_deps is None or \
						len(parent_deps) < min_parent_deps:
						min_parent_deps = len(parent_deps)
				if uninst_task is not None:
					# The uninstall is performed only after blocking
					# packages have been merged on top of it. File
					# collisions between blocking packages are detected
					# and removed from the list of files to be uninstalled.
					scheduled_uninstalls.add(uninst_task)
					parent_nodes = mygraph.parent_nodes(uninst_task)
					# Reverse the parent -> uninstall edges since we want
					# to do the uninstall after blocking packages have
					# been merged on top of it.
					mygraph.remove(uninst_task)
					for blocked_pkg in parent_nodes:
						mygraph.add(blocked_pkg, uninst_task,
							priority=BlockerDepPriority.instance)
						scheduler_graph.remove_edge(uninst_task, blocked_pkg)
						scheduler_graph.add(blocked_pkg, uninst_task,
							priority=BlockerDepPriority.instance)
					# Reset the state variables for leaf node selection and
					# continue trying to select leaf nodes.
					drop_satisfied = False
			if not selected_nodes:
				# Only select root nodes as a last resort. This case should
				# only trigger when the graph is nearly empty and the only
				# remaining nodes are isolated (no parents or children). Since
				# the nodes must be isolated, ignore_priority is not needed.
				selected_nodes = get_nodes()
			if not selected_nodes and not drop_satisfied:
				drop_satisfied = True
			if not selected_nodes and not myblocker_uninstalls.is_empty():
				# If possible, drop an uninstall task here in order to avoid
				# the circular deps code path. The corresponding blocker will
				# still be counted as an unresolved conflict.
				for node in myblocker_uninstalls.leaf_nodes():
						mygraph.remove(node)
						ignored_uninstall_tasks.add(node)
				if uninst_task is not None:
					# Reset the state variables for leaf node selection and
					# continue trying to select leaf nodes.
					drop_satisfied = False
			if not selected_nodes:
				self._circular_deps_for_display = mygraph
				raise self._unknown_internal_error()
			# At this point, we've succeeded in selecting one or more nodes, so
			# reset state variables for leaf node selection.
			drop_satisfied = False
			mygraph.difference_update(selected_nodes)
			for node in selected_nodes:
				if isinstance(node, Package) and \
					node.operation == "nomerge":
				# Handle interactions between blockers
				# and uninstallation tasks.
				solved_blockers = set()
				if isinstance(node, Package) and \
					"uninstall" == node.operation:
					have_uninstall_task = True
					vardb = self.trees[node.root]["vartree"].dbapi
					previous_cpv = vardb.match(node.slot_atom)
						# The package will be replaced by this one, so remove
						# the corresponding Uninstall task if necessary.
						previous_cpv = previous_cpv[0]
							("installed", node.root, previous_cpv, "uninstall")
							mygraph.remove(uninst_task)
				if uninst_task is not None and \
					uninst_task not in ignored_uninstall_tasks and \
					myblocker_uninstalls.contains(uninst_task):
					blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
					myblocker_uninstalls.remove(uninst_task)
					# Discard any blockers that this Uninstall solves.
					for blocker in blocker_nodes:
						if not myblocker_uninstalls.child_nodes(blocker):
							myblocker_uninstalls.remove(blocker)
							solved_blockers.add(blocker)
				retlist.append(node)
				if (isinstance(node, Package) and \
					"uninstall" == node.operation) or \
					(uninst_task is not None and \
					uninst_task in scheduled_uninstalls):
					# Include satisfied blockers in the merge list
					# since the user might be interested and also
					# it serves as an indicator that blocking packages
					# will be temporarily installed simultaneously.
					for blocker in solved_blockers:
						retlist.append(Blocker(atom=blocker.atom,
							root=blocker.root, eapi=blocker.eapi,
		unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
		for node in myblocker_uninstalls.root_nodes():
			unsolvable_blockers.add(node)
		for blocker in unsolvable_blockers:
			retlist.append(blocker)
		# If any Uninstall tasks need to be executed in order
		# to avoid a conflict, complete the graph with any
		# dependencies that may have been initially
		# neglected (to ensure that unsafe Uninstall tasks
		# are properly identified and blocked from execution).
		if have_uninstall_task and \
			not unsolvable_blockers:
			self.myparams.add("complete")
			raise self._serialize_tasks_retry("")
		if unsolvable_blockers and \
			not self._accept_blocker_conflicts():
			self._unsatisfied_blockers_for_display = unsolvable_blockers
			self._serialized_tasks_cache = retlist[:]
			self._scheduler_graph = scheduler_graph
			raise self._unknown_internal_error()
		if self._slot_collision_info and \
			not self._accept_blocker_conflicts():
			self._serialized_tasks_cache = retlist[:]
			self._scheduler_graph = scheduler_graph
			raise self._unknown_internal_error()
		return retlist, scheduler_graph
7592 def _show_circular_deps(self, mygraph):
7593 # No leaf nodes are available, so we have a circular
7594 # dependency panic situation. Reduce the noise level to a
7595 # minimum via repeated elimination of root nodes since they
7596 # have no parents and thus can not be part of a cycle.
7598 root_nodes = mygraph.root_nodes(
7599 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7602 mygraph.difference_update(root_nodes)
7603 # Display the USE flags that are enabled on nodes that are part
7604 # of dependency cycles in case that helps the user decide to
7605 # disable some of them.
7607 tempgraph = mygraph.copy()
7608 while not tempgraph.empty():
7609 nodes = tempgraph.leaf_nodes()
7611 node = tempgraph.order[0]
7614 display_order.append(node)
7615 tempgraph.remove(node)
7616 display_order.reverse()
7617 self.myopts.pop("--quiet", None)
7618 self.myopts.pop("--verbose", None)
7619 self.myopts["--tree"] = True
7620 portage.writemsg("\n\n", noiselevel=-1)
7621 self.display(display_order)
7622 prefix = colorize("BAD", " * ")
7623 portage.writemsg("\n", noiselevel=-1)
7624 portage.writemsg(prefix + "Error: circular dependencies:\n",
7626 portage.writemsg("\n", noiselevel=-1)
7627 mygraph.debug_print()
7628 portage.writemsg("\n", noiselevel=-1)
7629 portage.writemsg(prefix + "Note that circular dependencies " + \
7630 "can often be avoided by temporarily\n", noiselevel=-1)
7631 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7632 "optional dependencies.\n", noiselevel=-1)
7634 def _show_merge_list(self):
7635 if self._serialized_tasks_cache is not None and \
7636 not (self._displayed_list and \
7637 (self._displayed_list == self._serialized_tasks_cache or \
7638 self._displayed_list == \
7639 list(reversed(self._serialized_tasks_cache)))):
7640 display_list = self._serialized_tasks_cache[:]
7641 if "--tree" in self.myopts:
7642 display_list.reverse()
7643 self.display(display_list)
	def _show_unsatisfied_blockers(self, blockers):
		# Print an error for the given unsatisfied blockers, listing each
		# conflicting package together with the parents that pulled it in.
		# NOTE(review): this listing appears to be missing scattered lines
		# (e.g. the conflict_pkgs/pruned_pkgs/msg accumulator
		# initializations and several break/else statements), so some
		# fragments below look orphaned.  Code tokens are preserved
		# exactly as given; only comments have been added.
		self._show_merge_list()
		msg = "Error: The above package list contains " + \
			"packages which cannot be installed " + \
			"at the same time on the same system."
		prefix = colorize("BAD", " * ")
		from textwrap import wrap
		portage.writemsg("\n", noiselevel=-1)
		for line in wrap(msg, 70):
			portage.writemsg(prefix + line + "\n", noiselevel=-1)
		# Display the conflicting packages along with the packages
		# that pulled them in. This is helpful for troubleshooting
		# cases in which blockers don't solve automatically and
		# the reasons are not apparent from the normal merge list
		for blocker in blockers:
			for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
				self._blocker_parents.parent_nodes(blocker)):
				parent_atoms = self._parent_atoms.get(pkg)
				if not parent_atoms:
					# Fall back to the world-set atom recorded when the
					# blocked package had no graph parents.
					atom = self._blocked_world_pkgs.get(pkg)
					if atom is not None:
						parent_atoms = set([("@world", atom)])
					conflict_pkgs[pkg] = parent_atoms
			# Reduce noise by pruning packages that are only
			# pulled in by other conflict packages.
			for pkg, parent_atoms in conflict_pkgs.iteritems():
				relevant_parent = False
				for parent, atom in parent_atoms:
					if parent not in conflict_pkgs:
						relevant_parent = True
				if not relevant_parent:
					pruned_pkgs.add(pkg)
			for pkg in pruned_pkgs:
				del conflict_pkgs[pkg]
			# Max number of parents shown, to avoid flooding the display.
			for pkg, parent_atoms in conflict_pkgs.iteritems():
				# Prefer packages that are not directly involved in a conflict.
				for parent_atom in parent_atoms:
					if len(pruned_list) >= max_parents:
					parent, atom = parent_atom
					if parent not in conflict_pkgs:
						pruned_list.add(parent_atom)
				for parent_atom in parent_atoms:
					if len(pruned_list) >= max_parents:
					pruned_list.add(parent_atom)
				omitted_parents = len(parent_atoms) - len(pruned_list)
				msg.append(indent + "%s pulled in by\n" % pkg)
				for parent_atom in pruned_list:
					parent, atom = parent_atom
					msg.append(2*indent)
					if isinstance(parent,
						(PackageArg, AtomArg)):
						# For PackageArg and AtomArg types, it's
						# redundant to display the atom attribute.
						msg.append(str(parent))
						# Display the specific atom from SetArg or
						msg.append("%s required by %s" % (atom, parent))
					msg.append(2*indent)
					msg.append("(and %d more)\n" % omitted_parents)
			sys.stderr.write("".join(msg))
		if "--quiet" not in self.myopts:
			show_blocker_docs_link()
7741 def display(self, mylist, favorites=[], verbosity=None):
7743 # This is used to prevent display_problems() from
7744 # redundantly displaying this exact same merge list
7745 # again via _show_merge_list().
7746 self._displayed_list = mylist
7748 if verbosity is None:
7749 verbosity = ("--quiet" in self.myopts and 1 or \
7750 "--verbose" in self.myopts and 3 or 2)
7751 favorites_set = InternalPackageSet(favorites)
7752 oneshot = "--oneshot" in self.myopts or \
7753 "--onlydeps" in self.myopts
7754 columns = "--columns" in self.myopts
7759 counters = PackageCounters()
7761 if verbosity == 1 and "--verbose" not in self.myopts:
7762 def create_use_string(*args):
7765 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7767 is_new, reinst_flags,
7768 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7769 alphabetical=("--alphabetical" in self.myopts)):
7777 cur_iuse = set(cur_iuse)
7778 enabled_flags = cur_iuse.intersection(cur_use)
7779 removed_iuse = set(old_iuse).difference(cur_iuse)
7780 any_iuse = cur_iuse.union(old_iuse)
7781 any_iuse = list(any_iuse)
7783 for flag in any_iuse:
7786 reinst_flag = reinst_flags and flag in reinst_flags
7787 if flag in enabled_flags:
7789 if is_new or flag in old_use and \
7790 (all_flags or reinst_flag):
7791 flag_str = red(flag)
7792 elif flag not in old_iuse:
7793 flag_str = yellow(flag) + "%*"
7794 elif flag not in old_use:
7795 flag_str = green(flag) + "*"
7796 elif flag in removed_iuse:
7797 if all_flags or reinst_flag:
7798 flag_str = yellow("-" + flag) + "%"
7801 flag_str = "(" + flag_str + ")"
7802 removed.append(flag_str)
7805 if is_new or flag in old_iuse and \
7806 flag not in old_use and \
7807 (all_flags or reinst_flag):
7808 flag_str = blue("-" + flag)
7809 elif flag not in old_iuse:
7810 flag_str = yellow("-" + flag)
7811 if flag not in iuse_forced:
7813 elif flag in old_use:
7814 flag_str = green("-" + flag) + "*"
7816 if flag in iuse_forced:
7817 flag_str = "(" + flag_str + ")"
7819 enabled.append(flag_str)
7821 disabled.append(flag_str)
7824 ret = " ".join(enabled)
7826 ret = " ".join(enabled + disabled + removed)
7828 ret = '%s="%s" ' % (name, ret)
7831 repo_display = RepoDisplay(self.roots)
7835 mygraph = self.digraph.copy()
7837 # If there are any Uninstall instances, add the corresponding
7838 # blockers to the digraph (useful for --tree display).
7840 executed_uninstalls = set(node for node in mylist \
7841 if isinstance(node, Package) and node.operation == "unmerge")
7843 for uninstall in self._blocker_uninstalls.leaf_nodes():
7844 uninstall_parents = \
7845 self._blocker_uninstalls.parent_nodes(uninstall)
7846 if not uninstall_parents:
7849 # Remove the corresponding "nomerge" node and substitute
7850 # the Uninstall node.
7851 inst_pkg = self._pkg_cache[
7852 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7854 mygraph.remove(inst_pkg)
7859 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7861 inst_pkg_blockers = []
7863 # Break the Package -> Uninstall edges.
7864 mygraph.remove(uninstall)
7866 # Resolution of a package's blockers
7867 # depend on it's own uninstallation.
7868 for blocker in inst_pkg_blockers:
7869 mygraph.add(uninstall, blocker)
7871 # Expand Package -> Uninstall edges into
7872 # Package -> Blocker -> Uninstall edges.
7873 for blocker in uninstall_parents:
7874 mygraph.add(uninstall, blocker)
7875 for parent in self._blocker_parents.parent_nodes(blocker):
7876 if parent != inst_pkg:
7877 mygraph.add(blocker, parent)
7879 # If the uninstall task did not need to be executed because
7880 # of an upgrade, display Blocker -> Upgrade edges since the
7881 # corresponding Blocker -> Uninstall edges will not be shown.
7883 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7884 if upgrade_node is not None and \
7885 uninstall not in executed_uninstalls:
7886 for blocker in uninstall_parents:
7887 mygraph.add(upgrade_node, blocker)
7889 unsatisfied_blockers = []
7894 if isinstance(x, Blocker) and not x.satisfied:
7895 unsatisfied_blockers.append(x)
7898 if "--tree" in self.myopts:
7899 depth = len(tree_nodes)
7900 while depth and graph_key not in \
7901 mygraph.child_nodes(tree_nodes[depth-1]):
7904 tree_nodes = tree_nodes[:depth]
7905 tree_nodes.append(graph_key)
7906 display_list.append((x, depth, True))
7907 shown_edges.add((graph_key, tree_nodes[depth-1]))
7909 traversed_nodes = set() # prevent endless circles
7910 traversed_nodes.add(graph_key)
7911 def add_parents(current_node, ordered):
7913 # Do not traverse to parents if this node is an
7914 # argument or a direct member of a set that has
7915 # been specified as an argument (system or world).
7916 if current_node not in self._set_nodes:
7917 parent_nodes = mygraph.parent_nodes(current_node)
7919 child_nodes = set(mygraph.child_nodes(current_node))
7920 selected_parent = None
7921 # First, try to avoid a direct cycle.
7922 for node in parent_nodes:
7923 if not isinstance(node, (Blocker, Package)):
7925 if node not in traversed_nodes and \
7926 node not in child_nodes:
7927 edge = (current_node, node)
7928 if edge in shown_edges:
7930 selected_parent = node
7932 if not selected_parent:
7933 # A direct cycle is unavoidable.
7934 for node in parent_nodes:
7935 if not isinstance(node, (Blocker, Package)):
7937 if node not in traversed_nodes:
7938 edge = (current_node, node)
7939 if edge in shown_edges:
7941 selected_parent = node
7944 shown_edges.add((current_node, selected_parent))
7945 traversed_nodes.add(selected_parent)
7946 add_parents(selected_parent, False)
7947 display_list.append((current_node,
7948 len(tree_nodes), ordered))
7949 tree_nodes.append(current_node)
7951 add_parents(graph_key, True)
7953 display_list.append((x, depth, True))
7954 mylist = display_list
7955 for x in unsatisfied_blockers:
7956 mylist.append((x, 0, True))
7958 last_merge_depth = 0
7959 for i in xrange(len(mylist)-1,-1,-1):
7960 graph_key, depth, ordered = mylist[i]
7961 if not ordered and depth == 0 and i > 0 \
7962 and graph_key == mylist[i-1][0] and \
7963 mylist[i-1][1] == 0:
7964 # An ordered node got a consecutive duplicate when the tree was
7968 if ordered and graph_key[-1] != "nomerge":
7969 last_merge_depth = depth
7971 if depth >= last_merge_depth or \
7972 i < len(mylist) - 1 and \
7973 depth >= mylist[i+1][1]:
7976 from portage import flatten
7977 from portage.dep import use_reduce, paren_reduce
7978 # files to fetch list - avoids counting a same file twice
7979 # in size display (verbose mode)
7982 # Use this set to detect when all the "repoadd" strings are "[0]"
7983 # and disable the entire repo display in this case.
7986 for mylist_index in xrange(len(mylist)):
7987 x, depth, ordered = mylist[mylist_index]
7991 portdb = self.trees[myroot]["porttree"].dbapi
7992 bindb = self.trees[myroot]["bintree"].dbapi
7993 vardb = self.trees[myroot]["vartree"].dbapi
7994 vartree = self.trees[myroot]["vartree"]
7995 pkgsettings = self.pkgsettings[myroot]
7998 indent = " " * depth
8000 if isinstance(x, Blocker):
8002 blocker_style = "PKG_BLOCKER_SATISFIED"
8003 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
8005 blocker_style = "PKG_BLOCKER"
8006 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
8008 counters.blocks += 1
8010 counters.blocks_satisfied += 1
8011 resolved = portage.key_expand(
8012 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
8013 if "--columns" in self.myopts and "--quiet" in self.myopts:
8014 addl += " " + colorize(blocker_style, resolved)
8016 addl = "[%s %s] %s%s" % \
8017 (colorize(blocker_style, "blocks"),
8018 addl, indent, colorize(blocker_style, resolved))
8019 block_parents = self._blocker_parents.parent_nodes(x)
8020 block_parents = set([pnode[2] for pnode in block_parents])
8021 block_parents = ", ".join(block_parents)
8023 addl += colorize(blocker_style,
8024 " (\"%s\" is blocking %s)") % \
8025 (str(x.atom).lstrip("!"), block_parents)
8027 addl += colorize(blocker_style,
8028 " (is blocking %s)") % block_parents
8029 if isinstance(x, Blocker) and x.satisfied:
8034 blockers.append(addl)
8037 pkg_merge = ordered and pkg_status == "merge"
8038 if not pkg_merge and pkg_status == "merge":
8039 pkg_status = "nomerge"
8040 built = pkg_type != "ebuild"
8041 installed = pkg_type == "installed"
8043 metadata = pkg.metadata
8045 repo_name = metadata["repository"]
8046 if pkg_type == "ebuild":
8047 ebuild_path = portdb.findname(pkg_key)
8048 if not ebuild_path: # shouldn't happen
8049 raise portage.exception.PackageNotFound(pkg_key)
8050 repo_path_real = os.path.dirname(os.path.dirname(
8051 os.path.dirname(ebuild_path)))
8053 repo_path_real = portdb.getRepositoryPath(repo_name)
8054 pkg_use = list(pkg.use.enabled)
8056 restrict = flatten(use_reduce(paren_reduce(
8057 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8058 except portage.exception.InvalidDependString, e:
8059 if not pkg.installed:
8060 show_invalid_depstring_notice(x,
8061 pkg.metadata["RESTRICT"], str(e))
8065 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8066 "fetch" in restrict:
8069 counters.restrict_fetch += 1
8070 if portdb.fetch_check(pkg_key, pkg_use):
8073 counters.restrict_fetch_satisfied += 1
8075 # we need to use "--emptytree" testing here rather than "empty" param testing because "empty"
8076 #param is used for -u, where you still *do* want to see when something is being upgraded.
8079 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8080 if vardb.cpv_exists(pkg_key):
8081 addl=" "+yellow("R")+fetch+" "
8084 counters.reinst += 1
8085 elif pkg_status == "uninstall":
8086 counters.uninst += 1
8087 # filter out old-style virtual matches
8088 elif installed_versions and \
8089 portage.cpv_getkey(installed_versions[0]) == \
8090 portage.cpv_getkey(pkg_key):
8091 myinslotlist = vardb.match(pkg.slot_atom)
8092 # If this is the first install of a new-style virtual, we
8093 # need to filter out old-style virtual matches.
8094 if myinslotlist and \
8095 portage.cpv_getkey(myinslotlist[0]) != \
8096 portage.cpv_getkey(pkg_key):
8099 myoldbest = myinslotlist[:]
8101 if not portage.dep.cpvequal(pkg_key,
8102 portage.best([pkg_key] + myoldbest)):
8104 addl += turquoise("U")+blue("D")
8106 counters.downgrades += 1
8109 addl += turquoise("U") + " "
8111 counters.upgrades += 1
8113 # New slot, mark it new.
8114 addl = " " + green("NS") + fetch + " "
8115 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8117 counters.newslot += 1
8119 if "--changelog" in self.myopts:
8120 inst_matches = vardb.match(pkg.slot_atom)
8122 changelogs.extend(self.calc_changelog(
8123 portdb.findname(pkg_key),
8124 inst_matches[0], pkg_key))
8126 addl = " " + green("N") + " " + fetch + " "
8135 forced_flags = set()
8136 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8137 forced_flags.update(pkgsettings.useforce)
8138 forced_flags.update(pkgsettings.usemask)
8140 cur_use = [flag for flag in pkg.use.enabled \
8141 if flag in pkg.iuse.all]
8142 cur_iuse = sorted(pkg.iuse.all)
8144 if myoldbest and myinslotlist:
8145 previous_cpv = myoldbest[0]
8147 previous_cpv = pkg.cpv
8148 if vardb.cpv_exists(previous_cpv):
8149 old_iuse, old_use = vardb.aux_get(
8150 previous_cpv, ["IUSE", "USE"])
8151 old_iuse = list(set(
8152 filter_iuse_defaults(old_iuse.split())))
8154 old_use = old_use.split()
8161 old_use = [flag for flag in old_use if flag in old_iuse]
8163 use_expand = pkgsettings["USE_EXPAND"].lower().split()
8165 use_expand.reverse()
8166 use_expand_hidden = \
8167 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8169 def map_to_use_expand(myvals, forcedFlags=False,
8173 for exp in use_expand:
8176 for val in myvals[:]:
8177 if val.startswith(exp.lower()+"_"):
8178 if val in forced_flags:
8179 forced[exp].add(val[len(exp)+1:])
8180 ret[exp].append(val[len(exp)+1:])
8183 forced["USE"] = [val for val in myvals \
8184 if val in forced_flags]
8186 for exp in use_expand_hidden:
8192 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8193 # are the only thing that triggered reinstallation.
8194 reinst_flags_map = {}
8195 reinstall_for_flags = self._reinstall_nodes.get(pkg)
8196 reinst_expand_map = None
8197 if reinstall_for_flags:
8198 reinst_flags_map = map_to_use_expand(
8199 list(reinstall_for_flags), removeHidden=False)
8200 for k in list(reinst_flags_map):
8201 if not reinst_flags_map[k]:
8202 del reinst_flags_map[k]
8203 if not reinst_flags_map.get("USE"):
8204 reinst_expand_map = reinst_flags_map.copy()
8205 reinst_expand_map.pop("USE", None)
8206 if reinst_expand_map and \
8207 not set(reinst_expand_map).difference(
8209 use_expand_hidden = \
8210 set(use_expand_hidden).difference(
8213 cur_iuse_map, iuse_forced = \
8214 map_to_use_expand(cur_iuse, forcedFlags=True)
8215 cur_use_map = map_to_use_expand(cur_use)
8216 old_iuse_map = map_to_use_expand(old_iuse)
8217 old_use_map = map_to_use_expand(old_use)
8220 use_expand.insert(0, "USE")
8222 for key in use_expand:
8223 if key in use_expand_hidden:
8225 verboseadd += create_use_string(key.upper(),
8226 cur_iuse_map[key], iuse_forced[key],
8227 cur_use_map[key], old_iuse_map[key],
8228 old_use_map[key], is_new,
8229 reinst_flags_map.get(key))
8234 if pkg_type == "ebuild" and pkg_merge:
8236 myfilesdict = portdb.getfetchsizes(pkg_key,
8237 useflags=pkg_use, debug=self.edebug)
8238 except portage.exception.InvalidDependString, e:
8239 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8240 show_invalid_depstring_notice(x, src_uri, str(e))
8243 if myfilesdict is None:
8244 myfilesdict="[empty/missing/bad digest]"
8246 for myfetchfile in myfilesdict:
8247 if myfetchfile not in myfetchlist:
8248 mysize+=myfilesdict[myfetchfile]
8249 myfetchlist.append(myfetchfile)
8251 counters.totalsize += mysize
8252 verboseadd += format_size(mysize)
8255 # assign index for a previous version in the same slot
8256 has_previous = False
8257 repo_name_prev = None
8258 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8260 slot_matches = vardb.match(slot_atom)
8263 repo_name_prev = vardb.aux_get(slot_matches[0],
8266 # now use the data to generate output
8267 if pkg.installed or not has_previous:
8268 repoadd = repo_display.repoStr(repo_path_real)
8270 repo_path_prev = None
8272 repo_path_prev = portdb.getRepositoryPath(
8274 if repo_path_prev == repo_path_real:
8275 repoadd = repo_display.repoStr(repo_path_real)
8277 repoadd = "%s=>%s" % (
8278 repo_display.repoStr(repo_path_prev),
8279 repo_display.repoStr(repo_path_real))
8281 repoadd_set.add(repoadd)
8283 xs = [portage.cpv_getkey(pkg_key)] + \
8284 list(portage.catpkgsplit(pkg_key)[2:])
8291 if "COLUMNWIDTH" in self.settings:
8293 mywidth = int(self.settings["COLUMNWIDTH"])
8294 except ValueError, e:
8295 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8297 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8298 self.settings["COLUMNWIDTH"], noiselevel=-1)
8300 oldlp = mywidth - 30
8303 # Convert myoldbest from a list to a string.
8307 for pos, key in enumerate(myoldbest):
8308 key = portage.catpkgsplit(key)[2] + \
8309 "-" + portage.catpkgsplit(key)[3]
8310 if key[-3:] == "-r0":
8312 myoldbest[pos] = key
8313 myoldbest = blue("["+", ".join(myoldbest)+"]")
8316 root_config = self.roots[myroot]
8317 system_set = root_config.sets["system"]
8318 world_set = root_config.sets["world"]
8323 pkg_system = system_set.findAtomForPackage(pkg)
8324 pkg_world = world_set.findAtomForPackage(pkg)
8325 if not (oneshot or pkg_world) and \
8326 myroot == self.target_root and \
8327 favorites_set.findAtomForPackage(pkg):
8328 # Maybe it will be added to world now.
8329 if create_world_atom(pkg, favorites_set, root_config):
8331 except portage.exception.InvalidDependString:
8332 # This is reported elsewhere if relevant.
8335 def pkgprint(pkg_str):
8338 return colorize("PKG_MERGE_SYSTEM", pkg_str)
8340 return colorize("PKG_MERGE_WORLD", pkg_str)
8342 return colorize("PKG_MERGE", pkg_str)
8343 elif pkg_status == "uninstall":
8344 return colorize("PKG_UNINSTALL", pkg_str)
8347 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8349 return colorize("PKG_NOMERGE_WORLD", pkg_str)
8351 return colorize("PKG_NOMERGE", pkg_str)
8354 properties = flatten(use_reduce(paren_reduce(
8355 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8356 except portage.exception.InvalidDependString, e:
8357 if not pkg.installed:
8358 show_invalid_depstring_notice(pkg,
8359 pkg.metadata["PROPERTIES"], str(e))
8363 interactive = "interactive" in properties
8364 if interactive and pkg.operation == "merge":
8365 addl = colorize("WARN", "I") + addl[1:]
8367 counters.interactive += 1
8372 if "--columns" in self.myopts:
8373 if "--quiet" in self.myopts:
8374 myprint=addl+" "+indent+pkgprint(pkg_cp)
8375 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8376 myprint=myprint+myoldbest
8377 myprint=myprint+darkgreen("to "+x[1])
8381 myprint = "[%s] %s%s" % \
8382 (pkgprint(pkg_status.ljust(13)),
8383 indent, pkgprint(pkg.cp))
8385 myprint = "[%s %s] %s%s" % \
8386 (pkgprint(pkg.type_name), addl,
8387 indent, pkgprint(pkg.cp))
8388 if (newlp-nc_len(myprint)) > 0:
8389 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8390 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8391 if (oldlp-nc_len(myprint)) > 0:
8392 myprint=myprint+" "*(oldlp-nc_len(myprint))
8393 myprint=myprint+myoldbest
8394 myprint += darkgreen("to " + pkg.root)
8397 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8399 myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8400 myprint += indent + pkgprint(pkg_key) + " " + \
8401 myoldbest + darkgreen("to " + myroot)
8403 if "--columns" in self.myopts:
8404 if "--quiet" in self.myopts:
8405 myprint=addl+" "+indent+pkgprint(pkg_cp)
8406 myprint=myprint+" "+green(xs[1]+xs[2])+" "
8407 myprint=myprint+myoldbest
8411 myprint = "[%s] %s%s" % \
8412 (pkgprint(pkg_status.ljust(13)),
8413 indent, pkgprint(pkg.cp))
8415 myprint = "[%s %s] %s%s" % \
8416 (pkgprint(pkg.type_name), addl,
8417 indent, pkgprint(pkg.cp))
8418 if (newlp-nc_len(myprint)) > 0:
8419 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8420 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8421 if (oldlp-nc_len(myprint)) > 0:
8422 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8423 myprint += myoldbest
8426 myprint = "[%s] %s%s %s" % \
8427 (pkgprint(pkg_status.ljust(13)),
8428 indent, pkgprint(pkg.cpv),
8431 myprint = "[%s %s] %s%s %s" % \
8432 (pkgprint(pkg_type), addl, indent,
8433 pkgprint(pkg.cpv), myoldbest)
8435 if columns and pkg.operation == "uninstall":
8437 p.append((myprint, verboseadd, repoadd))
8439 if "--tree" not in self.myopts and \
8440 "--quiet" not in self.myopts and \
8441 not self._opts_no_restart.intersection(self.myopts) and \
8442 pkg.root == self._running_root.root and \
8443 portage.match_from_list(
8444 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8445 not vardb.cpv_exists(pkg.cpv) and \
8446 "--quiet" not in self.myopts:
8447 if mylist_index < len(mylist) - 1:
8448 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8449 p.append(colorize("WARN", " then resume the merge."))
8452 show_repos = repoadd_set and repoadd_set != set(["0"])
8455 if isinstance(x, basestring):
8456 out.write("%s\n" % (x,))
8459 myprint, verboseadd, repoadd = x
8462 myprint += " " + verboseadd
8464 if show_repos and repoadd:
8465 myprint += " " + teal("[%s]" % repoadd)
8467 out.write("%s\n" % (myprint,))
8476 sys.stdout.write(str(repo_display))
8478 if "--changelog" in self.myopts:
8480 for revision,text in changelogs:
8481 print bold('*'+revision)
8482 sys.stdout.write(text)
8487 def display_problems(self):
8489 Display problems with the dependency graph such as slot collisions.
8490 This is called internally by display() to show the problems _after_
8491 the merge list where it is most likely to be seen, but if display()
8492 is not going to be called then this method should be called explicitly
8493 to ensure that the user is notified of problems with the graph.
8495 All output goes to stderr, except for unsatisfied dependencies which
8496 go to stdout for parsing by programs such as autounmask.
8499 # Note that show_masked_packages() sends its output to
8500 # stdout, and some programs such as autounmask parse the
8501 # output in cases when emerge bails out. However, when
8502 # show_masked_packages() is called for installed packages
8503 # here, the message is a warning that is more appropriate
8504 # to send to stderr, so temporarily redirect stdout to
8505 # stderr. TODO: Fix output code so there's a cleaner way
8506 # to redirect everything to stderr.
8511 sys.stdout = sys.stderr
8512 self._display_problems()
8518 # This goes to stdout for parsing by programs like autounmask.
8519 for pargs, kwargs in self._unsatisfied_deps_for_display:
8520 self._show_unsatisfied_dep(*pargs, **kwargs)
8522 def _display_problems(self):
8523 if self._circular_deps_for_display is not None:
8524 self._show_circular_deps(
8525 self._circular_deps_for_display)
8527 # The user is only notified of a slot conflict if
8528 # there are no unresolvable blocker conflicts.
8529 if self._unsatisfied_blockers_for_display is not None:
8530 self._show_unsatisfied_blockers(
8531 self._unsatisfied_blockers_for_display)
8533 self._show_slot_collision_notice()
8535 # TODO: Add generic support for "set problem" handlers so that
8536 # the below warnings aren't special cases for world only.
8538 if self._missing_args:
8539 world_problems = False
8540 if "world" in self._sets:
8541 # Filter out indirect members of world (from nested sets)
8542 # since only direct members of world are desired here.
8543 world_set = self.roots[self.target_root].sets["world"]
8544 for arg, atom in self._missing_args:
8545 if arg.name == "world" and atom in world_set:
8546 world_problems = True
8550 sys.stderr.write("\n!!! Problems have been " + \
8551 "detected with your world file\n")
8552 sys.stderr.write("!!! Please run " + \
8553 green("emaint --check world")+"\n\n")
8555 if self._missing_args:
8556 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8557 " Ebuilds for the following packages are either all\n")
8558 sys.stderr.write(colorize("BAD", "!!!") + \
8559 " masked or don't exist:\n")
8560 sys.stderr.write(" ".join(str(atom) for arg, atom in \
8561 self._missing_args) + "\n")
8563 if self._pprovided_args:
8565 for arg, atom in self._pprovided_args:
8566 if isinstance(arg, SetArg):
8568 arg_atom = (atom, atom)
8571 arg_atom = (arg.arg, atom)
8572 refs = arg_refs.setdefault(arg_atom, [])
8573 if parent not in refs:
8576 msg.append(bad("\nWARNING: "))
8577 if len(self._pprovided_args) > 1:
8578 msg.append("Requested packages will not be " + \
8579 "merged because they are listed in\n")
8581 msg.append("A requested package will not be " + \
8582 "merged because it is listed in\n")
8583 msg.append("package.provided:\n\n")
8584 problems_sets = set()
8585 for (arg, atom), refs in arg_refs.iteritems():
8588 problems_sets.update(refs)
8590 ref_string = ", ".join(["'%s'" % name for name in refs])
8591 ref_string = " pulled in by " + ref_string
8592 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8594 if "world" in problems_sets:
8595 msg.append("This problem can be solved in one of the following ways:\n\n")
8596 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
8597 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
8598 msg.append(" C) Remove offending entries from package.provided.\n\n")
8599 msg.append("The best course of action depends on the reason that an offending\n")
8600 msg.append("package.provided entry exists.\n\n")
8601 sys.stderr.write("".join(msg))
8603 masked_packages = []
8604 for pkg in self._masked_installed:
8605 root_config = pkg.root_config
8606 pkgsettings = self.pkgsettings[pkg.root]
8607 mreasons = get_masking_status(pkg, pkgsettings, root_config)
8608 masked_packages.append((root_config, pkgsettings,
8609 pkg.cpv, pkg.metadata, mreasons))
8611 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8612 " The following installed packages are masked:\n")
8613 show_masked_packages(masked_packages)
8617 def calc_changelog(self,ebuildpath,current,next):
8618 if ebuildpath == None or not os.path.exists(ebuildpath):
8620 current = '-'.join(portage.catpkgsplit(current)[1:])
8621 if current.endswith('-r0'):
8622 current = current[:-3]
8623 next = '-'.join(portage.catpkgsplit(next)[1:])
8624 if next.endswith('-r0'):
8626 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8628 changelog = open(changelogpath).read()
8629 except SystemExit, e:
8630 raise # Needed else can't exit
8633 divisions = self.find_changelog_tags(changelog)
8634 #print 'XX from',current,'to',next
8635 #for div,text in divisions: print 'XX',div
8636 # skip entries for all revisions above the one we are about to emerge
8637 for i in range(len(divisions)):
8638 if divisions[i][0]==next:
8639 divisions = divisions[i:]
8641 # find out how many entries we are going to display
8642 for i in range(len(divisions)):
8643 if divisions[i][0]==current:
8644 divisions = divisions[:i]
8647 # couldn't find the current revision in the list; display nothing
8651 def find_changelog_tags(self,changelog):
8655 match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8657 if release is not None:
8658 divs.append((release,changelog))
8660 if release is not None:
8661 divs.append((release,changelog[:match.start()]))
8662 changelog = changelog[match.end():]
8663 release = match.group(1)
8664 if release.endswith('.ebuild'):
8665 release = release[:-7]
8666 if release.endswith('-r0'):
8667 release = release[:-3]
8669 def saveNomergeFavorites(self):
8670 """Find atoms in favorites that are not in the mergelist and add them
8671 to the world file if necessary."""
8672 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8673 "--oneshot", "--onlydeps", "--pretend"):
8674 if x in self.myopts:
8676 root_config = self.roots[self.target_root]
8677 world_set = root_config.sets["world"]
8679 world_locked = False
8680 if hasattr(world_set, "lock"):
8684 if hasattr(world_set, "load"):
8685 world_set.load() # maybe it's changed on disk
8687 args_set = self._sets["args"]
8688 portdb = self.trees[self.target_root]["porttree"].dbapi
8689 added_favorites = set()
8690 for x in self._set_nodes:
8691 pkg_type, root, pkg_key, pkg_status = x
8692 if pkg_status != "nomerge":
8696 myfavkey = create_world_atom(x, args_set, root_config)
8698 if myfavkey in added_favorites:
8700 added_favorites.add(myfavkey)
8701 except portage.exception.InvalidDependString, e:
8702 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8703 (pkg_key, str(e)), noiselevel=-1)
8704 writemsg("!!! see '%s'\n\n" % os.path.join(
8705 root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8708 for k in self._sets:
8709 if k in ("args", "world") or not root_config.sets[k].world_candidate:
8714 all_added.append(SETPREFIX + k)
8715 all_added.extend(added_favorites)
8718 print ">>> Recording %s in \"world\" favorites file..." % \
8719 colorize("INFORM", str(a))
8721 world_set.update(all_added)
8726 def loadResumeCommand(self, resume_data, skip_masked=False):
8728 Add a resume command to the graph and validate it in the process. This
8729 will raise a PackageNotFound exception if a package is not available.
8732 if not isinstance(resume_data, dict):
8735 mergelist = resume_data.get("mergelist")
8736 if not isinstance(mergelist, list):
8739 fakedb = self.mydbapi
8741 serialized_tasks = []
8744 if not (isinstance(x, list) and len(x) == 4):
8746 pkg_type, myroot, pkg_key, action = x
8747 if pkg_type not in self.pkg_tree_map:
8749 if action != "merge":
8751 tree_type = self.pkg_tree_map[pkg_type]
8752 mydb = trees[myroot][tree_type].dbapi
8753 db_keys = list(self._trees_orig[myroot][
8754 tree_type].dbapi._aux_cache_keys)
8756 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8758 # It does not exist or it is corrupt.
8759 if action == "uninstall":
8761 raise portage.exception.PackageNotFound(pkg_key)
8762 installed = action == "uninstall"
8763 built = pkg_type != "ebuild"
8764 root_config = self.roots[myroot]
8765 pkg = Package(built=built, cpv=pkg_key,
8766 installed=installed, metadata=metadata,
8767 operation=action, root_config=root_config,
8769 if pkg_type == "ebuild":
8770 pkgsettings = self.pkgsettings[myroot]
8771 pkgsettings.setcpv(pkg)
8772 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8773 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8774 self._pkg_cache[pkg] = pkg
8776 root_config = self.roots[pkg.root]
8777 if "merge" == pkg.operation and \
8778 not visible(root_config.settings, pkg):
8780 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8782 self._unsatisfied_deps_for_display.append(
8783 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8785 fakedb[myroot].cpv_inject(pkg)
8786 serialized_tasks.append(pkg)
8787 self.spinner.update()
8789 if self._unsatisfied_deps_for_display:
8792 if not serialized_tasks or "--nodeps" in self.myopts:
8793 self._serialized_tasks_cache = serialized_tasks
8794 self._scheduler_graph = self.digraph
8796 self._select_package = self._select_pkg_from_graph
8797 self.myparams.add("selective")
8798 # Always traverse deep dependencies in order to account for
8799 # potentially unsatisfied dependencies of installed packages.
8800 # This is necessary for correct --keep-going or --resume operation
8801 # in case a package from a group of circularly dependent packages
8802 # fails. In this case, a package which has recently been installed
8803 # may have an unsatisfied circular dependency (pulled in by
8804 # PDEPEND, for example). So, even though a package is already
8805 # installed, it may not have all of its dependencies satisfied, so
8806 # it may not be usable. If such a package is in the subgraph of
8807 # deep dependencies of a scheduled build, that build needs to
8808 # be cancelled. In order for this type of situation to be
8809 # recognized, deep traversal of dependencies is required.
8810 self.myparams.add("deep")
8812 favorites = resume_data.get("favorites")
8813 args_set = self._sets["args"]
8814 if isinstance(favorites, list):
8815 args = self._load_favorites(favorites)
8819 for task in serialized_tasks:
8820 if isinstance(task, Package) and \
8821 task.operation == "merge":
8822 if not self._add_pkg(task, None):
8825 # Packages for argument atoms need to be explicitly
8826 # added via _add_pkg() so that they are included in the
8827 # digraph (needed at least for --tree display).
8829 for atom in arg.set:
8830 pkg, existing_node = self._select_package(
8831 arg.root_config.root, atom)
8832 if existing_node is None and \
8834 if not self._add_pkg(pkg, Dependency(atom=atom,
8835 root=pkg.root, parent=arg)):
8838 # Allow unsatisfied deps here to avoid showing a masking
8839 # message for an unsatisfied dep that isn't necessarily
8841 if not self._create_graph(allow_unsatisfied=True):
8844 unsatisfied_deps = []
8845 for dep in self._unsatisfied_deps:
8846 if not isinstance(dep.parent, Package):
8848 if dep.parent.operation == "merge":
8849 unsatisfied_deps.append(dep)
8852 # For unsatisfied deps of installed packages, only account for
8853 # them if they are in the subgraph of dependencies of a package
8854 # which is scheduled to be installed.
8855 unsatisfied_install = False
8857 dep_stack = self.digraph.parent_nodes(dep.parent)
8859 node = dep_stack.pop()
8860 if not isinstance(node, Package):
8862 if node.operation == "merge":
8863 unsatisfied_install = True
8865 if node in traversed:
8868 dep_stack.extend(self.digraph.parent_nodes(node))
8870 if unsatisfied_install:
8871 unsatisfied_deps.append(dep)
8873 if masked_tasks or unsatisfied_deps:
8874 # This probably means that a required package
8875 # was dropped via --skipfirst. It makes the
8876 # resume list invalid, so convert it to a
8877 # UnsatisfiedResumeDep exception.
8878 raise self.UnsatisfiedResumeDep(self,
8879 masked_tasks + unsatisfied_deps)
8880 self._serialized_tasks_cache = None
8883 except self._unknown_internal_error:
8888 def _load_favorites(self, favorites):
8890 Use a list of favorites to resume state from a
8891 previous select_files() call. This creates similar
8892 DependencyArg instances to those that would have
8893 been created by the original select_files() call.
8894 This allows Package instances to be matched with
8895 DependencyArg instances during graph creation.
8897 root_config = self.roots[self.target_root]
8898 getSetAtoms = root_config.setconfig.getSetAtoms
8899 sets = root_config.sets
8902 if not isinstance(x, basestring):
8904 if x in ("system", "world"):
8906 if x.startswith(SETPREFIX):
8907 s = x[len(SETPREFIX):]
8912 # Recursively expand sets so that containment tests in
8913 # self._get_parent_sets() properly match atoms in nested
8914 # sets (like if world contains system).
8915 expanded_set = InternalPackageSet(
8916 initial_atoms=getSetAtoms(s))
8917 self._sets[s] = expanded_set
8918 args.append(SetArg(arg=x, set=expanded_set,
8919 root_config=root_config))
8921 if not portage.isvalidatom(x):
8923 args.append(AtomArg(arg=x, atom=x,
8924 root_config=root_config))
8926 self._set_args(args)
8929 class UnsatisfiedResumeDep(portage.exception.PortageException):
8931 A dependency of a resume list is not installed. This
8932 can occur when a required package is dropped from the
8933 merge list via --skipfirst.
8935 def __init__(self, depgraph, value):
8936 portage.exception.PortageException.__init__(self, value)
8937 self.depgraph = depgraph
class _internal_exception(portage.exception.PortageException):
	"""Base class for exceptions used internally by the depgraph
	(see the _unknown_internal_error and _serialize_tasks_retry
	subclasses); not intended to escape to callers."""
	def __init__(self, value=""):
		# Delegate straight to the PortageException initializer with an
		# optional message (defaults to an empty string).
		portage.exception.PortageException.__init__(self, value)
8943 class _unknown_internal_error(_internal_exception):
8945 Used by the depgraph internally to terminate graph creation.
8946 The specific reason for the failure should have been dumped
8947 to stderr, unfortunately, the exact reason for the failure
8951 class _serialize_tasks_retry(_internal_exception):
8953 This is raised by the _serialize_tasks() method when it needs to
8954 be called again for some reason. The only case that it's currently
8955 used for is when neglected dependencies need to be added to the
8956 graph in order to avoid making a potentially unsafe decision.
# A dbapi-like package source queried by dep_check(); it defers to the
# depgraph's package selection logic and can mask some graph packages to
# steer atom preference selection.
# NOTE(review): this listing is elided -- the embedded original line
# numbers are non-contiguous, so statements are missing between many of
# the visible lines.
8959 class _dep_check_composite_db(portage.dbapi):
8961 A dbapi-like interface that is optimized for use in dep_check() calls.
8962 This is built on top of the existing depgraph package selection logic.
8963 Some packages that have been added to the graph may be masked from this
8964 view in order to influence the atom preference selection that occurs
# Store the owning depgraph/root and start with empty memoization caches.
8967 def __init__(self, depgraph, root):
8968 portage.dbapi.__init__(self)
8969 self._depgraph = depgraph
8971 self._match_cache = {}
8972 self._cpv_pkg_map = {}
# Drop all memoized match() results and cpv -> Package mappings.
8974 def _clear_cache(self):
8975 self._match_cache.clear()
8976 self._cpv_pkg_map.clear()
# Resolve an atom to a sorted list of matching cpvs; results are
# memoized in self._match_cache and filtered through _visible().
8978 def match(self, atom):
8979 ret = self._match_cache.get(atom)
8984 atom = self._dep_expand(atom)
8985 pkg, existing = self._depgraph._select_package(self._root, atom)
8989 # Return the highest available from select_package() as well as
8990 # any matching slots in the graph db.
8992 slots.add(pkg.metadata["SLOT"])
8993 atom_cp = portage.dep_getkey(atom)
8994 if pkg.cp.startswith("virtual/"):
8995 # For new-style virtual lookahead that occurs inside
8996 # dep_check(), examine all slots. This is needed
8997 # so that newer slots will not unnecessarily be pulled in
8998 # when a satisfying lower slot is already installed. For
8999 # example, if virtual/jdk-1.4 is satisfied via kaffe then
9000 # there's no need to pull in a newer slot to satisfy a
9001 # virtual/jdk dependency.
9002 for db, pkg_type, built, installed, db_keys in \
9003 self._depgraph._filtered_trees[self._root]["dbs"]:
9004 for cpv in db.match(atom):
9005 if portage.cpv_getkey(cpv) != pkg.cp:
9007 slots.add(db.aux_get(cpv, ["SLOT"])[0])
9009 if self._visible(pkg):
9010 self._cpv_pkg_map[pkg.cpv] = pkg
9012 slots.remove(pkg.metadata["SLOT"])
# For each remaining slot, ask the depgraph for a package in that slot.
9014 slot_atom = "%s:%s" % (atom_cp, slots.pop())
9015 pkg, existing = self._depgraph._select_package(
9016 self._root, slot_atom)
9019 if not self._visible(pkg):
9021 self._cpv_pkg_map[pkg.cpv] = pkg
9024 self._cpv_sort_ascending(ret)
9025 self._match_cache[orig_atom] = ret
# Decide whether pkg may be offered through this view; masks packages
# that are not the highest visible version in their slot or that would
# conflict with a package already selected into the graph.
9028 def _visible(self, pkg):
9029 if pkg.installed and "selective" not in self._depgraph.myparams:
9031 arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
9032 except (StopIteration, portage.exception.InvalidDependString):
9039 self._depgraph.pkgsettings[pkg.root], pkg):
9041 except portage.exception.InvalidDependString:
9043 in_graph = self._depgraph._slot_pkg_map[
9044 self._root].get(pkg.slot_atom)
9045 if in_graph is None:
9046 # Mask choices for packages which are not the highest visible
9047 # version within their slot (since they usually trigger slot
9049 highest_visible, in_graph = self._depgraph._select_package(
9050 self._root, pkg.slot_atom)
9051 if pkg != highest_visible:
9053 elif in_graph != pkg:
9054 # Mask choices for packages that would trigger a slot
9055 # conflict with a previously selected package.
# Qualify a possibly category-less atom (seen in old installed
# packages), preferring non-virtual expansions when ambiguous.
9059 def _dep_expand(self, atom):
9061 This is only needed for old installed packages that may
9062 contain atoms that are not fully qualified with a specific
9063 category. Emulate the cpv_expand() function that's used by
9064 dbapi.match() in cases like this. If there are multiple
9065 matches, it's often due to a new-style virtual that has
9066 been added, so try to filter those out to avoid raising
9069 root_config = self._depgraph.roots[self._root]
9071 expanded_atoms = self._depgraph._dep_expand(root_config, atom)
9072 if len(expanded_atoms) > 1:
9073 non_virtual_atoms = []
9074 for x in expanded_atoms:
9075 if not portage.dep_getkey(x).startswith("virtual/"):
9076 non_virtual_atoms.append(x)
9077 if len(non_virtual_atoms) == 1:
9078 expanded_atoms = non_virtual_atoms
9079 if len(expanded_atoms) > 1:
9080 # compatible with portage.cpv_expand()
9081 raise portage.exception.AmbiguousPackageName(
9082 [portage.dep_getkey(x) for x in expanded_atoms])
9084 atom = expanded_atoms[0]
# No expansion matched: fall back to a "virtual/" or "null/" category,
# mirroring cpv_expand() behavior for unknown package names.
9086 null_atom = insert_category_into_atom(atom, "null")
9087 null_cp = portage.dep_getkey(null_atom)
9088 cat, atom_pn = portage.catsplit(null_cp)
9089 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9091 # Allow the resolver to choose which virtual.
9092 atom = insert_category_into_atom(atom, "virtual")
9094 atom = insert_category_into_atom(atom, "null")
# Serve metadata for cpvs previously returned by match(); missing keys
# come back as empty strings.
9097 def aux_get(self, cpv, wants):
9098 metadata = self._cpv_pkg_map[cpv].metadata
9099 return [metadata.get(x, "") for x in wants]
# Assigns stable display indices to the portage tree and overlays so the
# merge-list display can tag each package with its source repository.
# NOTE(review): elided listing; statements are missing between lines.
9101 class RepoDisplay(object):
9102 def __init__(self, roots):
9103 self._shown_repos = {}
9104 self._unknown_repo = False
# Collect PORTDIR and PORTDIR_OVERLAY paths across every root.
9106 for root_config in roots.itervalues():
9107 portdir = root_config.settings.get("PORTDIR")
9109 repo_paths.add(portdir)
9110 overlays = root_config.settings.get("PORTDIR_OVERLAY")
9112 repo_paths.update(overlays.split())
9113 repo_paths = list(repo_paths)
9114 self._repo_paths = repo_paths
# Symlink-resolved paths, kept parallel to self._repo_paths.
9115 self._repo_paths_real = [ os.path.realpath(repo_path) \
9116 for repo_path in repo_paths ]
9118 # pre-allocate index for PORTDIR so that it always has index 0.
9119 for root_config in roots.itervalues():
9120 portdb = root_config.trees["porttree"].dbapi
9121 portdir = portdb.porttree_root
9123 self.repoStr(portdir)
# Map a realpath to its display-index string, allocating an index on
# first sight; an unknown repository sets self._unknown_repo.
# NOTE(review): list.index() raises ValueError on a miss rather than
# returning -1; the elided lines presumably guard this -- confirm.
9125 def repoStr(self, repo_path_real):
9128 real_index = self._repo_paths_real.index(repo_path_real)
9129 if real_index == -1:
9131 self._unknown_repo = True
9133 shown_repos = self._shown_repos
9134 repo_paths = self._repo_paths
9135 repo_path = repo_paths[real_index]
9136 index = shown_repos.get(repo_path)
9138 index = len(shown_repos)
9139 shown_repos[repo_path] = index
# Render the legend mapping each index to its repository path.
9145 shown_repos = self._shown_repos
9146 unknown_repo = self._unknown_repo
9147 if shown_repos or self._unknown_repo:
9148 output.append("Portage tree and overlays:\n")
# Invert the path -> index map into an index-ordered list of paths.
9149 show_repo_paths = list(shown_repos)
9150 for repo_path, repo_index in shown_repos.iteritems():
9151 show_repo_paths[repo_index] = repo_path
9153 for index, repo_path in enumerate(show_repo_paths):
9154 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9156 output.append(" "+teal("[?]") + \
9157 " indicates that the source repository could not be determined\n")
9158 return "".join(output)
# Tallies merge-list statistics (upgrades, downgrades, new slots,
# reinstalls, uninstalls, blockers, fetch restrictions, download size)
# and renders them as the one-line "Total: ..." summary.
# NOTE(review): elided listing; the initializers at the top of __init__
# and parts of the summary builder are missing from this view.
9160 class PackageCounters(object):
9170 self.blocks_satisfied = 0
9172 self.restrict_fetch = 0
9173 self.restrict_fetch_satisfied = 0
9174 self.interactive = 0
# Build the "Total: N packages (...)" summary string with correct
# singular/plural forms for each counter.
9177 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9180 myoutput.append("Total: %s package" % total_installs)
9181 if total_installs != 1:
9182 myoutput.append("s")
9183 if total_installs != 0:
9184 myoutput.append(" (")
9185 if self.upgrades > 0:
9186 details.append("%s upgrade" % self.upgrades)
9187 if self.upgrades > 1:
9189 if self.downgrades > 0:
9190 details.append("%s downgrade" % self.downgrades)
9191 if self.downgrades > 1:
9194 details.append("%s new" % self.new)
9195 if self.newslot > 0:
9196 details.append("%s in new slot" % self.newslot)
9197 if self.newslot > 1:
9200 details.append("%s reinstall" % self.reinst)
9204 details.append("%s uninstall" % self.uninst)
9207 if self.interactive > 0:
9208 details.append("%s %s" % (self.interactive,
9209 colorize("WARN", "interactive")))
9210 myoutput.append(", ".join(details))
9211 if total_installs != 0:
9212 myoutput.append(")")
9213 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
# Append fetch-restriction and blocker lines, highlighting in "bad"
# color any that remain unsatisfied.
9214 if self.restrict_fetch:
9215 myoutput.append("\nFetch Restriction: %s package" % \
9216 self.restrict_fetch)
9217 if self.restrict_fetch > 1:
9218 myoutput.append("s")
9219 if self.restrict_fetch_satisfied < self.restrict_fetch:
9220 myoutput.append(bad(" (%s unsatisfied)") % \
9221 (self.restrict_fetch - self.restrict_fetch_satisfied))
9223 myoutput.append("\nConflict: %s block" % \
9226 myoutput.append("s")
9227 if self.blocks_satisfied < self.blocks:
9228 myoutput.append(bad(" (%s unsatisfied)") % \
9229 (self.blocks - self.blocks_satisfied))
9230 return "".join(myoutput)
# Emulates the poll() object API on top of select.select() for systems
# whose poll() is missing or broken on devices (see can_poll_device()).
# NOTE(review): elided listing; statements are missing between lines.
9232 class PollSelectAdapter(PollConstants):
9235 Use select to emulate a poll object, for
9236 systems that don't support poll().
# fd -> eventmask map; _select_args caches the select() argument lists
# and is invalidated (set to None) whenever registration changes.
9240 self._registered = {}
9241 self._select_args = [[], [], []]
9243 def register(self, fd, *args):
9245 Only POLLIN is currently supported!
9249 "register expected at most 2 arguments, got " + \
9250 repr(1 + len(args)))
# Default eventmask when none is given (only POLLIN is honored).
9252 eventmask = PollConstants.POLLIN | \
9253 PollConstants.POLLPRI | PollConstants.POLLOUT
9257 self._registered[fd] = eventmask
9258 self._select_args = None
9260 def unregister(self, fd):
9261 self._select_args = None
9262 del self._registered[fd]
# poll([timeout]) -> list of (fd, POLLIN) tuples for readable fds.
9264 def poll(self, *args):
9267 "poll expected at most 2 arguments, got " + \
9268 repr(1 + len(args))
9274 select_args = self._select_args
9275 if select_args is None:
9276 select_args = [self._registered.keys(), [], []]
9278 if timeout is not None:
# Copy before appending so the cached arg list is not mutated.
9279 select_args = select_args[:]
9280 # Translate poll() timeout args to select() timeout args:
9282 # | units | value(s) for indefinite block
9283 # ---------|--------------|------------------------------
9284 # poll | milliseconds | omitted, negative, or None
9285 # ---------|--------------|------------------------------
9286 # select | seconds | omitted
9287 # ---------|--------------|------------------------------
9289 if timeout is not None and timeout < 0:
9291 if timeout is not None:
9292 select_args.append(timeout / 1000)
9294 select_events = select.select(*select_args)
9296 for fd in select_events[0]:
9297 poll_events.append((fd, PollConstants.POLLIN))
# FIFO queue of asynchronous tasks that starts up to max_jobs of them
# concurrently; finished tasks prune themselves via an exit listener.
# NOTE(review): elided listing; statements are missing between lines.
9300 class SequentialTaskQueue(SlotObject):
9302 __slots__ = ("max_jobs", "running_tasks") + \
9303 ("_dirty", "_scheduling", "_task_queue")
9305 def __init__(self, **kwargs):
9306 SlotObject.__init__(self, **kwargs)
9307 self._task_queue = deque()
9308 self.running_tasks = set()
9309 if self.max_jobs is None:
# Append a task at the back of the queue.
9313 def add(self, task):
9314 self._task_queue.append(task)
# Insert a task at the front, ahead of already-queued work.
9317 def addFront(self, task):
9318 self._task_queue.appendleft(task)
# Start queued tasks while capacity remains; returns True if any task
# state changed.  Recursive calls via _task_exit() are suppressed.
9329 if self._scheduling:
9330 # Ignore any recursive schedule() calls triggered via
9331 # self._task_exit().
9334 self._scheduling = True
9336 task_queue = self._task_queue
9337 running_tasks = self.running_tasks
9338 max_jobs = self.max_jobs
9339 state_changed = False
# max_jobs is True means "unlimited" here.
9341 while task_queue and \
9342 (max_jobs is True or len(running_tasks) < max_jobs):
9343 task = task_queue.popleft()
9344 cancelled = getattr(task, "cancelled", None)
9346 running_tasks.add(task)
9347 task.addExitListener(self._task_exit)
9349 state_changed = True
9352 self._scheduling = False
9354 return state_changed
9356 def _task_exit(self, task):
9358 Since we can always rely on exit listeners being called, the set of
9359 running tasks is always pruned automatically and there is never any need
9360 to actively prune it.
9362 self.running_tasks.remove(task)
9363 if self._task_queue:
# Clear queued tasks and detach our exit listener from running ones.
9367 self._task_queue.clear()
9368 running_tasks = self.running_tasks
9369 while running_tasks:
9370 task = running_tasks.pop()
9371 task.removeExitListener(self._task_exit)
# Truthiness and length reflect queued plus running tasks (Python 2
# uses __nonzero__ for bool()).
9375 def __nonzero__(self):
9376 return bool(self._task_queue or self.running_tasks)
9379 return len(self._task_queue) + len(self.running_tasks)
# Cached module-level result of the poll()-on-device probe below;
# None means the probe has not run yet.
9381 _can_poll_device = None
9383 def can_poll_device():
9385 Test if it's possible to use poll() on a device such as a pty. This
9386 is known to fail on Darwin.
9388 @returns: True if poll() on a device succeeds, False otherwise.
9391 global _can_poll_device
9392 if _can_poll_device is not None:
9393 return _can_poll_device
9395 if not hasattr(select, "poll"):
9396 _can_poll_device = False
9397 return _can_poll_device
# Probe by polling /dev/null; a POLLNVAL event means poll() does not
# work on device files on this platform.
9400 dev_null = open('/dev/null', 'rb')
9402 _can_poll_device = False
9403 return _can_poll_device
9406 p.register(dev_null.fileno(), PollConstants.POLLIN)
9408 invalid_request = False
9409 for f, event in p.poll():
9410 if event & PollConstants.POLLNVAL:
9411 invalid_request = True
9415 _can_poll_device = not invalid_request
9416 return _can_poll_device
# Factory: return select.poll() when it works on devices, otherwise the
# select()-based PollSelectAdapter fallback.
9418 def create_poll_instance():
9420 Create an instance of select.poll, or an instance of
9421 PollSelectAdapter there is no poll() implementation or
9422 it is broken somehow.
9424 if can_poll_device():
9425 return select.poll()
9426 return PollSelectAdapter()
# Fall back to parsing /proc/loadavg when os.getloadavg() is not
# provided by the platform.
9428 getloadavg = getattr(os, "getloadavg", None)
9429 if getloadavg is None:
9432 Uses /proc/loadavg to emulate os.getloadavg().
9433 Raises OSError if the load average was unobtainable.
9436 loadavg_str = open('/proc/loadavg').readline()
9438 # getloadavg() is only supposed to raise OSError, so convert
9439 raise OSError('unknown')
9440 loadavg_split = loadavg_str.split()
9441 if len(loadavg_split) < 3:
9442 raise OSError('unknown')
# The first three whitespace-separated fields are the 1-, 5- and
# 15-minute load averages.
9446 loadavg_floats.append(float(loadavg_split[i]))
9448 raise OSError('unknown')
9449 return tuple(loadavg_floats)
# Base class for schedulers driven by a central poll() loop: tracks
# registered fd event handlers, dispatches poll events, and throttles
# job starts by max job count and load average.
# NOTE(review): elided listing; statements are missing between lines.
9451 class PollScheduler(object):
9453 class _sched_iface_class(SlotObject):
9454 __slots__ = ("register", "schedule", "unregister")
9458 self._max_load = None
9460 self._poll_event_queue = []
9461 self._poll_event_handlers = {}
9462 self._poll_event_handler_ids = {}
9463 # Increment id for each new handler.
9464 self._event_handler_id = 0
9465 self._poll_obj = create_poll_instance()
9466 self._scheduling = False
9468 def _schedule(self):
9470 Calls _schedule_tasks() and automatically returns early from
9471 any recursive calls to this method that the _schedule_tasks()
9472 call might trigger. This makes _schedule() safe to call from
9473 inside exit listeners.
9475 if self._scheduling:
9477 self._scheduling = True
9479 return self._schedule_tasks()
9481 self._scheduling = False
# Subclasses report how many jobs are currently running.
9483 def _running_job_count(self):
# True when another job may start, honoring _max_jobs and -- when more
# than one concurrent job is allowed -- the 1-minute load average cap.
9486 def _can_add_job(self):
9487 max_jobs = self._max_jobs
9488 max_load = self._max_load
9490 if self._max_jobs is not True and \
9491 self._running_job_count() >= self._max_jobs:
9494 if max_load is not None and \
9495 (max_jobs is True or max_jobs > 1) and \
9496 self._running_job_count() >= 1:
9498 avg1, avg5, avg15 = getloadavg()
9502 if avg1 >= max_load:
9507 def _poll(self, timeout=None):
9509 All poll() calls pass through here. The poll events
9510 are added directly to self._poll_event_queue.
9511 In order to avoid endless blocking, this raises
9512 StopIteration if timeout is None and there are
9513 no file descriptors to poll.
9515 if not self._poll_event_handlers:
9517 if timeout is None and \
9518 not self._poll_event_handlers:
9519 raise StopIteration(
9520 "timeout is None and there are no poll() event handlers")
9522 # The following error is known to occur with Linux kernel versions
9525 # select.error: (4, 'Interrupted system call')
9527 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9528 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9529 # without any events.
9532 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9534 except select.error, e:
9535 writemsg_level("\n!!! select error: %s\n" % (e,),
9536 level=logging.ERROR, noiselevel=-1)
9538 if timeout is not None:
9541 def _next_poll_event(self, timeout=None):
9543 Since the _schedule_wait() loop is called by event
9544 handlers from _poll_loop(), maintain a central event
9545 queue for both of them to share events from a single
9546 poll() call. In order to avoid endless blocking, this
9547 raises StopIteration if timeout is None and there are
9548 no file descriptors to poll.
9550 if not self._poll_event_queue:
9552 return self._poll_event_queue.pop()
# Dispatch poll events to handlers until none remain registered.
9554 def _poll_loop(self):
9556 event_handlers = self._poll_event_handlers
9557 event_handled = False
9560 while event_handlers:
9561 f, event = self._next_poll_event()
9562 handler, reg_id = event_handlers[f]
9564 event_handled = True
9565 except StopIteration:
9566 event_handled = True
# Guard against spinning forever with no handler making progress.
9568 if not event_handled:
9569 raise AssertionError("tight loop")
9571 def _schedule_yield(self):
9573 Schedule for a short period of time chosen by the scheduler based
9574 on internal state. Synchronous tasks should call this periodically
9575 in order to allow the scheduler to service pending poll events. The
9576 scheduler will call poll() exactly once, without blocking, and any
9577 resulting poll events will be serviced.
9579 event_handlers = self._poll_event_handlers
9582 if not event_handlers:
9583 return bool(events_handled)
9585 if not self._poll_event_queue:
9589 while event_handlers and self._poll_event_queue:
9590 f, event = self._next_poll_event()
9591 handler, reg_id = event_handlers[f]
9594 except StopIteration:
9597 return bool(events_handled)
# Register a handler for events on fd f; each fd may have only one.
9599 def _register(self, f, eventmask, handler):
9602 @return: A unique registration id, for use in schedule() or
9605 if f in self._poll_event_handlers:
9606 raise AssertionError("fd %d is already registered" % f)
9607 self._event_handler_id += 1
9608 reg_id = self._event_handler_id
9609 self._poll_event_handler_ids[reg_id] = f
9610 self._poll_event_handlers[f] = (handler, reg_id)
9611 self._poll_obj.register(f, eventmask)
# Remove the handler registered under reg_id and unregister its fd.
9614 def _unregister(self, reg_id):
9615 f = self._poll_event_handler_ids[reg_id]
9616 self._poll_obj.unregister(f)
9617 del self._poll_event_handlers[f]
9618 del self._poll_event_handler_ids[reg_id]
9620 def _schedule_wait(self, wait_ids):
9622 Schedule until wait_id is not longer registered
9625 @param wait_id: a task id to wait for
9627 event_handlers = self._poll_event_handlers
9628 handler_ids = self._poll_event_handler_ids
9629 event_handled = False
# Accept a single registration id or an iterable of them.
9631 if isinstance(wait_ids, int):
9632 wait_ids = frozenset([wait_ids])
9635 while wait_ids.intersection(handler_ids):
9636 f, event = self._next_poll_event()
9637 handler, reg_id = event_handlers[f]
9639 event_handled = True
9640 except StopIteration:
9641 event_handled = True
9643 return event_handled
# Runs multiple SequentialTaskQueue instances under one PollScheduler
# loop until every queued task has completed.
# NOTE(review): elided listing; statements are missing between lines.
9645 class QueueScheduler(PollScheduler):
9648 Add instances of SequentialTaskQueue and then call run(). The
9649 run() method returns when no tasks remain.
9652 def __init__(self, max_jobs=None, max_load=None):
9653 PollScheduler.__init__(self)
9655 if max_jobs is None:
9658 self._max_jobs = max_jobs
9659 self._max_load = max_load
# Public scheduling interface handed to tasks.
9660 self.sched_iface = self._sched_iface_class(
9661 register=self._register,
9662 schedule=self._schedule_wait,
9663 unregister=self._unregister)
9666 self._schedule_listeners = []
9669 self._queues.append(q)
9671 def remove(self, q):
9672 self._queues.remove(q)
# Drive scheduling until no queue can start more work, then wait for
# running jobs to drain.
9676 while self._schedule():
9679 while self._running_job_count():
9682 def _schedule_tasks(self):
9685 @returns: True if there may be remaining tasks to schedule,
9688 while self._can_add_job():
9689 n = self._max_jobs - self._running_job_count()
9693 if not self._start_next_job(n):
9696 for q in self._queues:
# Sum of running tasks across all queues, cached in self._jobs.
9701 def _running_job_count(self):
9703 for q in self._queues:
9704 job_count += len(q.running_tasks)
9705 self._jobs = job_count
# Ask each queue in turn to start work until n new jobs have started;
# returns the number actually started.
9708 def _start_next_job(self, n=1):
9710 for q in self._queues:
9711 initial_job_count = len(q.running_tasks)
9713 final_job_count = len(q.running_tasks)
9714 if final_job_count > initial_job_count:
9715 started_count += (final_job_count - initial_job_count)
9716 if started_count >= n:
9718 return started_count
# Convenience wrapper: a single SequentialTaskQueue driven by its own
# QueueScheduler; add tasks and call run().
9720 class TaskScheduler(object):
9723 A simple way to handle scheduling of AsynchrousTask instances. Simply
9724 add tasks and call run(). The run() method returns when no tasks remain.
9727 def __init__(self, max_jobs=None, max_load=None):
9728 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9729 self._scheduler = QueueScheduler(
9730 max_jobs=max_jobs, max_load=max_load)
9731 self.sched_iface = self._scheduler.sched_iface
# run() delegates directly to the underlying QueueScheduler.
9732 self.run = self._scheduler.run
9733 self._scheduler.add(self._queue)
9735 def add(self, task):
9736 self._queue.add(task)
# Renders the one-line "Jobs: X of Y complete ... Load avg: ..." status
# display, using termcap control codes when stdout is a capable tty and
# plain appended lines otherwise.
# NOTE(review): elided listing; statements are missing between lines.
9738 class JobStatusDisplay(object):
# Assigning to any of these attribute names triggers a display update
# via __setattr__/_property_change below.
9740 _bound_properties = ("curval", "failed", "running")
9741 _jobs_column_width = 48
9743 # Don't update the display unless at least this much
9744 # time has passed, in units of seconds.
9745 _min_display_latency = 2
9747 _default_term_codes = {
9753 _termcap_name_map = {
9754 'carriage_return' : 'cr',
# Attributes are set via object.__setattr__ because __setattr__ below
# intercepts ordinary assignment.
9759 def __init__(self, out=sys.stdout, quiet=False):
9760 object.__setattr__(self, "out", out)
9761 object.__setattr__(self, "quiet", quiet)
9762 object.__setattr__(self, "maxval", 0)
9763 object.__setattr__(self, "merges", 0)
9764 object.__setattr__(self, "_changed", False)
9765 object.__setattr__(self, "_displayed", False)
9766 object.__setattr__(self, "_last_display_time", 0)
9767 object.__setattr__(self, "width", 80)
9770 isatty = hasattr(out, "isatty") and out.isatty()
9771 object.__setattr__(self, "_isatty", isatty)
# Fall back to default term codes when not a tty or when termcap
# initialization fails.
9772 if not isatty or not self._init_term():
9774 for k, capname in self._termcap_name_map.iteritems():
9775 term_codes[k] = self._default_term_codes[capname]
9776 object.__setattr__(self, "_term_codes", term_codes)
# Decode any byte-string codes to unicode (Python 2 basestring check).
9777 encoding = sys.getdefaultencoding()
9778 for k, v in self._term_codes.items():
9779 if not isinstance(v, basestring):
9780 self._term_codes[k] = v.decode(encoding, 'replace')
9782 def _init_term(self):
9784 Initialize term control codes.
9786 @returns: True if term codes were successfully initialized,
9790 term_type = os.environ.get("TERM", "vt100")
9796 curses.setupterm(term_type, self.out.fileno())
9797 tigetstr = curses.tigetstr
9798 except curses.error:
9803 if tigetstr is None:
# Query each needed capability, substituting the default on failure.
9807 for k, capname in self._termcap_name_map.iteritems():
9808 code = tigetstr(capname)
9810 code = self._default_term_codes[capname]
9811 term_codes[k] = code
9812 object.__setattr__(self, "_term_codes", term_codes)
9815 def _format_msg(self, msg):
9816 return ">>> %s" % msg
# Erase the current status line (carriage return + clear-to-eol).
9820 self._term_codes['carriage_return'] + \
9821 self._term_codes['clr_eol'])
9823 self._displayed = False
9825 def _display(self, line):
9826 self.out.write(line)
9828 self._displayed = True
# Write msg, choosing plain newline output for non-tty streams and the
# in-place status line for ttys.
9830 def _update(self, msg):
9833 if not self._isatty:
9834 out.write(self._format_msg(msg) + self._term_codes['newline'])
9836 self._displayed = True
9842 self._display(self._format_msg(msg))
# Print a one-off message without clobbering the status line.
9844 def displayMessage(self, msg):
9846 was_displayed = self._displayed
9848 if self._isatty and self._displayed:
9851 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9853 self._displayed = False
9856 self._changed = True
# Reset all bound counters and terminate any displayed status line.
9862 for name in self._bound_properties:
9863 object.__setattr__(self, name, 0)
9866 self.out.write(self._term_codes['newline'])
9868 self._displayed = False
# Intercept attribute assignment so bound-property changes can mark
# the display dirty; no-op when the value is unchanged.
9870 def __setattr__(self, name, value):
9871 old_value = getattr(self, name)
9872 if value == old_value:
9874 object.__setattr__(self, name, value)
9875 if name in self._bound_properties:
9876 self._property_change(name, old_value, value)
9878 def _property_change(self, name, old_value, new_value):
9879 self._changed = True
# Format the load averages with a precision that fits the display.
9882 def _load_avg_str(self):
9897 return ", ".join(("%%.%df" % digits ) % x for x in avg)
# Redraw the status line, rate-limited by _min_display_latency.
9901 Display status on stdout, but only if something has
9902 changed since the last call.
9908 current_time = time.time()
9909 time_delta = current_time - self._last_display_time
9910 if self._displayed and \
9912 if not self._isatty:
9914 if time_delta < self._min_display_latency:
9917 self._last_display_time = current_time
9918 self._changed = False
9919 self._display_status()
9921 def _display_status(self):
9922 # Don't use len(self._completed_tasks) here since that also
9923 # can include uninstall tasks.
9924 curval_str = str(self.curval)
9925 maxval_str = str(self.maxval)
9926 running_str = str(self.running)
9927 failed_str = str(self.failed)
9928 load_avg_str = self._load_avg_str()
# Render styled output and a parallel plain-text copy; the plain copy
# is used for width calculations and the xterm title.
9930 color_output = StringIO()
9931 plain_output = StringIO()
9932 style_file = portage.output.ConsoleStyleFile(color_output)
9933 style_file.write_listener = plain_output
9934 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9935 style_writer.style_listener = style_file.new_styles
9936 f = formatter.AbstractFormatter(style_writer)
9938 number_style = "INFORM"
9939 f.add_literal_data("Jobs: ")
9940 f.push_style(number_style)
9941 f.add_literal_data(curval_str)
9943 f.add_literal_data(" of ")
9944 f.push_style(number_style)
9945 f.add_literal_data(maxval_str)
9947 f.add_literal_data(" complete")
9950 f.add_literal_data(", ")
9951 f.push_style(number_style)
9952 f.add_literal_data(running_str)
9954 f.add_literal_data(" running")
9957 f.add_literal_data(", ")
9958 f.push_style(number_style)
9959 f.add_literal_data(failed_str)
9961 f.add_literal_data(" failed")
# Pad the jobs column to a fixed width before the load average.
9963 padding = self._jobs_column_width - len(plain_output.getvalue())
9965 f.add_literal_data(padding * " ")
9967 f.add_literal_data("Load avg: ")
9968 f.add_literal_data(load_avg_str)
9970 # Truncate to fit width, to avoid making the terminal scroll if the
9971 # line overflows (happens when the load average is large).
9972 plain_output = plain_output.getvalue()
9973 if self._isatty and len(plain_output) > self.width:
9974 # Use plain_output here since it's easier to truncate
9975 # properly than the color output which contains console
9977 self._update(plain_output[:self.width])
9979 self._update(color_output.getvalue())
9981 xtermTitle(" ".join(plain_output.split()))
9983 class Scheduler(PollScheduler):
9985 _opts_ignore_blockers = \
9986 frozenset(["--buildpkgonly",
9987 "--fetchonly", "--fetch-all-uri",
9988 "--nodeps", "--pretend"])
9990 _opts_no_background = \
9991 frozenset(["--pretend",
9992 "--fetchonly", "--fetch-all-uri"])
9994 _opts_no_restart = frozenset(["--buildpkgonly",
9995 "--fetchonly", "--fetch-all-uri", "--pretend"])
9997 _bad_resume_opts = set(["--ask", "--changelog",
9998 "--resume", "--skipfirst"])
10000 _fetch_log = "/var/log/emerge-fetch.log"
10002 class _iface_class(SlotObject):
10003 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
10004 "dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
10005 "scheduleSetup", "scheduleUnpack", "scheduleYield",
10008 class _fetch_iface_class(SlotObject):
10009 __slots__ = ("log_file", "schedule")
10011 _task_queues_class = slot_dict_class(
10012 ("merge", "jobs", "fetch", "unpack"), prefix="")
10014 class _build_opts_class(SlotObject):
10015 __slots__ = ("buildpkg", "buildpkgonly",
10016 "fetch_all_uri", "fetchonly", "pretend")
10018 class _binpkg_opts_class(SlotObject):
10019 __slots__ = ("fetchonly", "getbinpkg", "pretend")
10021 class _pkg_count_class(SlotObject):
10022 __slots__ = ("curval", "maxval")
10024 class _emerge_log_class(SlotObject):
10025 __slots__ = ("xterm_titles",)
10027 def log(self, *pargs, **kwargs):
10028 if not self.xterm_titles:
10029 # Avoid interference with the scheduler's status display.
10030 kwargs.pop("short_msg", None)
10031 emergelog(self.xterm_titles, *pargs, **kwargs)
10033 class _failed_pkg(SlotObject):
10034 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
10036 class _ConfigPool(object):
10037 """Interface for a task to temporarily allocate a config
10038 instance from a pool. This allows a task to be constructed
10039 long before the config instance actually becomes needed, like
10040 when prefetchers are constructed for the whole merge list."""
10041 __slots__ = ("_root", "_allocate", "_deallocate")
10042 def __init__(self, root, allocate, deallocate):
10044 self._allocate = allocate
10045 self._deallocate = deallocate
10046 def allocate(self):
10047 return self._allocate(self._root)
10048 def deallocate(self, settings):
10049 self._deallocate(settings)
10051 class _unknown_internal_error(portage.exception.PortageException):
10053 Used internally to terminate scheduling. The specific reason for
10054 the failure should have been dumped to stderr.
10056 def __init__(self, value=""):
10057 portage.exception.PortageException.__init__(self, value)
10059 def __init__(self, settings, trees, mtimedb, myopts,
10060 spinner, mergelist, favorites, digraph):
10061 PollScheduler.__init__(self)
10062 self.settings = settings
10063 self.target_root = settings["ROOT"]
10065 self.myopts = myopts
10066 self._spinner = spinner
10067 self._mtimedb = mtimedb
10068 self._mergelist = mergelist
10069 self._favorites = favorites
10070 self._args_set = InternalPackageSet(favorites)
10071 self._build_opts = self._build_opts_class()
10072 for k in self._build_opts.__slots__:
10073 setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10074 self._binpkg_opts = self._binpkg_opts_class()
10075 for k in self._binpkg_opts.__slots__:
10076 setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10079 self._logger = self._emerge_log_class()
10080 self._task_queues = self._task_queues_class()
10081 for k in self._task_queues.allowed_keys:
10082 setattr(self._task_queues, k,
10083 SequentialTaskQueue())
10085 # Holds merges that will wait to be executed when no builds are
10086 # executing. This is useful for system packages since dependencies
10087 # on system packages are frequently unspecified.
10088 self._merge_wait_queue = []
10089 # Holds merges that have been transfered from the merge_wait_queue to
10090 # the actual merge queue. They are removed from this list upon
10091 # completion. Other packages can start building only when this list is
10093 self._merge_wait_scheduled = []
10095 # Holds system packages and their deep runtime dependencies. Before
10096 # being merged, these packages go to merge_wait_queue, to be merged
10097 # when no other packages are building.
10098 self._deep_system_deps = set()
10100 # Holds packages to merge which will satisfy currently unsatisfied
10101 # deep runtime dependencies of system packages. If this is not empty
10102 # then no parallel builds will be spawned until it is empty. This
10103 # minimizes the possibility that a build will fail due to the system
10104 # being in a fragile state. For example, see bug #259954.
10105 self._unsatisfied_system_deps = set()
10107 self._status_display = JobStatusDisplay()
10108 self._max_load = myopts.get("--load-average")
10109 max_jobs = myopts.get("--jobs")
10110 if max_jobs is None:
10112 self._set_max_jobs(max_jobs)
10114 # The root where the currently running
10115 # portage instance is installed.
10116 self._running_root = trees["/"]["root_config"]
10118 if settings.get("PORTAGE_DEBUG", "") == "1":
10120 self.pkgsettings = {}
10121 self._config_pool = {}
10122 self._blocker_db = {}
10124 self._config_pool[root] = []
10125 self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10127 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10128 schedule=self._schedule_fetch)
10129 self._sched_iface = self._iface_class(
10130 dblinkEbuildPhase=self._dblink_ebuild_phase,
10131 dblinkDisplayMerge=self._dblink_display_merge,
10132 dblinkElog=self._dblink_elog,
10133 dblinkEmergeLog=self._dblink_emerge_log,
10134 fetch=fetch_iface, register=self._register,
10135 schedule=self._schedule_wait,
10136 scheduleSetup=self._schedule_setup,
10137 scheduleUnpack=self._schedule_unpack,
10138 scheduleYield=self._schedule_yield,
10139 unregister=self._unregister)
10141 self._prefetchers = weakref.WeakValueDictionary()
10142 self._pkg_queue = []
10143 self._completed_tasks = set()
10145 self._failed_pkgs = []
10146 self._failed_pkgs_all = []
10147 self._failed_pkgs_die_msgs = []
10148 self._post_mod_echo_msgs = []
10149 self._parallel_fetch = False
10150 merge_count = len([x for x in mergelist \
10151 if isinstance(x, Package) and x.operation == "merge"])
10152 self._pkg_count = self._pkg_count_class(
10153 curval=0, maxval=merge_count)
10154 self._status_display.maxval = self._pkg_count.maxval
10156 # The load average takes some time to respond when new
10157 # jobs are added, so we need to limit the rate of adding
10159 self._job_delay_max = 10
10160 self._job_delay_factor = 1.0
10161 self._job_delay_exp = 1.5
10162 self._previous_job_start_time = None
10164 self._set_digraph(digraph)
10166 # This is used to memoize the _choose_pkg() result when
10167 # no packages can be chosen until one of the existing
10169 self._choose_pkg_return_early = False
10171 features = self.settings.features
10172 if "parallel-fetch" in features and \
10173 not ("--pretend" in self.myopts or \
10174 "--fetch-all-uri" in self.myopts or \
10175 "--fetchonly" in self.myopts):
10176 if "distlocks" not in features:
10177 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10178 portage.writemsg(red("!!!")+" parallel-fetching " + \
10179 "requires the distlocks feature enabled"+"\n",
10181 portage.writemsg(red("!!!")+" you have it disabled, " + \
10182 "thus parallel-fetching is being disabled"+"\n",
10184 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10185 elif len(mergelist) > 1:
10186 self._parallel_fetch = True
10188 if self._parallel_fetch:
10189 # clear out existing fetch log if it exists
10191 open(self._fetch_log, 'w')
10192 except EnvironmentError:
10195 self._running_portage = None
10196 portage_match = self._running_root.trees["vartree"].dbapi.match(
10197 portage.const.PORTAGE_PACKAGE_ATOM)
10199 cpv = portage_match.pop()
10200 self._running_portage = self._pkg(cpv, "installed",
10201 self._running_root, installed=True)
10203 def _poll(self, timeout=None):
10205 PollScheduler._poll(self, timeout=timeout)
10207 def _set_max_jobs(self, max_jobs):
10208 self._max_jobs = max_jobs
10209 self._task_queues.jobs.max_jobs = max_jobs
def _background_mode(self):
	"""
	Check if background mode is enabled and adjust states as necessary.

	@returns: True if background mode is enabled, False otherwise.
	"""
	# Background mode requires parallelism (--jobs unlimited or > 1)
	# or --quiet, and none of the options that force the foreground.
	background = (self._max_jobs is True or \
		self._max_jobs > 1 or "--quiet" in self.myopts) and \
		not bool(self._opts_no_background.intersection(self.myopts))

	interactive_tasks = self._get_interactive_tasks()
	if interactive_tasks:
		# Interactive packages need the terminal, so send their
		# output to stdio instead of logs.
		writemsg_level(">>> Sending package output to stdio due " + \
			"to interactive package(s):\n",
			level=logging.INFO, noiselevel=-1)
		for pkg in interactive_tasks:
			pkg_str = " " + colorize("INFORM", str(pkg.cpv))
			if pkg.root != "/":
				pkg_str += " for " + pkg.root
			msg.append(pkg_str)  # NOTE(review): msg initialization not visible in this view

		writemsg_level("".join("%s\n" % (l,) for l in msg),
			level=logging.INFO, noiselevel=-1)
		if self._max_jobs is True or self._max_jobs > 1:
			# Interactive packages force serial merges.
			self._set_max_jobs(1)
			writemsg_level(">>> Setting --jobs=1 due " + \
				"to the above interactive package(s)\n",
				level=logging.INFO, noiselevel=-1)

	# Quiet status display in background, or when --quiet was given
	# without --verbose.
	self._status_display.quiet = \
		not background or \
		("--quiet" in self.myopts and \
		"--verbose" not in self.myopts)

	self._logger.xterm_titles = \
		"notitles" not in self.settings.features and \
		self._status_display.quiet
def _get_interactive_tasks(self):
	"""Return merge tasks whose PROPERTIES include "interactive"."""
	from portage import flatten
	from portage.dep import use_reduce, paren_reduce
	interactive_tasks = []
	for task in self._mergelist:
		# Only ebuild merge operations can be interactive.
		if not (isinstance(task, Package) and \
			task.operation == "merge"):
			properties = flatten(use_reduce(paren_reduce(
				task.metadata["PROPERTIES"]), uselist=task.use.enabled))
		except portage.exception.InvalidDependString, e:
			# A malformed PROPERTIES string is treated as an
			# internal error here.
			show_invalid_depstring_notice(task,
				task.metadata["PROPERTIES"], str(e))
			raise self._unknown_internal_error()
		if "interactive" in properties:
			interactive_tasks.append(task)
	return interactive_tasks
def _set_digraph(self, digraph):
	"""Install the dependency graph and derive scheduling metadata."""
	# With --nodeps or a serial merge the graph is not needed.
	if "--nodeps" in self.myopts or \
		(self._max_jobs is not True and self._max_jobs < 2):
		self._digraph = None

	self._digraph = digraph
	self._find_system_deps()
	self._prune_digraph()
	self._prevent_builddir_collisions()
def _find_system_deps(self):
	"""
	Find system packages and their deep runtime dependencies. Before being
	merged, these packages go to merge_wait_queue, to be merged when no
	other packages are building.
	"""
	# Keep only the packages that will actually be merged.
	merge_deps = set(pkg for pkg in \
		_find_deep_system_runtime_deps(self._digraph) \
		if pkg.operation == "merge")
	self._deep_system_deps.clear()
	self._deep_system_deps.update(merge_deps)
def _prune_digraph(self):
	"""
	Prune any root nodes that are irrelevant.
	"""
	graph = self._digraph
	completed_tasks = self._completed_tasks
	removed_nodes = set()
	# Drop roots that are not packages, are installed nomerge
	# entries, or have already completed.
	# NOTE(review): the loop-control lines appear incomplete here.
	for node in graph.root_nodes():
		if not isinstance(node, Package) or \
			(node.installed and node.operation == "nomerge") or \
			node in completed_tasks:
			removed_nodes.add(node)
	graph.difference_update(removed_nodes)
	if not removed_nodes:
	removed_nodes.clear()
def _prevent_builddir_collisions(self):
	"""
	When building stages, sometimes the same exact cpv needs to be merged
	to both $ROOTs. Add edges to the digraph in order to avoid collisions
	in the builddir. Currently, normal file locks would be inappropriate
	for this purpose since emerge holds all of it's build dir locks from
	"""
	for pkg in self._mergelist:
		if not isinstance(pkg, Package):
			# a satisfied blocker
		# NOTE(review): cpv_map initialization is not visible here.
		if pkg.cpv not in cpv_map:
			cpv_map[pkg.cpv] = [pkg]
		# Add buildtime edges so that duplicate cpvs are merged
		# serially rather than colliding in the builddir.
		for earlier_pkg in cpv_map[pkg.cpv]:
			self._digraph.add(earlier_pkg, pkg,
				priority=DepPriority(buildtime=True))
			cpv_map[pkg.cpv].append(pkg)
class _pkg_failure(portage.exception.PortageException):
	"""
	An instance of this class is raised by unmerge() when
	an uninstallation fails.
	"""
	def __init__(self, *pargs):
		portage.exception.PortageException.__init__(self, pargs)
		# The first positional argument carries the exit status.
		self.status = pargs[0]
10354 def _schedule_fetch(self, fetcher):
10356 Schedule a fetcher on the fetch queue, in order to
10357 serialize access to the fetch log.
10359 self._task_queues.fetch.addFront(fetcher)
10361 def _schedule_setup(self, setup_phase):
10363 Schedule a setup phase on the merge queue, in order to
10364 serialize unsandboxed access to the live filesystem.
10366 self._task_queues.merge.addFront(setup_phase)
10369 def _schedule_unpack(self, unpack_phase):
10371 Schedule an unpack phase on the unpack queue, in order
10372 to serialize $DISTDIR access for live ebuilds.
10374 self._task_queues.unpack.add(unpack_phase)
10376 def _find_blockers(self, new_pkg):
10378 Returns a callable which should be called only when
10379 the vdb lock has been acquired.
10381 def get_blockers():
10382 return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10383 return get_blockers
def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
	"""Collect dblinks of installed packages that block new_pkg."""
	if self._opts_ignore_blockers.intersection(self.myopts):

	# Call gc.collect() here to avoid heap overflow that
	# triggers 'Cannot allocate memory' errors (reported
	# with python-2.5).

	blocker_db = self._blocker_db[new_pkg.root]

	blocker_dblinks = []
	for blocking_pkg in blocker_db.findInstalledBlockers(
		new_pkg, acquire_lock=acquire_lock):
		# Same-slot and identical-cpv packages are handled by the
		# replacement logic rather than treated as blockers.
		if new_pkg.slot_atom == blocking_pkg.slot_atom:
		if new_pkg.cpv == blocking_pkg.cpv:
		blocker_dblinks.append(portage.dblink(
			blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
			self.pkgsettings[blocking_pkg.root], treetype="vartree",
			vartree=self.trees[blocking_pkg.root]["vartree"]))

	return blocker_dblinks
def _dblink_pkg(self, pkg_dblink):
	"""Build the Package instance corresponding to a dblink."""
	tree_type = RootConfig.tree_pkg_map[pkg_dblink.treetype]
	conf = self.trees[pkg_dblink.myroot]["root_config"]
	return self._pkg(pkg_dblink.mycpv, tree_type, conf,
		installed=(tree_type == "installed"))
def _append_to_log_path(self, log_path, msg):
	"""Append msg to the log file at log_path."""
	# NOTE(review): the write/close portion of this method is not
	# visible in this view.
	f = open(log_path, 'a')
def _dblink_elog(self, pkg_dblink, phase, func, msgs):
	"""Route elog messages from a dblink, logging in background mode."""
	log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
	background = self._background

	# In background mode with a log file, elog output goes to the log
	# instead of the terminal.
	if background and log_path is not None:
		log_file = open(log_path, 'a')

	func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)

	if log_file is not None:
10445 def _dblink_emerge_log(self, msg):
10446 self._logger.log(msg)
def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
	"""Display a merge message, routing it to the log in background mode."""
	log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
	background = self._background

	if log_path is None:
		# No log file: write to the terminal unless we are in
		# background mode and the message is below warning level.
		if not (background and level < logging.WARN):
			portage.util.writemsg_level(msg,
				level=level, noiselevel=noiselevel)
	portage.util.writemsg_level(msg,
		level=level, noiselevel=noiselevel)
	self._append_to_log_path(log_path, msg)
def _dblink_ebuild_phase(self,
	pkg_dblink, pkg_dbapi, ebuild_path, phase):
	"""
	Using this callback for merge phases allows the scheduler
	to run while these phases execute asynchronously, and allows
	the scheduler control output handling.
	"""
	scheduler = self._sched_iface
	settings = pkg_dblink.settings
	pkg = self._dblink_pkg(pkg_dblink)
	background = self._background
	log_path = settings.get("PORTAGE_LOG_FILE")

	# Run the phase through the scheduler and block until it exits.
	ebuild_phase = EbuildPhase(background=background,
		pkg=pkg, phase=phase, scheduler=scheduler,
		settings=settings, tree=pkg_dblink.treetype)
	ebuild_phase.start()
	ebuild_phase.wait()

	return ebuild_phase.returncode
def _generate_digests(self):
	"""
	Generate digests if necessary for --digests or FEATURES=digest.
	In order to avoid interference, this must done before parallel
	"""
	if '--fetchonly' in self.myopts:

	digest = '--digest' in self.myopts
	for pkgsettings in self.pkgsettings.itervalues():
		if 'digest' in pkgsettings.features:

	for x in self._mergelist:
		# Only ebuilds that will actually be merged need digests.
		if not isinstance(x, Package) or \
			x.type_name != 'ebuild' or \
			x.operation != 'merge':
		pkgsettings = self.pkgsettings[x.root]
		if '--digest' not in self.myopts and \
			'digest' not in pkgsettings.features:
		portdb = x.root_config.trees['porttree'].dbapi
		ebuild_path = portdb.findname(x.cpv)
		if not ebuild_path:
			"!!! Could not locate ebuild for '%s'.\n" \
			% x.cpv, level=logging.ERROR, noiselevel=-1)
		# digestgen reads the ebuild's directory via the 'O' variable.
		pkgsettings['O'] = os.path.dirname(ebuild_path)
		if not portage.digestgen([], pkgsettings, myportdb=portdb):
			"!!! Unable to generate manifest for '%s'.\n" \
			% x.cpv, level=logging.ERROR, noiselevel=-1)
def _check_manifests(self):
	# Verify all the manifests now so that the user is notified of failure
	# as soon as possible.
	if "strict" not in self.settings.features or \
		"--fetchonly" in self.myopts or \
		"--fetch-all-uri" in self.myopts:

	shown_verifying_msg = False
	quiet_settings = {}
	for myroot, pkgsettings in self.pkgsettings.iteritems():
		# Clone a quiet config per root so digestcheck output stays terse.
		quiet_config = portage.config(clone=pkgsettings)
		quiet_config["PORTAGE_QUIET"] = "1"
		quiet_config.backup_changes("PORTAGE_QUIET")
		quiet_settings[myroot] = quiet_config

	for x in self._mergelist:
		if not isinstance(x, Package) or \
			x.type_name != "ebuild":
		# Show the status message only once, before the first check.
		if not shown_verifying_msg:
			shown_verifying_msg = True
			self._status_msg("Verifying ebuild manifests")
		root_config = x.root_config
		portdb = root_config.trees["porttree"].dbapi
		quiet_config = quiet_settings[root_config.root]
		quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
		if not portage.digestcheck([], quiet_config, strict=True):
def _add_prefetchers(self):
	"""Spawn background fetchers for upcoming packages."""
	if not self._parallel_fetch:

	if self._parallel_fetch:
		self._status_msg("Starting parallel fetch")

		prefetchers = self._prefetchers
		getbinpkg = "--getbinpkg" in self.myopts

		# In order to avoid "waiting for lock" messages
		# at the beginning, which annoy users, never
		# spawn a prefetcher for the first package.
		for pkg in self._mergelist[1:]:
			prefetcher = self._create_prefetcher(pkg)
			if prefetcher is not None:
				self._task_queues.fetch.add(prefetcher)
				prefetchers[pkg] = prefetcher
def _create_prefetcher(self, pkg):
	"""
	@return: a prefetcher, or None if not applicable
	"""
	if not isinstance(pkg, Package):

	elif pkg.type_name == "ebuild":
		# Prefetch source distfiles in the background.
		prefetcher = EbuildFetcher(background=True,
			config_pool=self._ConfigPool(pkg.root,
			self._allocate_config, self._deallocate_config),
			fetchonly=1, logfile=self._fetch_log,
			pkg=pkg, prefetch=True, scheduler=self._sched_iface)

	elif pkg.type_name == "binary" and \
		"--getbinpkg" in self.myopts and \
		pkg.root_config.trees["bintree"].isremote(pkg.cpv):
		# Only remote binary packages need prefetching.
		prefetcher = BinpkgPrefetcher(background=True,
			pkg=pkg, scheduler=self._sched_iface)
def _is_restart_scheduled(self):
	"""
	Check if the merge list contains a replacement
	for the current running instance, that will result
	in restart after merge.

	@returns: True if a restart is scheduled, False otherwise.
	"""
	if self._opts_no_restart.intersection(self.myopts):

	mergelist = self._mergelist

	for i, pkg in enumerate(mergelist):
		# A restart only matters if packages remain after the
		# portage replacement itself.
		if self._is_restart_necessary(pkg) and \
			i != len(mergelist) - 1:
def _is_restart_necessary(self, pkg):
	"""
	@return: True if merging the given package
	requires restart, False otherwise.
	"""
	# Figure out if we need a restart.
	if pkg.root == self._running_root.root and \
		portage.match_from_list(
		portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
		if self._running_portage:
			# Restart only when the version actually changes.
			return pkg.cpv != self._running_portage.cpv
def _restart_if_necessary(self, pkg):
	"""
	Use execv() to restart emerge. This happens
	if portage upgrades itself and there are
	remaining packages in the list.
	"""
	if self._opts_no_restart.intersection(self.myopts):

	if not self._is_restart_necessary(pkg):

	if pkg == self._mergelist[-1]:

	self._main_loop_cleanup()

	logger = self._logger
	pkg_count = self._pkg_count
	mtimedb = self._mtimedb
	bad_resume_opts = self._bad_resume_opts

	logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
		(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))

	logger.log(" *** RESTARTING " + \
		"emerge via exec() after change of " + \
		"portage version.")

	# Drop the just-merged portage entry from the resume list.
	mtimedb["resume"]["mergelist"].remove(list(pkg))

	portage.run_exitfuncs()
	mynewargv = [sys.argv[0], "--resume"]
	resume_opts = self.myopts.copy()
	# For automatic resume, we need to prevent
	# any of bad_resume_opts from leaking in
	# via EMERGE_DEFAULT_OPTS.
	resume_opts["--ignore-default-opts"] = True
	for myopt, myarg in resume_opts.iteritems():
		if myopt not in bad_resume_opts:
			mynewargv.append(myopt)
			mynewargv.append(myopt +"="+ str(myarg))
	# priority only needs to be adjusted on the first run
	os.environ["PORTAGE_NICENESS"] = "0"
	os.execv(mynewargv[0], mynewargv)
# Main merge entry point (the method header is not visible in this view).
if "--resume" in self.myopts:
	# We're resuming.
	portage.writemsg_stdout(
		colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
	self._logger.log(" *** Resuming merge...")

self._save_resume_list()

	self._background = self._background_mode()
except self._unknown_internal_error:

for root in self.trees:
	root_config = self.trees[root]["root_config"]

	# Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
	# since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
	# for ensuring sane $PWD (bug #239560) and storing elog messages.
	tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
	if not tmpdir or not os.path.isdir(tmpdir):
		msg = "The directory specified in your " + \
			"PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
			"does not exist. Please create this " + \
			"directory or correct your PORTAGE_TMPDIR setting."
		msg = textwrap.wrap(msg, 70)
		out = portage.output.EOutput()

	if self._background:
		# Propagate background mode into each root's settings.
		root_config.settings.unlock()
		root_config.settings["PORTAGE_BACKGROUND"] = "1"
		root_config.settings.backup_changes("PORTAGE_BACKGROUND")
		root_config.settings.lock()

	self.pkgsettings[root] = portage.config(
		clone=root_config.settings)

rval = self._generate_digests()
if rval != os.EX_OK:

rval = self._check_manifests()
if rval != os.EX_OK:

keep_going = "--keep-going" in self.myopts
fetchonly = self._build_opts.fetchonly
mtimedb = self._mtimedb
failed_pkgs = self._failed_pkgs

	rval = self._merge()
	if rval == os.EX_OK or fetchonly or not keep_going:
	if "resume" not in mtimedb:
	mergelist = self._mtimedb["resume"].get("mergelist")
	if not failed_pkgs:

	# With --keep-going, drop failed packages from the resume
	# list before recalculating the dependency graph.
	for failed_pkg in failed_pkgs:
		mergelist.remove(list(failed_pkg.pkg))

	self._failed_pkgs_all.extend(failed_pkgs)

	if not self._calc_resume_list():

	clear_caches(self.trees)
	if not self._mergelist:

	self._save_resume_list()
	self._pkg_count.curval = 0
	self._pkg_count.maxval = len([x for x in self._mergelist \
		if isinstance(x, Package) and x.operation == "merge"])
	self._status_display.maxval = self._pkg_count.maxval

self._logger.log(" *** Finished. Cleaning up...")

self._failed_pkgs_all.extend(failed_pkgs)

background = self._background
failure_log_shown = False
if background and len(self._failed_pkgs_all) == 1:
	# If only one package failed then just show it's
	# whole log for easy viewing.
	failed_pkg = self._failed_pkgs_all[-1]
	build_dir = failed_pkg.build_dir
	log_paths = [failed_pkg.build_log]

	log_path = self._locate_failure_log(failed_pkg)
	if log_path is not None:
		log_file = open(log_path)

	if log_file is not None:
		for line in log_file:
			writemsg_level(line, noiselevel=-1)
		failure_log_shown = True

# Dump mod_echo output now since it tends to flood the terminal.
# This allows us to avoid having more important output, generated
# later, from being swept away by the mod_echo output.
mod_echo_output = _flush_elog_mod_echo()

if background and not failure_log_shown and \
	self._failed_pkgs_all and \
	self._failed_pkgs_die_msgs and \
	not mod_echo_output:

	printer = portage.output.EOutput()
	for mysettings, key, logentries in self._failed_pkgs_die_msgs:
		if mysettings["ROOT"] != "/":
			root_msg = " merged to %s" % mysettings["ROOT"]
		printer.einfo("Error messages for package %s%s:" % \
			(colorize("INFORM", key), root_msg))

		# Replay captured elog entries in ebuild phase order.
		for phase in portage.const.EBUILD_PHASES:
			if phase not in logentries:
			for msgtype, msgcontent in logentries[phase]:
				if isinstance(msgcontent, basestring):
					msgcontent = [msgcontent]
				for line in msgcontent:
					printer.eerror(line.strip("\n"))

if self._post_mod_echo_msgs:
	for msg in self._post_mod_echo_msgs:

if len(self._failed_pkgs_all) > 1 or \
	(self._failed_pkgs_all and "--keep-going" in self.myopts):
	if len(self._failed_pkgs_all) > 1:
		msg = "The following %d packages have " % \
			len(self._failed_pkgs_all) + \
			"failed to build or install:"
		msg = "The following package has " + \
			"failed to build or install:"
	prefix = bad(" * ")
	writemsg(prefix + "\n", noiselevel=-1)
	from textwrap import wrap
	for line in wrap(msg, 72):
		writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
	writemsg(prefix + "\n", noiselevel=-1)
	for failed_pkg in self._failed_pkgs_all:
		writemsg("%s\t%s\n" % (prefix,
			colorize("INFORM", str(failed_pkg.pkg))),
	writemsg(prefix + "\n", noiselevel=-1)
def _elog_listener(self, mysettings, key, logentries, fulltext):
	# Capture ERROR-level elog entries so they can be replayed in the
	# failure summary after the merge finishes.
	errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
	self._failed_pkgs_die_msgs.append(
		(mysettings, key, errors))
def _locate_failure_log(self, failed_pkg):
	"""Find the build log for a failed package, if one exists."""
	build_dir = failed_pkg.build_dir

	log_paths = [failed_pkg.build_log]

	for log_path in log_paths:
		# Skip empty logs; os.stat can raise if the path is gone.
		log_size = os.stat(log_path).st_size
def _add_packages(self):
	"""Populate the pending package queue from the merge list."""
	pkg_queue = self._pkg_queue
	for pkg in self._mergelist:
		if isinstance(pkg, Package):
			pkg_queue.append(pkg)
		elif isinstance(pkg, Blocker):
def _system_merge_started(self, merge):
	"""
	Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
	"""
	graph = self._digraph

	pkg = merge.merge.pkg

	# Skip this if $ROOT != / since it shouldn't matter if there
	# are unsatisfied system runtime deps in this case.
	if pkg.root != '/':

	completed_tasks = self._completed_tasks
	unsatisfied = self._unsatisfied_system_deps

	def ignore_non_runtime_or_satisfied(priority):
		"""
		Ignore non-runtime and satisfied runtime priorities.
		"""
		if isinstance(priority, DepPriority) and \
			not priority.satisfied and \
			(priority.runtime or priority.runtime_post):

	# When checking for unsatisfied runtime deps, only check
	# direct deps since indirect deps are checked when the
	# corresponding parent is merged.
	for child in graph.child_nodes(pkg,
		ignore_priority=ignore_non_runtime_or_satisfied):
		if not isinstance(child, Package) or \
			child.operation == 'uninstall':
		if child.operation == 'merge' and \
			child not in completed_tasks:
			unsatisfied.add(child)
10949 def _merge_wait_exit_handler(self, task):
10950 self._merge_wait_scheduled.remove(task)
10951 self._merge_exit(task)
def _merge_exit(self, merge):
	"""Handle exit of a package merge task."""
	self._do_merge_exit(merge)
	self._deallocate_config(merge.merge.settings)
	# Only successful merges of non-installed packages advance the
	# progress counter.
	if merge.returncode == os.EX_OK and \
		not merge.merge.pkg.installed:
		self._status_display.curval += 1
	self._status_display.merges = len(self._task_queues.merge)
def _do_merge_exit(self, merge):
	"""Record success or failure state after a merge task exits."""
	pkg = merge.merge.pkg
	if merge.returncode != os.EX_OK:
		settings = merge.merge.settings
		build_dir = settings.get("PORTAGE_BUILDDIR")
		build_log = settings.get("PORTAGE_LOG_FILE")

		self._failed_pkgs.append(self._failed_pkg(
			build_dir=build_dir, build_log=build_log,
			returncode=merge.returncode))
		self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")

		self._status_display.failed = len(self._failed_pkgs)

	self._task_complete(pkg)
	pkg_to_replace = merge.merge.pkg_to_replace
	if pkg_to_replace is not None:
		# When a package is replaced, mark it's uninstall
		# task complete (if any).
		uninst_hash_key = \
			("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
		self._task_complete(uninst_hash_key)

	self._restart_if_necessary(pkg)

	# Call mtimedb.commit() after each merge so that
	# --resume still works after being interrupted
	# by reboot, sigkill or similar.
	mtimedb = self._mtimedb
	mtimedb["resume"]["mergelist"].remove(list(pkg))
	if not mtimedb["resume"]["mergelist"]:
		del mtimedb["resume"]
def _build_exit(self, build):
	"""Handle exit of a build job, scheduling its merge on success."""
	if build.returncode == os.EX_OK:
		merge = PackageMerge(merge=build)
		if not build.build_opts.buildpkgonly and \
			build.pkg in self._deep_system_deps:
			# Since dependencies on system packages are frequently
			# unspecified, merge them only when no builds are executing.
			self._merge_wait_queue.append(merge)
			merge.addStartListener(self._system_merge_started)
		merge.addExitListener(self._merge_exit)
		self._task_queues.merge.add(merge)
		self._status_display.merges = len(self._task_queues.merge)

	settings = build.settings
	build_dir = settings.get("PORTAGE_BUILDDIR")
	build_log = settings.get("PORTAGE_LOG_FILE")

	self._failed_pkgs.append(self._failed_pkg(
		build_dir=build_dir, build_log=build_log,
		returncode=build.returncode))
	self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")

	self._status_display.failed = len(self._failed_pkgs)
	self._deallocate_config(build.settings)
	self._status_display.running = self._jobs
11032 def _extract_exit(self, build):
11033 self._build_exit(build)
11035 def _task_complete(self, pkg):
11036 self._completed_tasks.add(pkg)
11037 self._unsatisfied_system_deps.discard(pkg)
11038 self._choose_pkg_return_early = False
# Interior of the merge-loop driver (method header not visible here):
# start prefetchers, queue packages, then run the main loop with lock
# quieting and elog capture active, restoring globals afterwards.
self._add_prefetchers()
self._add_packages()
pkg_queue = self._pkg_queue
failed_pkgs = self._failed_pkgs
portage.locks._quiet = self._background
portage.elog._emerge_elog_listener = self._elog_listener

self._main_loop_cleanup()
portage.locks._quiet = False
portage.elog._emerge_elog_listener = None

rval = failed_pkgs[-1].returncode
11061 def _main_loop_cleanup(self):
11062 del self._pkg_queue[:]
11063 self._completed_tasks.clear()
11064 self._deep_system_deps.clear()
11065 self._unsatisfied_system_deps.clear()
11066 self._choose_pkg_return_early = False
11067 self._status_display.reset()
11068 self._digraph = None
11069 self._task_queues.fetch.clear()
def _choose_pkg(self):
	"""
	Choose a task that has all it's dependencies satisfied.
	"""
	if self._choose_pkg_return_early:

	if self._digraph is None:
		# No graph: serialize only when jobs/merges are active and
		# parallelism was not explicitly requested via --nodeps.
		if (self._jobs or self._task_queues.merge) and \
			not ("--nodeps" in self.myopts and \
			(self._max_jobs is True or self._max_jobs > 1)):
			self._choose_pkg_return_early = True
		return self._pkg_queue.pop(0)

	if not (self._jobs or self._task_queues.merge):
		# Nothing is running, so any queued package may be chosen.
		return self._pkg_queue.pop(0)

	self._prune_digraph()

	# Prefer a package that does not depend on any scheduled merge.
	later = set(self._pkg_queue)
	for pkg in self._pkg_queue:
		if not self._dependent_on_scheduled_merges(pkg, later):

	if chosen_pkg is not None:
		self._pkg_queue.remove(chosen_pkg)

	if chosen_pkg is None:
		# There's no point in searching for a package to
		# choose until at least one of the existing jobs
		self._choose_pkg_return_early = True
def _dependent_on_scheduled_merges(self, pkg, later):
	"""
	Traverse the subgraph of the given packages deep dependencies
	to see if it contains any scheduled merges.
	@param pkg: a package to check dependencies for
	@param later: packages for which dependence should be ignored
		since they will be merged later than pkg anyway and therefore
		delaying the merge of pkg will not result in a more optimal
	@returns: True if the package is dependent, False otherwise.
	"""
	graph = self._digraph
	completed_tasks = self._completed_tasks

	# Iterative traversal of pkg's dependency subgraph.
	traversed_nodes = set([pkg])
	direct_deps = graph.child_nodes(pkg)
	node_stack = direct_deps
	direct_deps = frozenset(direct_deps)
	node = node_stack.pop()
	if node in traversed_nodes:
	traversed_nodes.add(node)
	# A node counts as dependent unless it is an installed nomerge,
	# an indirect uninstall, or already completed.
	if not ((node.installed and node.operation == "nomerge") or \
		(node.operation == "uninstall" and \
		node not in direct_deps) or \
		node in completed_tasks or \
		node_stack.extend(graph.child_nodes(node))
def _allocate_config(self, root):
	"""
	Allocate a unique config instance for a task in order
	to prevent interference between parallel tasks.
	"""
	if self._config_pool[root]:
		# Reuse a pooled instance when one is available.
		temp_settings = self._config_pool[root].pop()
	temp_settings = portage.config(clone=self.pkgsettings[root])
	# Since config.setcpv() isn't guaranteed to call config.reset() due to
	# performance reasons, call it here to make sure all settings from the
	# previous package get flushed out (such as PORTAGE_LOG_FILE).
	temp_settings.reload()
	temp_settings.reset()
	return temp_settings
11166 def _deallocate_config(self, settings):
11167 self._config_pool[settings["ROOT"]].append(settings)
def _main_loop(self):
	"""Drive scheduling and event polling until all work is done."""
	# Only allow 1 job max if a restart is scheduled
	# due to portage update.
	if self._is_restart_scheduled() or \
		self._opts_no_background.intersection(self.myopts):
		self._set_max_jobs(1)

	merge_queue = self._task_queues.merge

	while self._schedule():
		if self._poll_event_handlers:

	# Drain any remaining events once no jobs or merges are pending.
	if not (self._jobs or merge_queue):
	if self._poll_event_handlers:
11190 def _keep_scheduling(self):
11191 return bool(self._pkg_queue and \
11192 not (self._failed_pkgs and not self._build_opts.fetchonly))
def _schedule_tasks(self):
	"""Run one scheduling pass; return True while scheduling continues."""
	# When the number of jobs drops to zero, process all waiting merges.
	if not self._jobs and self._merge_wait_queue:
		for task in self._merge_wait_queue:
			task.addExitListener(self._merge_wait_exit_handler)
			self._task_queues.merge.add(task)
		self._status_display.merges = len(self._task_queues.merge)
		self._merge_wait_scheduled.extend(self._merge_wait_queue)
		del self._merge_wait_queue[:]

	self._schedule_tasks_imp()
	self._status_display.display()

	for q in self._task_queues.values():

	# Cancel prefetchers if they're the only reason
	# the main poll loop is still running.
	if self._failed_pkgs and not self._build_opts.fetchonly and \
		not (self._jobs or self._task_queues.merge) and \
		self._task_queues.fetch:
		self._task_queues.fetch.clear()

	self._schedule_tasks_imp()
	self._status_display.display()

	return self._keep_scheduling()
def _job_delay(self):
	"""
	@returns: True if job scheduling should be delayed, False otherwise.
	"""
	if self._jobs and self._max_load is not None:

	current_time = time.time()

	# Back off exponentially with the number of running jobs,
	# capped at _job_delay_max.
	delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
	if delay > self._job_delay_max:
		delay = self._job_delay_max
	if (current_time - self._previous_job_start_time) < delay:
def _schedule_tasks_imp(self):
	"""
	@returns: True if state changed, False otherwise.
	"""
	if not self._keep_scheduling():
		return bool(state_change)

	# Do not start new work while constrained: a merge is waiting,
	# system deps are unsatisfied, or the job limit is reached.
	if self._choose_pkg_return_early or \
		self._merge_wait_scheduled or \
		(self._jobs and self._unsatisfied_system_deps) or \
		not self._can_add_job() or \
		return bool(state_change)

	pkg = self._choose_pkg()
	return bool(state_change)

	if not pkg.installed:
		self._pkg_count.curval += 1

	task = self._task(pkg)

	# Uninstall-style tasks merge directly without a build job.
	merge = PackageMerge(merge=task)
	merge.addExitListener(self._merge_exit)
	self._task_queues.merge.add(merge)

	self._previous_job_start_time = time.time()
	self._status_display.running = self._jobs
	task.addExitListener(self._extract_exit)
	self._task_queues.jobs.add(task)

	self._previous_job_start_time = time.time()
	self._status_display.running = self._jobs
	task.addExitListener(self._build_exit)
	self._task_queues.jobs.add(task)

	return bool(state_change)
def _task(self, pkg):
	"""Create the MergeListItem task for the given package."""
	pkg_to_replace = None
	if pkg.operation != "uninstall":
		# Look up the currently installed package in the same slot,
		# if any, so the merge can treat it as a replacement.
		vardb = pkg.root_config.trees["vartree"].dbapi
		previous_cpv = vardb.match(pkg.slot_atom)
		previous_cpv = previous_cpv.pop()
		pkg_to_replace = self._pkg(previous_cpv,
			"installed", pkg.root_config, installed=True)

	task = MergeListItem(args_set=self._args_set,
		background=self._background, binpkg_opts=self._binpkg_opts,
		build_opts=self._build_opts,
		config_pool=self._ConfigPool(pkg.root,
		self._allocate_config, self._deallocate_config),
		emerge_opts=self.myopts,
		find_blockers=self._find_blockers(pkg), logger=self._logger,
		mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
		pkg_to_replace=pkg_to_replace,
		prefetcher=self._prefetchers.get(pkg),
		scheduler=self._sched_iface,
		settings=self._allocate_config(pkg.root),
		statusMessage=self._status_msg,
		world_atom=self._world_atom)
def _failed_pkg_msg(self, failed_pkg, action, preposition):
	"""Report a package failure through the status display."""
	pkg = failed_pkg.pkg
	parts = [bad("Failed"), " to ", action, " ",
		colorize("INFORM", pkg.cpv)]
	if pkg.root != "/":
		parts.append(" %s %s" % (preposition, pkg.root))
	log_path = self._locate_failure_log(failed_pkg)
	if log_path is not None:
		parts.append(", Log file:")
	self._status_msg("".join(parts))
	if log_path is not None:
		self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
11340 def _status_msg(self, msg):
11342 Display a brief status message (no newlines) in the status display.
11343 This is called by tasks to provide feedback to the user. This
11344 delegates the resposibility of generating \r and \n control characters,
11345 to guarantee that lines are created or erased when necessary and
11349 @param msg: a brief status message (no newlines allowed)
11351 if not self._background:
11352 writemsg_level("\n")
11353 self._status_display.displayMessage(msg)
def _save_resume_list(self):
	"""
	Do this before verifying the ebuild Manifests since it might
	be possible for the user to use --resume --skipfirst get past
	a non-essential package with a broken digest.
	"""
	mtimedb = self._mtimedb
	# Persist only real merge operations; blockers and nomerge
	# entries are not resumable.
	mtimedb["resume"]["mergelist"] = [list(x) \
		for x in self._mergelist \
		if isinstance(x, Package) and x.operation == "merge"]
# NOTE(review): numbered listing with interior lines elided; verify against
# upstream before changing code here.
11368 def _calc_resume_list(self):
# Recompute the resume merge list, dropping entries whose dependencies can
# no longer be satisfied; reports dropped packages via elog.
11370 Use the current resume list to calculate a new one,
11371 dropping any packages with unsatisfied deps.
11373 @returns: True if successful, False otherwise.
11375 print colorize("GOOD", "*** Resuming merge...")
11377 if self._show_list():
11378 if "--tree" in self.myopts:
11379 portage.writemsg_stdout("\n" + \
11380 darkgreen("These are the packages that " + \
11381 "would be merged, in reverse order:\n\n"))
11384 portage.writemsg_stdout("\n" + \
11385 darkgreen("These are the packages that " + \
11386 "would be merged, in order:\n\n"))
# Spinner is suppressed for --quiet and pointless for --nodeps.
11388 show_spinner = "--quiet" not in self.myopts and \
11389 "--nodeps" not in self.myopts
11392 print "Calculating dependencies ",
11394 myparams = create_depgraph_params(self.myopts, None)
# Rebuild the dependency graph from the stored resume list; dropped_tasks
# collects packages that had to be discarded.
11398 success, mydepgraph, dropped_tasks = resume_depgraph(
11399 self.settings, self.trees, self._mtimedb, self.myopts,
11400 myparams, self._spinner)
11401 except depgraph.UnsatisfiedResumeDep, exc:
11402 # rename variable to avoid python-3.0 error:
11403 # SyntaxError: can not delete variable 'e' referenced in nested
11406 mydepgraph = e.depgraph
11407 dropped_tasks = set()
11410 print "\b\b... done!"
# Deferred error report: queued so it is printed after elog mod_echo output.
11413 def unsatisfied_resume_dep_msg():
11414 mydepgraph.display_problems()
11415 out = portage.output.EOutput()
11416 out.eerror("One or more packages are either masked or " + \
11417 "have missing dependencies:")
11420 show_parents = set()
11421 for dep in e.value:
# Only report each parent package once.
11422 if dep.parent in show_parents:
11424 show_parents.add(dep.parent)
11425 if dep.atom is None:
11426 out.eerror(indent + "Masked package:")
11427 out.eerror(2 * indent + str(dep.parent))
11430 out.eerror(indent + str(dep.atom) + " pulled in by:")
11431 out.eerror(2 * indent + str(dep.parent))
11433 msg = "The resume list contains packages " + \
11434 "that are either masked or have " + \
11435 "unsatisfied dependencies. " + \
11436 "Please restart/continue " + \
11437 "the operation manually, or use --skipfirst " + \
11438 "to skip the first package in the list and " + \
11439 "any other packages that may be " + \
11440 "masked or have missing dependencies."
11441 for line in textwrap.wrap(msg, 72):
11443 self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
11446 if success and self._show_list():
11447 mylist = mydepgraph.altlist()
11449 if "--tree" in self.myopts:
11451 mydepgraph.display(mylist, favorites=self._favorites)
11454 self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11456 mydepgraph.display_problems()
# break_refs() severs references back into the depgraph so it can be
# garbage collected after the new mergelist is installed.
11458 mylist = mydepgraph.altlist()
11459 mydepgraph.break_refs(mylist)
11460 mydepgraph.break_refs(dropped_tasks)
11461 self._mergelist = mylist
11462 self._set_digraph(mydepgraph.schedulerGraph())
# Log each dropped merge task (for --keep-going) via elog so the user sees
# why a package from the resume list is no longer scheduled.
11465 for task in dropped_tasks:
11466 if not (isinstance(task, Package) and task.operation == "merge"):
11469 msg = "emerge --keep-going:" + \
11471 if pkg.root != "/":
11472 msg += " for %s" % (pkg.root,)
11473 msg += " dropped due to unsatisfied dependency."
11474 for line in textwrap.wrap(msg, msg_width):
11475 eerror(line, phase="other", key=pkg.cpv)
11476 settings = self.pkgsettings[pkg.root]
11477 # Ensure that log collection from $T is disabled inside
11478 # elog_process(), since any logs that might exist are
11480 settings.pop("T", None)
11481 portage.elog.elog_process(pkg.cpv, settings)
11482 self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
# NOTE(review): numbered listing; the return statements (lines 11491/11493,
# presumably "return True"/"return False") are elided — verify upstream.
11486 def _show_list(self):
# Whether the package list preview should be displayed, based on the
# user's command-line options.
11487 myopts = self.myopts
11488 if "--quiet" not in myopts and \
11489 ("--ask" in myopts or "--tree" in myopts or \
11490 "--verbose" in myopts):
# NOTE(review): numbered listing with interior lines elided (early returns,
# try/finally around the lock, etc.); verify upstream before editing.
11494 def _world_atom(self, pkg):
# Record pkg in the "world" favorites file when appropriate.
11496 Add the package to the world file, but only if
11497 it's supposed to be added. Otherwise, do nothing.
# Never touch world for pretend/fetch-only style operations.
11500 if set(("--buildpkgonly", "--fetchonly",
11502 "--oneshot", "--onlydeps",
11503 "--pretend")).intersection(self.myopts):
# Only packages merged to the target root belong in this world file.
11506 if pkg.root != self.target_root:
# Only packages the user explicitly asked for are recorded.
11509 args_set = self._args_set
11510 if not args_set.findAtomForPackage(pkg):
11513 logger = self._logger
11514 pkg_count = self._pkg_count
11515 root_config = pkg.root_config
11516 world_set = root_config.sets["world"]
# Lock/load are optional capabilities of the world set implementation,
# hence the hasattr() probes.
11517 world_locked = False
11518 if hasattr(world_set, "lock"):
11520 world_locked = True
11523 if hasattr(world_set, "load"):
11524 world_set.load() # maybe it's changed on disk
11526 atom = create_world_atom(pkg, args_set, root_config)
11528 if hasattr(world_set, "add"):
11529 self._status_msg(('Recording %s in "world" ' + \
11530 'favorites file...') % atom)
11531 logger.log(" === (%s of %s) Updating world file (%s)" % \
11532 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11533 world_set.add(atom)
11535 writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11536 (atom,), level=logging.WARN, noiselevel=-1)
# presumably an elided finally-block unlocks world_set when world_locked —
# TODO confirm against upstream
# NOTE(review): numbered listing with interior lines elided (docstring
# close, the "if installed:" branch, the return statements); verify upstream.
11541 def _pkg(self, cpv, type_name, root_config, installed=False):
11543 Get a package instance from the cache, or create a new
11544 one if necessary. Raises KeyError from aux_get if it
11545 fails for some reason (package does not exist or is
11548 operation = "merge"
11550 operation = "nomerge"
11552 if self._digraph is not None:
11553 # Reuse existing instance when available.
11554 pkg = self._digraph.get(
11555 (type_name, root_config.root, cpv, operation))
11556 if pkg is not None:
# Cache miss: build a fresh Package from the matching db's metadata.
11559 tree_type = depgraph.pkg_tree_map[type_name]
11560 db = root_config.trees[tree_type].dbapi
11561 db_keys = list(self.trees[root_config.root][
11562 tree_type].dbapi._aux_cache_keys)
11563 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11564 pkg = Package(cpv=cpv, metadata=metadata,
11565 root_config=root_config, installed=installed)
# Ebuilds get their USE/CHOST resolved through the per-root config.
11566 if type_name == "ebuild":
11567 settings = self.pkgsettings[root_config.root]
11568 settings.setcpv(pkg)
11569 pkg.metadata["USE"] = settings["PORTAGE_USE"]
11570 pkg.metadata['CHOST'] = settings.get('CHOST', '')
# NOTE(review): numbered listing with interior lines elided throughout this
# class (e.g. the "def run(self):" header before 11632); verify upstream.
11574 class MetadataRegen(PollScheduler):
# Parallel regenerator for the ebuild metadata cache, driven by the
# PollScheduler job machinery.
11576 def __init__(self, portdb, cp_iter=None, max_jobs=None, max_load=None):
11577 PollScheduler.__init__(self)
11578 self._portdb = portdb
11579 self._global_cleanse = False
11580 if cp_iter is None:
11581 cp_iter = self._iter_every_cp()
11582 # We can globally cleanse stale cache only if we
11583 # iterate over every single cp.
11584 self._global_cleanse = True
11585 self._cp_iter = cp_iter
# max_jobs default (elided lines 11588-11589) presumably falls back to 1 —
# TODO confirm
11587 if max_jobs is None:
11590 self._max_jobs = max_jobs
11591 self._max_load = max_load
11592 self._sched_iface = self._sched_iface_class(
11593 register=self._register,
11594 schedule=self._schedule_wait,
11595 unregister=self._unregister)
11597 self._valid_pkgs = set()
11598 self._cp_set = set()
11599 self._process_iter = self._iter_metadata_processes()
11600 self.returncode = os.EX_OK
11601 self._error_count = 0
# Yields every category/package name; sorted reverse so pop() from the
# list tail produces ascending order cheaply.
11603 def _iter_every_cp(self):
11604 every_cp = self._portdb.cp_all()
11605 every_cp.sort(reverse=True)
11608 yield every_cp.pop()
# Yields one EbuildMetadataPhase-style process per cpv that needs its
# metadata (re)generated.
11612 def _iter_metadata_processes(self):
11613 portdb = self._portdb
11614 valid_pkgs = self._valid_pkgs
11615 cp_set = self._cp_set
11617 for cp in self._cp_iter:
11619 portage.writemsg_stdout("Processing %s\n" % cp)
11620 cpv_list = portdb.cp_list(cp)
11621 for cpv in cpv_list:
11622 valid_pkgs.add(cpv)
11623 ebuild_path, repo_path = portdb.findname2(cpv)
# _metadata_process() returns None when the cached entry is still valid.
11624 metadata_process = portdb._metadata_process(
11625 cpv, ebuild_path, repo_path)
11626 if metadata_process is None:
11628 yield metadata_process
# --- body of run() (its def line is elided in this listing) ---
11632 portdb = self._portdb
11633 from portage.cache.cache_errors import CacheError
11636 while self._schedule():
# After all jobs finish, compute "dead_nodes": cache entries whose cpv no
# longer has a corresponding ebuild, so they can be pruned.
11642 if self._global_cleanse:
11643 for mytree in portdb.porttrees:
11645 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11646 except CacheError, e:
11647 portage.writemsg("Error listing cache entries for " + \
11648 "'%s': %s, continuing...\n" % (mytree, e),
# Partial run: only consider cache entries for the cps actually processed.
11654 cp_set = self._cp_set
11655 cpv_getkey = portage.cpv_getkey
11656 for mytree in portdb.porttrees:
11658 dead_nodes[mytree] = set(cpv for cpv in \
11659 portdb.auxdb[mytree].iterkeys() \
11660 if cpv_getkey(cpv) in cp_set)
11661 except CacheError, e:
11662 portage.writemsg("Error listing cache entries for " + \
11663 "'%s': %s, continuing...\n" % (mytree, e),
# Entries that still have a live ebuild are not dead.
11670 for y in self._valid_pkgs:
11671 for mytree in portdb.porttrees:
11672 if portdb.findname2(y, mytree=mytree)[0]:
11673 dead_nodes[mytree].discard(y)
11675 for mytree, nodes in dead_nodes.iteritems():
11676 auxdb = portdb.auxdb[mytree]
11680 except (KeyError, CacheError):
# Start as many metadata processes as job/load limits allow.
11683 def _schedule_tasks(self):
11686 @returns: True if there may be remaining tasks to schedule,
11689 while self._can_add_job():
11691 metadata_process = self._process_iter.next()
11692 except StopIteration:
11696 metadata_process.scheduler = self._sched_iface
11697 metadata_process.addExitListener(self._metadata_exit)
11698 metadata_process.start()
# Exit listener: track failures so the failed cpv is excluded from cleanup
# bookkeeping and the overall return code reflects the error.
11701 def _metadata_exit(self, metadata_process):
11703 if metadata_process.returncode != os.EX_OK:
11704 self.returncode = 1
11705 self._error_count += 1
11706 self._valid_pkgs.discard(metadata_process.cpv)
11707 portage.writemsg("Error processing %s, continuing...\n" % \
11708 (metadata_process.cpv,))
# NOTE(review): numbered listing; docstring delimiters and the guard before
# "self.status = pargs[0]" (line 11719, presumably "if pargs:") are elided.
11711 class UninstallFailure(portage.exception.PortageException):
11713 An instance of this class is raised by unmerge() when
11714 an uninstallation fails.
11717 def __init__(self, *pargs):
11718 portage.exception.PortageException.__init__(self, pargs)
# status carries the failing portage.unmerge() return code when provided.
11720 self.status = pargs[0]
# NOTE(review): numbered listing with many interior lines elided (try/
# finally pairs, else branches, continues/returns). Do not edit logic here
# without diffing against the upstream file.
11722 def unmerge(root_config, myopts, unmerge_action,
11723 unmerge_files, ldpath_mtimes, autoclean=0,
11724 clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11725 scheduler=None, writemsg_level=portage.util.writemsg_level):
# Top-level driver for "emerge unmerge/prune/clean/depclean" package
# removal: resolves arguments to installed cpvs, classifies each as
# selected/protected/omitted, previews, then performs the unmerges.
11727 quiet = "--quiet" in myopts
11728 settings = root_config.settings
11729 sets = root_config.sets
11730 vartree = root_config.trees["vartree"]
11731 candidate_catpkgs=[]
11733 xterm_titles = "notitles" not in settings.features
11734 out = portage.output.EOutput()
# Local helper (header elided): build/cache Package instances for
# installed cpvs from the vartree metadata cache.
11736 db_keys = list(vartree.dbapi._aux_cache_keys)
11739 pkg = pkg_cache.get(cpv)
11741 pkg = Package(cpv=cpv, installed=True,
11742 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11743 root_config=root_config,
11744 type_name="installed")
11745 pkg_cache[cpv] = pkg
# Lock the installed-package database (vdb) while computing the removal
# plan, if it is writable.
11748 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11750 # At least the parent needs to exist for the lock file.
11751 portage.util.ensure_dirs(vdb_path)
11752 except portage.exception.PortageException:
11756 if os.access(vdb_path, os.W_OK):
11757 vdb_lock = portage.locks.lockdir(vdb_path)
# Expand the system set: resolve virtuals to their single installed
# provider where unambiguous, else keep the virtual cp itself.
11758 realsyslist = sets["system"].getAtoms()
11760 for x in realsyslist:
11761 mycp = portage.dep_getkey(x)
11762 if mycp in settings.getvirtuals():
11764 for provider in settings.getvirtuals()[mycp]:
11765 if vartree.dbapi.match(provider):
11766 providers.append(provider)
11767 if len(providers) == 1:
11768 syslist.extend(providers)
11770 syslist.append(mycp)
11772 mysettings = portage.config(clone=settings)
11774 if not unmerge_files:
11775 if unmerge_action == "unmerge":
11777 print bold("emerge unmerge") + " can only be used with specific package names"
11783 localtree = vartree
11784 # process all arguments and add all
11785 # valid db entries to candidate_catpkgs
11787 if not unmerge_files:
11788 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11790 #we've got command-line arguments
11791 if not unmerge_files:
11792 print "\nNo packages to unmerge have been provided.\n"
11794 for x in unmerge_files:
11795 arg_parts = x.split('/')
11796 if x[0] not in [".","/"] and \
11797 arg_parts[-1][-7:] != ".ebuild":
11798 #possible cat/pkg or dep; treat as such
11799 candidate_catpkgs.append(x)
11800 elif unmerge_action in ["prune","clean"]:
11801 print "\n!!! Prune and clean do not accept individual" + \
11802 " ebuilds as arguments;\n skipping.\n"
11805 # it appears that the user is specifying an installed
11806 # ebuild and we're in "unmerge" mode, so it's ok.
11807 if not os.path.exists(x):
11808 print "\n!!! The path '"+x+"' doesn't exist.\n"
# Translate a filesystem path inside the vdb into an "=cat/pkg-ver" atom.
11811 absx = os.path.abspath(x)
11812 sp_absx = absx.split("/")
11813 if sp_absx[-1][-7:] == ".ebuild":
11815 absx = "/".join(sp_absx)
11817 sp_absx_len = len(sp_absx)
11819 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11820 vdb_len = len(vdb_path)
11822 sp_vdb = vdb_path.split("/")
11823 sp_vdb_len = len(sp_vdb)
11825 if not os.path.exists(absx+"/CONTENTS"):
11826 print "!!! Not a valid db dir: "+str(absx)
11829 if sp_absx_len <= sp_vdb_len:
11830 # The Path is shorter... so it can't be inside the vdb.
11833 print "\n!!!",x,"cannot be inside "+ \
11834 vdb_path+"; aborting.\n"
11837 for idx in range(0,sp_vdb_len):
11838 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11841 print "\n!!!", x, "is not inside "+\
11842 vdb_path+"; aborting.\n"
11845 print "="+"/".join(sp_absx[sp_vdb_len:])
11846 candidate_catpkgs.append(
11847 "="+"/".join(sp_absx[sp_vdb_len:]))
11850 if (not "--quiet" in myopts):
11852 if settings["ROOT"] != "/":
11853 writemsg_level(darkgreen(newline+ \
11854 ">>> Using system located in ROOT tree %s\n" % \
11857 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11858 not ("--quiet" in myopts):
11859 writemsg_level(darkgreen(newline+\
11860 ">>> These are the packages that would be unmerged:\n"))
11862 # Preservation of order is required for --depclean and --prune so
11863 # that dependencies are respected. Use all_selected to eliminate
11864 # duplicate packages since the same package may be selected by
11867 all_selected = set()
11868 for x in candidate_catpkgs:
11869 # cycle through all our candidate deps and determine
11870 # what will and will not get unmerged
11872 mymatch = vartree.dbapi.match(x)
11873 except portage.exception.AmbiguousPackageName, errpkgs:
11874 print "\n\n!!! The short ebuild name \"" + \
11875 x + "\" is ambiguous. Please specify"
11876 print "!!! one of the following fully-qualified " + \
11877 "ebuild names instead:\n"
11878 for i in errpkgs[0]:
11879 print " " + green(i)
11883 if not mymatch and x[0] not in "<>=~":
11884 mymatch = localtree.dep_match(x)
11886 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11887 (x, unmerge_action), noiselevel=-1)
# One pkgmap slot per argument: selected = will be removed, protected =
# kept, omitted = other installed versions not touched by this argument.
11891 {"protected": set(), "selected": set(), "omitted": set()})
11892 mykey = len(pkgmap) - 1
11893 if unmerge_action=="unmerge":
11895 if y not in all_selected:
11896 pkgmap[mykey]["selected"].add(y)
11897 all_selected.add(y)
11898 elif unmerge_action == "prune":
11899 if len(mymatch) == 1:
11901 best_version = mymatch[0]
11902 best_slot = vartree.getslot(best_version)
11903 best_counter = vartree.dbapi.cpv_counter(best_version)
11904 for mypkg in mymatch[1:]:
11905 myslot = vartree.getslot(mypkg)
11906 mycounter = vartree.dbapi.cpv_counter(mypkg)
11907 if (myslot == best_slot and mycounter > best_counter) or \
11908 mypkg == portage.best([mypkg, best_version]):
11909 if myslot == best_slot:
11910 if mycounter < best_counter:
11911 # On slot collision, keep the one with the
11912 # highest counter since it is the most
11913 # recently installed.
11915 best_version = mypkg
11917 best_counter = mycounter
11918 pkgmap[mykey]["protected"].add(best_version)
11919 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11920 if mypkg != best_version and mypkg not in all_selected)
11921 all_selected.update(pkgmap[mykey]["selected"])
11923 # unmerge_action == "clean"
11925 for mypkg in mymatch:
11926 if unmerge_action == "clean":
11927 myslot = localtree.getslot(mypkg)
11929 # since we're pruning, we don't care about slots
11930 # and put all the pkgs in together
11932 if myslot not in slotmap:
11933 slotmap[myslot] = {}
11934 slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11936 for mypkg in vartree.dbapi.cp_list(
11937 portage.dep_getkey(mymatch[0])):
11938 myslot = vartree.getslot(mypkg)
11939 if myslot not in slotmap:
11940 slotmap[myslot] = {}
11941 slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
# Within each slot, protect the highest-counter (most recently merged)
# version and select the rest for removal.
11943 for myslot in slotmap:
11944 counterkeys = slotmap[myslot].keys()
11945 if not counterkeys:
11948 pkgmap[mykey]["protected"].add(
11949 slotmap[myslot][counterkeys[-1]])
11950 del counterkeys[-1]
11952 for counter in counterkeys[:]:
11953 mypkg = slotmap[myslot][counter]
11954 if mypkg not in mymatch:
11955 counterkeys.remove(counter)
11956 pkgmap[mykey]["protected"].add(
11957 slotmap[myslot][counter])
11959 #be pretty and get them in order of merge:
11960 for ckey in counterkeys:
11961 mypkg = slotmap[myslot][ckey]
11962 if mypkg not in all_selected:
11963 pkgmap[mykey]["selected"].add(mypkg)
11964 all_selected.add(mypkg)
11965 # ok, now the last-merged package
11966 # is protected, and the rest are selected
11967 numselected = len(all_selected)
11968 if global_unmerge and not numselected:
11969 portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11972 if not numselected:
11973 portage.writemsg_stdout(
11974 "\n>>> No packages selected for removal by " + \
11975 unmerge_action + "\n")
# presumably a finally-block: flush caches and release the vdb lock taken
# above — TODO confirm elided structure
11979 vartree.dbapi.flush_cache()
11980 portage.locks.unlockdir(vdb_lock)
11982 from portage.sets.base import EditablePackageSet
11984 # generate a list of package sets that are directly or indirectly listed in "world",
11985 # as there is no persistent list of "installed" sets
11986 installed_sets = ["world"]
11991 pos = len(installed_sets)
11992 for s in installed_sets[pos - 1:]:
11995 candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
11998 installed_sets += candidates
11999 installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
12002 # we don't want to unmerge packages that are still listed in user-editable package sets
12003 # listed in "world" as they would be remerged on the next update of "world" or the
12004 # relevant package sets.
12005 unknown_sets = set()
12006 for cp in xrange(len(pkgmap)):
12007 for cpv in pkgmap[cp]["selected"].copy():
12011 # It could have been uninstalled
12012 # by a concurrent process.
# Safety valve: never let portage unmerge itself on the live root.
12015 if unmerge_action != "clean" and \
12016 root_config.root == "/" and \
12017 portage.match_from_list(
12018 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
12019 msg = ("Not unmerging package %s since there is no valid " + \
12020 "reason for portage to unmerge itself.") % (pkg.cpv,)
12021 for line in textwrap.wrap(msg, 75):
12023 # adjust pkgmap so the display output is correct
12024 pkgmap[cp]["selected"].remove(cpv)
12025 all_selected.remove(cpv)
12026 pkgmap[cp]["protected"].add(cpv)
12030 for s in installed_sets:
12031 # skip sets that the user requested to unmerge, and skip world
12032 # unless we're unmerging a package set (as the package would be
12033 # removed from "world" later on)
12034 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
12038 if s in unknown_sets:
12040 unknown_sets.add(s)
12041 out = portage.output.EOutput()
12042 out.eerror(("Unknown set '@%s' in " + \
12043 "%svar/lib/portage/world_sets") % \
12044 (s, root_config.root))
12047 # only check instances of EditablePackageSet as other classes are generally used for
12048 # special purposes and can be ignored here (and are usually generated dynamically, so the
12049 # user can't do much about them anyway)
12050 if isinstance(sets[s], EditablePackageSet):
12052 # This is derived from a snippet of code in the
12053 # depgraph._iter_atoms_for_pkg() method.
12054 for atom in sets[s].iterAtomsForPackage(pkg):
12055 inst_matches = vartree.dbapi.match(atom)
12056 inst_matches.reverse() # descending order
12058 for inst_cpv in inst_matches:
12060 inst_pkg = _pkg(inst_cpv)
12062 # It could have been uninstalled
12063 # by a concurrent process.
12066 if inst_pkg.cp != atom.cp:
12068 if pkg >= inst_pkg:
12069 # This is descending order, and we're not
12070 # interested in any versions <= pkg given.
12072 if pkg.slot_atom != inst_pkg.slot_atom:
12073 higher_slot = inst_pkg
12075 if higher_slot is None:
12079 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
12080 #print colorize("WARN", "but still listed in the following package sets:")
12081 #print " %s\n" % ", ".join(parents)
12082 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
12083 print colorize("WARN", "still referenced by the following package sets:")
12084 print " %s\n" % ", ".join(parents)
12085 # adjust pkgmap so the display output is correct
12086 pkgmap[cp]["selected"].remove(cpv)
12087 all_selected.remove(cpv)
12088 pkgmap[cp]["protected"].add(cpv)
12092 numselected = len(all_selected)
12093 if not numselected:
12095 "\n>>> No packages selected for removal by " + \
12096 unmerge_action + "\n")
12099 # Unmerge order only matters in some cases
# When order does not matter, regroup the per-argument pkgmap entries by
# category/package and sort for a tidy display.
12103 selected = d["selected"]
12106 cp = portage.cpv_getkey(iter(selected).next())
12107 cp_dict = unordered.get(cp)
12108 if cp_dict is None:
12110 unordered[cp] = cp_dict
12113 for k, v in d.iteritems():
12114 cp_dict[k].update(v)
12115 pkgmap = [unordered[cp] for cp in sorted(unordered)]
# Preview pass: fill "omitted", warn about system-profile packages, and
# print the selected/protected/omitted triple per cp.
12117 for x in xrange(len(pkgmap)):
12118 selected = pkgmap[x]["selected"]
12121 for mytype, mylist in pkgmap[x].iteritems():
12122 if mytype == "selected":
12124 mylist.difference_update(all_selected)
12125 cp = portage.cpv_getkey(iter(selected).next())
12126 for y in localtree.dep_match(cp):
12127 if y not in pkgmap[x]["omitted"] and \
12128 y not in pkgmap[x]["selected"] and \
12129 y not in pkgmap[x]["protected"] and \
12130 y not in all_selected:
12131 pkgmap[x]["omitted"].add(y)
12132 if global_unmerge and not pkgmap[x]["selected"]:
12133 #avoid cluttering the preview printout with stuff that isn't getting unmerged
12135 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
12136 writemsg_level(colorize("BAD","\a\n\n!!! " + \
12137 "'%s' is part of your system profile.\n" % cp),
12138 level=logging.WARNING, noiselevel=-1)
12139 writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
12140 "be damaging to your system.\n\n"),
12141 level=logging.WARNING, noiselevel=-1)
12142 if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
12143 countdown(int(settings["EMERGE_WARNING_DELAY"]),
12144 colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
12146 writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
12148 writemsg_level(bold(cp) + ": ", noiselevel=-1)
12149 for mytype in ["selected","protected","omitted"]:
12151 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
12152 if pkgmap[x][mytype]:
12153 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
12154 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12155 for pn, ver, rev in sorted_pkgs:
12159 myversion = ver + "-" + rev
12160 if mytype == "selected":
12162 colorize("UNMERGE_WARN", myversion + " "),
12166 colorize("GOOD", myversion + " "), noiselevel=-1)
12168 writemsg_level("none ", noiselevel=-1)
12170 writemsg_level("\n", noiselevel=-1)
12172 writemsg_level("\n", noiselevel=-1)
12174 writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12175 " packages are slated for removal.\n")
12176 writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12177 " and " + colorize("GOOD", "'omitted'") + \
12178 " packages will not be removed.\n\n")
12180 if "--pretend" in myopts:
12181 #we're done... return
12183 if "--ask" in myopts:
12184 if userquery("Would you like to unmerge these packages?")=="No":
12185 # enter pretend mode for correct formatting of results
12186 myopts["--pretend"] = True
12191 #the real unmerging begins, after a short delay....
12192 if clean_delay and not autoclean:
12193 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12195 for x in xrange(len(pkgmap)):
12196 for y in pkgmap[x]["selected"]:
12197 writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12198 emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12199 mysplit = y.split("/")
12201 retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12202 mysettings, unmerge_action not in ["clean","prune"],
12203 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12204 scheduler=scheduler)
12206 if retval != os.EX_OK:
12207 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12209 raise UninstallFailure(retval)
# On success, drop the package (and, for set-based invocations, the set
# references) from the world file.
12212 if clean_world and hasattr(sets["world"], "cleanPackage"):
12213 sets["world"].cleanPackage(vartree.dbapi, y)
12214 emergelog(xterm_titles, " >>> unmerge success: "+y)
12215 if clean_world and hasattr(sets["world"], "remove"):
12216 for s in root_config.setconfig.active:
12217 sets["world"].remove(SETPREFIX+s)
# NOTE(review): numbered listing with interior lines elided (try blocks,
# else branches, counters initialization); verify upstream before editing.
12220 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
# Regenerate the GNU info directory index ("dir" files) for any info dir
# whose mtime changed since the last run, tracking results in prev_mtimes.
12222 if os.path.exists("/usr/bin/install-info"):
12223 out = portage.output.EOutput()
12228 inforoot=normpath(root+z)
12229 if os.path.isdir(inforoot):
# mtime comparison decides whether this dir needs regeneration.
12230 infomtime = long(os.stat(inforoot).st_mtime)
12231 if inforoot not in prev_mtimes or \
12232 prev_mtimes[inforoot] != infomtime:
12233 regen_infodirs.append(inforoot)
12235 if not regen_infodirs:
12236 portage.writemsg_stdout("\n")
12237 out.einfo("GNU info directory index is up-to-date.")
12239 portage.writemsg_stdout("\n")
12240 out.einfo("Regenerating GNU info directory index...")
12242 dir_extensions = ("", ".gz", ".bz2")
12246 for inforoot in regen_infodirs:
12250 if not os.path.isdir(inforoot) or \
12251 not os.access(inforoot, os.W_OK):
12254 file_list = os.listdir(inforoot)
12256 dir_file = os.path.join(inforoot, "dir")
12257 moved_old_dir = False
12258 processed_count = 0
12259 for x in file_list:
12260 if x.startswith(".") or \
12261 os.path.isdir(os.path.join(inforoot, x)):
# Skip the index files themselves (dir, dir.gz, dir.old, ...).
12263 if x.startswith("dir"):
12265 for ext in dir_extensions:
12266 if x == "dir" + ext or \
12267 x == "dir" + ext + ".old":
# Before the first real file, move any existing dir files aside so
# install-info builds a fresh index.
12272 if processed_count == 0:
12273 for ext in dir_extensions:
12275 os.rename(dir_file + ext, dir_file + ext + ".old")
12276 moved_old_dir = True
12277 except EnvironmentError, e:
12278 if e.errno != errno.ENOENT:
12281 processed_count += 1
12282 myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12283 existsstr="already exists, for file `"
12285 if re.search(existsstr,myso):
12286 # Already exists... Don't increment the count for this.
12288 elif myso[:44]=="install-info: warning: no info dir entry in ":
12289 # This info file doesn't contain a DIR-header: install-info produces this
12290 # (harmless) warning (the --quiet switch doesn't seem to work).
12291 # Don't increment the count for this.
12294 badcount=badcount+1
12295 errmsg += myso + "\n"
12298 if moved_old_dir and not os.path.exists(dir_file):
12299 # We didn't generate a new dir file, so put the old file
12300 # back where it was originally found.
12301 for ext in dir_extensions:
12303 os.rename(dir_file + ext + ".old", dir_file + ext)
12304 except EnvironmentError, e:
12305 if e.errno != errno.ENOENT:
12309 # Clean dir.old cruft so that they don't prevent
12310 # unmerge of otherwise empty directories.
12311 for ext in dir_extensions:
12313 os.unlink(dir_file + ext + ".old")
12314 except EnvironmentError, e:
12315 if e.errno != errno.ENOENT:
12319 #update mtime so we can potentially avoid regenerating.
12320 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12323 out.eerror("Processed %d info files; %d errors." % \
12324 (icount, badcount))
12325 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12328 out.einfo("Processed %d info files." % (icount,))
# NOTE(review): numbered listing with interior lines elided; verify
# upstream before editing.
12331 def display_news_notification(root_config, myopts):
# Print a notice for each repository that has unread GNU-style news items.
12332 target_root = root_config.root
12333 trees = root_config.trees
12334 settings = trees["vartree"].settings
12335 portdb = trees["porttree"].dbapi
12336 vardb = trees["vartree"].dbapi
12337 NEWS_PATH = os.path.join("metadata", "news")
12338 UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12339 newsReaderDisplay = False
# With --pretend the unread-state files are not updated.
12340 update = "--pretend" not in myopts
12342 for repo in portdb.getRepositories():
12343 unreadItems = checkUpdatedNewsItems(
12344 portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12346 if not newsReaderDisplay:
12347 newsReaderDisplay = True
12349 print colorize("WARN", " * IMPORTANT:"),
12350 print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12353 if newsReaderDisplay:
12354 print colorize("WARN", " *"),
12355 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
# NOTE(review): numbered listing with interior lines elided (linkmap.rebuild
# call, MAX_DISPLAY constant, several else branches); verify upstream.
12358 def display_preserved_libs(vardbapi):
# Warn about preserved libraries still registered in the plib registry and
# show which packages/files still consume them.
12361 # Ensure the registry is consistent with existing files.
12362 vardbapi.plib_registry.pruneNonExisting()
12364 if vardbapi.plib_registry.hasEntries():
12366 print colorize("WARN", "!!!") + " existing preserved libs:"
12367 plibdata = vardbapi.plib_registry.getPreservedLibs()
12368 linkmap = vardbapi.linkmap
12371 linkmap_broken = False
# Rebuilding the linkmap needs external tools; degrade gracefully when
# they are missing.
12375 except portage.exception.CommandNotFound, e:
12376 writemsg_level("!!! Command Not Found: %s\n" % (e,),
12377 level=logging.ERROR, noiselevel=-1)
12379 linkmap_broken = True
12381 search_for_owners = set()
12382 for cpv in plibdata:
12383 internal_plib_keys = set(linkmap._obj_key(f) \
12384 for f in plibdata[cpv])
12385 for f in plibdata[cpv]:
12386 if f in consumer_map:
12389 for c in linkmap.findConsumers(f):
12390 # Filter out any consumers that are also preserved libs
12391 # belonging to the same package as the provider.
12392 if linkmap._obj_key(c) not in internal_plib_keys:
12393 consumers.append(c)
12395 consumer_map[f] = consumers
# MAX_DISPLAY+1 so "used by N other files" can be distinguished below.
12396 search_for_owners.update(consumers[:MAX_DISPLAY+1])
12398 owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12400 for cpv in plibdata:
12401 print colorize("WARN", ">>>") + " package: %s" % cpv
12403 for f in plibdata[cpv]:
# Group hardlinks/symlinks that resolve to the same object together.
12404 obj_key = linkmap._obj_key(f)
12405 alt_paths = samefile_map.get(obj_key)
12406 if alt_paths is None:
12408 samefile_map[obj_key] = alt_paths
12411 for alt_paths in samefile_map.itervalues():
12412 alt_paths = sorted(alt_paths)
12413 for p in alt_paths:
12414 print colorize("WARN", " * ") + " - %s" % (p,)
12416 consumers = consumer_map.get(f, [])
12417 for c in consumers[:MAX_DISPLAY]:
12418 print colorize("WARN", " * ") + " used by %s (%s)" % \
12419 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12420 if len(consumers) == MAX_DISPLAY + 1:
12421 print colorize("WARN", " * ") + " used by %s (%s)" % \
12422 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12423 for x in owners.get(consumers[MAX_DISPLAY], [])))
12424 elif len(consumers) > MAX_DISPLAY:
12425 print colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY)
12426 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
# NOTE(review): numbered listing; the try:/else: lines around the import
# (12437/12441) are elided — verify upstream before editing.
12429 def _flush_elog_mod_echo():
12431 Dump the mod_echo output now so that our other
12432 notifications are shown last.
12434 @returns: True if messages were shown, False otherwise.
12436 messages_shown = False
12438 from portage.elog import mod_echo
12439 except ImportError:
12440 pass # happens during downgrade to a version without the module
12442 messages_shown = bool(mod_echo._items)
12443 mod_echo.finalize()
12444 return messages_shown
# NOTE(review): numbered listing with interior lines elided (the final
# sys.exit(retval) and several try/finally lines); verify upstream.
12446 def post_emerge(root_config, myopts, mtimedb, retval):
12448 Misc. things to run at the end of a merge session.
12451 Update Config Files
12454 Display preserved libs warnings
12457 @param trees: A dictionary mapping each ROOT to it's package databases
12459 @param mtimedb: The mtimeDB to store data needed across merge invocations
12460 @type mtimedb: MtimeDB class instance
12461 @param retval: Emerge's return value
12465 1. Calls sys.exit(retval)
12468 target_root = root_config.root
12469 trees = { target_root : root_config.trees }
12470 vardbapi = trees[target_root]["vartree"].dbapi
12471 settings = vardbapi.settings
12472 info_mtimes = mtimedb["info"]
12474 # Load the most current variables from ${ROOT}/etc/profile.env
12477 settings.regenerate()
12480 config_protect = settings.get("CONFIG_PROTECT","").split()
12481 infodirs = settings.get("INFOPATH","").split(":") + \
12482 settings.get("INFODIR","").split(":")
12486 if retval == os.EX_OK:
12487 exit_msg = " *** exiting successfully."
12489 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12490 emergelog("notitles" not in settings.features, exit_msg)
12492 _flush_elog_mod_echo()
# If the vdb did not change (counter hash matches) or this was a pretend
# run, only the news notification is needed.
12494 counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12495 if "--pretend" in myopts or (counter_hash is not None and \
12496 counter_hash == vardbapi._counter_hash()):
12497 display_news_notification(root_config, myopts)
12498 # If vdb state has not changed then there's nothing else to do.
12501 vdb_path = os.path.join(target_root, portage.VDB_PATH)
12502 portage.util.ensure_dirs(vdb_path)
12504 if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
12505 vdb_lock = portage.locks.lockdir(vdb_path)
12509 if "noinfo" not in settings.features:
12510 chk_updated_info_files(target_root,
12511 infodirs, info_mtimes, retval)
12515 portage.locks.unlockdir(vdb_lock)
12517 chk_updated_cfg_files(target_root, config_protect)
12519 display_news_notification(root_config, myopts)
12520 if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
12521 display_preserved_libs(vardbapi)
# per the docstring, an elided trailing line presumably calls
# sys.exit(retval) — TODO confirm
# NOTE(review): numbered listing with interior lines elided (counters,
# continue statements, the per-file count print arguments); verify upstream.
12526 def chk_updated_cfg_files(target_root, config_protect):
# Scan CONFIG_PROTECT locations for pending ._cfg????_* files and tell the
# user how many config updates await merging.
12528 #number of directories with some protect files in them
12530 for x in config_protect:
12531 x = os.path.join(target_root, x.lstrip(os.path.sep))
12532 if not os.access(x, os.W_OK):
12533 # Avoid Permission denied errors generated
12537 mymode = os.lstat(x).st_mode
12540 if stat.S_ISLNK(mymode):
12541 # We want to treat it like a directory if it
12542 # is a symlink to an existing directory.
12544 real_mode = os.stat(x).st_mode
12545 if stat.S_ISDIR(real_mode):
# Directories are scanned recursively; single files only in their own dir.
12549 if stat.S_ISDIR(mymode):
12550 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12552 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12553 os.path.split(x.rstrip(os.path.sep))
12554 mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12555 a = commands.getstatusoutput(mycommand)
12557 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12559 # Show the error message alone, sending stdout to /dev/null.
12560 os.system(mycommand + " 1>/dev/null")
# find -print0 output: NUL-separated, with one trailing empty element.
12562 files = a[1].split('\0')
12563 # split always produces an empty string as the last element
12564 if files and not files[-1]:
12568 print "\n"+colorize("WARN", " * IMPORTANT:"),
12569 if stat.S_ISDIR(mymode):
12570 print "%d config files in '%s' need updating." % \
12573 print "config file '%s' needs updating." % x
12576 print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12577 " section of the " + bold("emerge")
12578 print " "+yellow("*")+" man page to learn how to update config files."
# NOTE(review): line-sampled listing — some docstring/signature lines are
# missing (embedded numbering has gaps).
12580 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12583 Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
12584 Returns the number of unread (yet relevant) items.
12586 @param portdb: a portage tree database
12587 @type portdb: portdbapi
12588 @param vardb: an installed package database
12589 @type vardb: vardbapi
12592 @param UNREAD_PATH:
12598 1. The number of unread but relevant news items.
# Thin wrapper: delegate all of the work to portage.news.NewsManager.
12601 from portage.news import NewsManager
12602 manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12603 return manager.getUnreadItems( repo_id, update=update )
# Insert "category/" into a category-less atom, just before its first
# word character (so operator prefixes like ">=" are preserved).
# NOTE(review): sampled listing — the branch taken when no word character is
# found (re.search returns None) is not visible here; presumably it returns
# None or similar — confirm against the full source.
12605 def insert_category_into_atom(atom, category):
12606 alphanum = re.search(r'\w', atom)
12608 ret = atom[:alphanum.start()] + "%s/" % category + \
12609 atom[alphanum.start():]
# Return whether x is a valid package atom, allowing the category to be
# omitted: a dummy "cat/" is inserted before the first word character so
# portage.isvalidatom() can validate category-less input.
# NOTE(review): sampled listing — the guard around the insertion (for when
# re.search finds no word character) is not visible here.
12614 def is_valid_package_atom(x):
12616 alphanum = re.search(r'\w', x)
12618 x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12619 return portage.isvalidatom(x)
# Print a pointer to the handbook section that explains blocked packages.
# NOTE(review): sampled listing — blank-print lines between these statements
# are not visible here.
12621 def show_blocker_docs_link():
12623 print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12624 print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12626 print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12629 def show_mask_docs():
12630 print "For more information, see the MASKED PACKAGES section in the emerge"
12631 print "man page or refer to the Gentoo Handbook."
# NOTE(review): this listing is line-sampled (the embedded numbering has
# large gaps) and original indentation was stripped; many statements —
# including loop headers, try/except frames and else-branches — are missing
# between the visible lines. Comments describe only what is visible.
#
# Implement `emerge --sync` / `emerge --metadata`: update the Portage tree
# via git, rsync or cvs depending on SYNC/tree state, then refresh metadata
# caches, run global updates, post_sync hook and news notification.
12633 def action_sync(settings, trees, mtimedb, myopts, myaction):
12634 xterm_titles = "notitles" not in settings.features
12635 emergelog(xterm_titles, " === sync")
12636 myportdir = settings.get("PORTDIR", None)
12637 out = portage.output.EOutput()
12639 sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
# Strip a single trailing slash so path concatenation below is consistent.
12641 if myportdir[-1]=="/":
12642 myportdir=myportdir[:-1]
12644 st = os.stat(myportdir)
12648 print ">>>",myportdir,"not found, creating it."
12649 os.makedirs(myportdir,0755)
12650 st = os.stat(myportdir)
12653 spawn_kwargs["env"] = settings.environ()
# FEATURES=usersync: when running privileged but the tree is owned by a
# different uid/gid, drop to the tree owner's identity for the sync.
12654 if 'usersync' in settings.features and \
12655 portage.data.secpass >= 2 and \
12656 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12657 st.st_gid != os.getgid() and st.st_mode & 0070):
12659 homedir = pwd.getpwuid(st.st_uid).pw_dir
12663 # Drop privileges when syncing, in order to match
12664 # existing uid/gid settings.
12665 spawn_kwargs["uid"] = st.st_uid
12666 spawn_kwargs["gid"] = st.st_gid
12667 spawn_kwargs["groups"] = [st.st_gid]
12668 spawn_kwargs["env"]["HOME"] = homedir
# Keep group-write permission masked consistently with the tree's mode.
12670 if not st.st_mode & 0020:
12671 umask = umask | 0020
12672 spawn_kwargs["umask"] = umask
12674 syncuri = settings.get("SYNC", "").strip()
12676 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12677 noiselevel=-1, level=logging.ERROR)
# Detect a VCS-managed tree: its presence changes (or aborts) the sync method.
12680 vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12681 vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12684 dosyncuri = syncuri
12685 updatecache_flg = False
12686 if myaction == "metadata":
# --metadata: skip the transfer entirely, only regenerate the cache below.
12687 print "skipping sync"
12688 updatecache_flg = True
12689 elif ".git" in vcs_dirs:
12690 # Update existing git repository, and ignore the syncuri. We are
12691 # going to trust the user and assume that the user is in the branch
12692 # that he/she wants updated. We'll let the user manage branches with
12694 if portage.process.find_binary("git") is None:
12695 msg = ["Command not found: git",
12696 "Type \"emerge dev-util/git\" to enable git support."]
12698 writemsg_level("!!! %s\n" % l,
12699 level=logging.ERROR, noiselevel=-1)
12701 msg = ">>> Starting git pull in %s..." % myportdir
12702 emergelog(xterm_titles, msg )
12703 writemsg_level(msg + "\n")
12704 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12705 (portage._shell_quote(myportdir),), **spawn_kwargs)
12706 if exitcode != os.EX_OK:
12707 msg = "!!! git pull error in %s." % myportdir
12708 emergelog(xterm_titles, msg)
12709 writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12711 msg = ">>> Git pull in %s successful" % myportdir
12712 emergelog(xterm_titles, msg)
12713 writemsg_level(msg + "\n")
# git discards mtimes, so restore them from the metadata cache afterwards.
12714 exitcode = git_sync_timestamps(settings, myportdir)
12715 if exitcode == os.EX_OK:
12716 updatecache_flg = True
12717 elif syncuri[:8]=="rsync://":
# Refuse to rsync over a tree that is under some other revision control.
12718 for vcs_dir in vcs_dirs:
12719 writemsg_level(("!!! %s appears to be under revision " + \
12720 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12721 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12723 if not os.path.exists("/usr/bin/rsync"):
12724 print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12725 print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
# Build the rsync option list: hardcoded defaults unless the user set
# PORTAGE_RSYNC_OPTS, in which case required options are validated in.
12730 if settings["PORTAGE_RSYNC_OPTS"] == "":
12731 portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12732 rsync_opts.extend([
12733 "--recursive", # Recurse directories
12734 "--links", # Consider symlinks
12735 "--safe-links", # Ignore links outside of tree
12736 "--perms", # Preserve permissions
12737 "--times", # Preserive mod times
12738 "--compress", # Compress the data transmitted
12739 "--force", # Force deletion on non-empty dirs
12740 "--whole-file", # Don't do block transfers, only entire files
12741 "--delete", # Delete files that aren't in the master tree
12742 "--stats", # Show final statistics about what was transfered
12743 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12744 "--exclude=/distfiles", # Exclude distfiles from consideration
12745 "--exclude=/local", # Exclude local from consideration
12746 "--exclude=/packages", # Exclude packages from consideration
12750 # The below validation is not needed when using the above hardcoded
12753 portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12755 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12756 for opt in ("--recursive", "--times"):
12757 if opt not in rsync_opts:
12758 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12759 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12760 rsync_opts.append(opt)
12762 for exclude in ("distfiles", "local", "packages"):
12763 opt = "--exclude=/%s" % exclude
12764 if opt not in rsync_opts:
12765 portage.writemsg(yellow("WARNING:") + \
12766 " adding required option %s not included in " % opt + \
12767 "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12768 rsync_opts.append(opt)
# Extra required options are forced only for official Gentoo mirrors.
12770 if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12771 def rsync_opt_startswith(opt_prefix):
12772 for x in rsync_opts:
12773 if x.startswith(opt_prefix):
12777 if not rsync_opt_startswith("--timeout="):
12778 rsync_opts.append("--timeout=%d" % mytimeout)
12780 for opt in ("--compress", "--whole-file"):
12781 if opt not in rsync_opts:
12782 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12783 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12784 rsync_opts.append(opt)
12786 if "--quiet" in myopts:
12787 rsync_opts.append("--quiet") # Shut up a lot
12789 rsync_opts.append("--verbose") # Print filelist
12791 if "--verbose" in myopts:
12792 rsync_opts.append("--progress") # Progress meter for each file
12794 if "--debug" in myopts:
12795 rsync_opts.append("--checksum") # Force checksum on all files
12797 # Real local timestamp file.
12798 servertimestampfile = os.path.join(
12799 myportdir, "metadata", "timestamp.chk")
12801 content = portage.util.grabfile(servertimestampfile)
# Parse the local tree timestamp; 0 / parse failures mean "unknown".
12805 mytimestamp = time.mktime(time.strptime(content[0],
12806 "%a, %d %b %Y %H:%M:%S +0000"))
12807 except (OverflowError, ValueError):
12812 rsync_initial_timeout = \
12813 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12815 rsync_initial_timeout = 15
12818 maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12819 except SystemExit, e:
12820 raise # Needed else can't exit
12822 maxretries=3 #default number of retries
# Split SYNC into optional user@, hostname and optional :port parts.
12825 user_name, hostname, port = re.split(
12826 "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12829 if user_name is None:
12831 updatecache_flg=True
12832 all_rsync_opts = set(rsync_opts)
12833 extra_rsync_opts = shlex.split(
12834 settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12835 all_rsync_opts.update(extra_rsync_opts)
# Honor explicit -4/-6 address-family requests from the rsync options.
12836 family = socket.AF_INET
12837 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12838 family = socket.AF_INET
12839 elif socket.has_ipv6 and \
12840 ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12841 family = socket.AF_INET6
# Sentinel exit codes used by the retry loop below (distinct from rsync's).
12843 SERVER_OUT_OF_DATE = -1
12844 EXCEEDED_MAX_RETRIES = -2
# Resolve all addresses up front so retries can rotate between mirrors.
12850 for addrinfo in socket.getaddrinfo(
12851 hostname, None, family, socket.SOCK_STREAM):
12852 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12853 # IPv6 addresses need to be enclosed in square brackets
12854 ips.append("[%s]" % addrinfo[4][0])
12856 ips.append(addrinfo[4][0])
12857 from random import shuffle
12859 except SystemExit, e:
12860 raise # Needed else can't exit
12861 except Exception, e:
12862 print "Notice:",str(e)
# Substitute the resolved IP for the hostname in the sync URI.
12867 dosyncuri = syncuri.replace(
12868 "//" + user_name + hostname + port + "/",
12869 "//" + user_name + ips[0] + port + "/", 1)
12870 except SystemExit, e:
12871 raise # Needed else can't exit
12872 except Exception, e:
12873 print "Notice:",str(e)
12877 if "--ask" in myopts:
12878 if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12883 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12884 if "--quiet" not in myopts:
12885 print ">>> Starting rsync with "+dosyncuri+"..."
12887 emergelog(xterm_titles,
12888 ">>> Starting retry %d of %d with %s" % \
12889 (retries,maxretries,dosyncuri))
12890 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12892 if mytimestamp != 0 and "--quiet" not in myopts:
12893 print ">>> Checking server timestamp ..."
12895 rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12897 if "--debug" in myopts:
12900 exitcode = os.EX_OK
12901 servertimestamp = 0
12902 # Even if there's no timestamp available locally, fetch the
12903 # timestamp anyway as an initial probe to verify that the server is
12904 # responsive. This protects us from hanging indefinitely on a
12905 # connection attempt to an unresponsive server which rsync's
12906 # --timeout option does not prevent.
12908 # Temporary file for remote server timestamp comparison.
12909 from tempfile import mkstemp
12910 fd, tmpservertimestampfile = mkstemp()
12912 mycommand = rsynccommand[:]
12913 mycommand.append(dosyncuri.rstrip("/") + \
12914 "/metadata/timestamp.chk")
12915 mycommand.append(tmpservertimestampfile)
# SIGALRM-based timeout around the probe, since rsync's own --timeout
# does not cover the initial connection attempt.
12919 def timeout_handler(signum, frame):
12920 raise portage.exception.PortageException("timed out")
12921 signal.signal(signal.SIGALRM, timeout_handler)
12922 # Timeout here in case the server is unresponsive. The
12923 # --timeout rsync option doesn't apply to the initial
12924 # connection attempt.
12925 if rsync_initial_timeout:
12926 signal.alarm(rsync_initial_timeout)
12928 mypids.extend(portage.process.spawn(
12929 mycommand, env=settings.environ(), returnpid=True))
12930 exitcode = os.waitpid(mypids[0], 0)[1]
12931 content = portage.grabfile(tmpservertimestampfile)
12933 if rsync_initial_timeout:
12936 os.unlink(tmpservertimestampfile)
12939 except portage.exception.PortageException, e:
# The alarm fired: kill the probe process if it is still running.
12943 if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
12944 os.kill(mypids[0], signal.SIGTERM)
12945 os.waitpid(mypids[0], 0)
12946 # This is the same code rsync uses for timeout.
# Normalize the waitpid() status into a shell-style exit code.
12949 if exitcode != os.EX_OK:
12950 if exitcode & 0xff:
12951 exitcode = (exitcode & 0xff) << 8
12953 exitcode = exitcode >> 8
12955 portage.process.spawned_pids.remove(mypids[0])
12958 servertimestamp = time.mktime(time.strptime(
12959 content[0], "%a, %d %b %Y %H:%M:%S +0000"))
12960 except (OverflowError, ValueError):
12962 del mycommand, mypids, content
12963 if exitcode == os.EX_OK:
12964 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
# Server and local timestamps match: nothing to transfer.
12965 emergelog(xterm_titles,
12966 ">>> Cancelling sync -- Already current.")
12969 print ">>> Timestamps on the server and in the local repository are the same."
12970 print ">>> Cancelling all further sync action. You are already up to date."
12972 print ">>> In order to force sync, remove '%s'." % servertimestampfile
12976 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
12977 emergelog(xterm_titles,
12978 ">>> Server out of date: %s" % dosyncuri)
12981 print ">>> SERVER OUT OF DATE: %s" % dosyncuri
12983 print ">>> In order to force sync, remove '%s'." % servertimestampfile
12986 exitcode = SERVER_OUT_OF_DATE
12987 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
# Server is newer (or timestamps unknown): run the real transfer.
12989 mycommand = rsynccommand + [dosyncuri+"/", myportdir]
12990 exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
# rsync exit codes treated as terminal (success or non-retryable error);
# see rsync(1) EXIT VALUES.
12991 if exitcode in [0,1,3,4,11,14,20,21]:
12993 elif exitcode in [1,3,4,11,14,20,21]:
12996 # Code 2 indicates protocol incompatibility, which is expected
12997 # for servers with protocol < 29 that don't support
12998 # --prune-empty-directories. Retry for a server that supports
12999 # at least rsync protocol version 29 (>=rsync-2.6.4).
13004 if retries<=maxretries:
13005 print ">>> Retrying..."
13010 updatecache_flg=False
13011 exitcode = EXCEEDED_MAX_RETRIES
13015 emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
13016 elif exitcode == SERVER_OUT_OF_DATE:
13018 elif exitcode == EXCEEDED_MAX_RETRIES:
13020 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
# Human-readable explanations keyed on the final rsync exit code
# (surrounding if/elif frames are not visible in this sampled listing).
13025 msg.append("Rsync has reported that there is a syntax error. Please ensure")
13026 msg.append("that your SYNC statement is proper.")
13027 msg.append("SYNC=" + settings["SYNC"])
13029 msg.append("Rsync has reported that there is a File IO error. Normally")
13030 msg.append("this means your disk is full, but can be caused by corruption")
13031 msg.append("on the filesystem that contains PORTDIR. Please investigate")
13032 msg.append("and try again after the problem has been fixed.")
13033 msg.append("PORTDIR=" + settings["PORTDIR"])
13035 msg.append("Rsync was killed before it finished.")
13037 msg.append("Rsync has not successfully finished. It is recommended that you keep")
13038 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
13039 msg.append("to use rsync due to firewall or other restrictions. This should be a")
13040 msg.append("temporary problem unless complications exist with your network")
13041 msg.append("(and possibly your system's filesystem) configuration.")
13045 elif syncuri[:6]=="cvs://":
13046 if not os.path.exists("/usr/bin/cvs"):
13047 print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
13048 print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
13050 cvsroot=syncuri[6:]
13051 cvsdir=os.path.dirname(myportdir)
13052 if not os.path.exists(myportdir+"/CVS"):
# First sync: fresh checkout into cvsdir/gentoo-x86, then rename into place.
13054 print ">>> Starting initial cvs checkout with "+syncuri+"..."
13055 if os.path.exists(cvsdir+"/gentoo-x86"):
13056 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
13059 os.rmdir(myportdir)
13061 if e.errno != errno.ENOENT:
13063 "!!! existing '%s' directory; exiting.\n" % myportdir)
13066 if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
13067 print "!!! cvs checkout error; exiting."
13069 os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
13072 print ">>> Starting cvs update with "+syncuri+"..."
13073 retval = portage.process.spawn_bash(
13074 "cd %s; cvs -z0 -q update -dP" % \
13075 (portage._shell_quote(myportdir),), **spawn_kwargs)
13076 if retval != os.EX_OK:
13078 dosyncuri = syncuri
13080 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
13081 noiselevel=-1, level=logging.ERROR)
# FEATURES=metadata-transfer controls whether the cache is regenerated
# after a plain sync (--metadata always regenerates).
13084 if updatecache_flg and \
13085 myaction != "metadata" and \
13086 "metadata-transfer" not in settings.features:
13087 updatecache_flg = False
13089 # Reload the whole config from scratch.
13090 settings, trees, mtimedb = load_emerge_config(trees=trees)
13091 root_config = trees[settings["ROOT"]]["root_config"]
13092 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13094 if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
13095 action_metadata(settings, portdb, myopts)
13097 if portage._global_updates(trees, mtimedb["updates"]):
13099 # Reload the whole config from scratch.
13100 settings, trees, mtimedb = load_emerge_config(trees=trees)
13101 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13102 root_config = trees[settings["ROOT"]]["root_config"]
# Compare best visible portage version against the installed one to decide
# whether to nag the user to upgrade portage first.
13104 mybestpv = portdb.xmatch("bestmatch-visible",
13105 portage.const.PORTAGE_PACKAGE_ATOM)
13106 mypvs = portage.best(
13107 trees[settings["ROOT"]]["vartree"].dbapi.match(
13108 portage.const.PORTAGE_PACKAGE_ATOM))
13110 chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
13112 if myaction != "metadata":
# Run the user's executable post-sync hook if present.
13113 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
13114 retval = portage.process.spawn(
13115 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
13116 dosyncuri], env=settings.environ())
13117 if retval != os.EX_OK:
13118 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
13120 if(mybestpv != mypvs) and not "--quiet" in myopts:
13122 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
13123 print red(" * ")+"that you update portage now, before any other packages are updated."
13125 print red(" * ")+"To update portage, run 'emerge portage' now."
13128 display_news_notification(root_config, myopts)
# NOTE(review): line-sampled listing — docstring delimiters, try/except
# frames, continue statements and return lines are missing between the
# visible lines. Comments describe only what is visible.
13131 def git_sync_timestamps(settings, portdir):
13133 Since git doesn't preserve timestamps, synchronize timestamps between
13134 entries and ebuilds/eclasses. Assume the cache has the correct timestamp
13135 for a given file as long as the file in the working tree is not modified
13136 (relative to HEAD).
13138 cache_dir = os.path.join(portdir, "metadata", "cache")
13139 if not os.path.isdir(cache_dir):
13141 writemsg_level(">>> Synchronizing timestamps...\n")
13143 from portage.cache.cache_errors import CacheError
# Open the flat metadata cache read-only; it is the source of truth for
# the mtimes to be restored.
13145 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
13146 portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13147 except CacheError, e:
13148 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
13149 level=logging.ERROR, noiselevel=-1)
13152 ec_dir = os.path.join(portdir, "eclass")
# f[:-7] strips the ".eclass" suffix (7 characters).
13154 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13155 if f.endswith(".eclass"))
13157 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13158 level=logging.ERROR, noiselevel=-1)
# Ask git which tracked files are modified relative to HEAD; those must
# NOT have their mtimes rewritten from the cache.
13161 args = [portage.const.BASH_BINARY, "-c",
13162 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13163 portage._shell_quote(portdir)]
13165 proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13166 modified_files = set(l.rstrip("\n") for l in proc.stdout)
13168 if rval != os.EX_OK:
13171 modified_eclasses = set(ec for ec in ec_names \
13172 if os.path.join("eclass", ec + ".eclass") in modified_files)
13174 updated_ec_mtimes = {}
13176 for cpv in cache_db:
13177 cpv_split = portage.catpkgsplit(cpv)
13178 if cpv_split is None:
13179 writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13180 level=logging.ERROR, noiselevel=-1)
13183 cat, pn, ver, rev = cpv_split
13184 cat, pf = portage.catsplit(cpv)
13185 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
# Locally-modified ebuilds are skipped (branch body not visible here).
13186 if relative_eb_path in modified_files:
13190 cache_entry = cache_db[cpv]
13191 eb_mtime = cache_entry.get("_mtime_")
13192 ec_mtimes = cache_entry.get("_eclasses_")
13194 writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13195 level=logging.ERROR, noiselevel=-1)
13197 except CacheError, e:
13198 writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13199 (cpv, e), level=logging.ERROR, noiselevel=-1)
13202 if eb_mtime is None:
13203 writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13204 level=logging.ERROR, noiselevel=-1)
13208 eb_mtime = long(eb_mtime)
13210 writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13211 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13214 if ec_mtimes is None:
13215 writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13216 level=logging.ERROR, noiselevel=-1)
13219 if modified_eclasses.intersection(ec_mtimes):
13222 missing_eclasses = set(ec_mtimes).difference(ec_names)
13223 if missing_eclasses:
13224 writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13225 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13229 eb_path = os.path.join(portdir, relative_eb_path)
# NOTE(review): os.stat() returns a stat_result, yet current_eb_mtime is
# later compared against an integer mtime — this line looks truncated by
# the sampling (likely an .st_mtime / [stat.ST_MTIME] access is missing);
# confirm against the full source.
13231 current_eb_mtime = os.stat(eb_path)
13233 writemsg_level("!!! Missing ebuild: %s\n" % \
13234 (cpv,), level=logging.ERROR, noiselevel=-1)
# First pass: detect eclass mtimes that conflict with values already
# applied for other cache entries, without touching the filesystem.
13237 inconsistent = False
13238 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13239 updated_mtime = updated_ec_mtimes.get(ec)
13240 if updated_mtime is not None and updated_mtime != ec_mtime:
13241 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13242 (cpv, ec), level=logging.ERROR, noiselevel=-1)
13243 inconsistent = True
# Second pass: apply the cached mtimes to the ebuild and its eclasses.
13249 if current_eb_mtime != eb_mtime:
13250 os.utime(eb_path, (eb_mtime, eb_mtime))
13252 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13253 if ec in updated_ec_mtimes:
13255 ec_path = os.path.join(ec_dir, ec + ".eclass")
13256 current_mtime = long(os.stat(ec_path).st_mtime)
13257 if current_mtime != ec_mtime:
13258 os.utime(ec_path, (ec_mtime, ec_mtime))
13259 updated_ec_mtimes[ec] = ec_mtime
# NOTE(review): line-sampled listing — sys.exit calls, else branches, yield
# statements and try frames are missing between the visible lines.
#
# Implement `emerge --metadata`: transfer the tree's pregenerated
# metadata/cache into the local depcache, with a percentage spinner unless
# --quiet is in effect.
13263 def action_metadata(settings, portdb, myopts):
13264 portage.writemsg_stdout("\n>>> Updating Portage cache: ")
# Allow group-write on created cache files for the duration of the update.
13265 old_umask = os.umask(0002)
13266 cachedir = os.path.normpath(settings.depcachedir)
# Safety check: refuse to operate on a top-level system directory, since
# cache maintenance may remove stale entries under cachedir.
13267 if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
13268 "/lib", "/opt", "/proc", "/root", "/sbin",
13269 "/sys", "/tmp", "/usr", "/var"]:
13270 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13271 "ROOT DIRECTORY ON YOUR SYSTEM."
13272 print >> sys.stderr, \
13273 "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13275 if not os.path.exists(cachedir):
13278 ec = portage.eclass_cache.cache(portdb.porttree_root)
13279 myportdir = os.path.realpath(settings["PORTDIR"])
# cm: the source cache (the tree's metadata/cache), opened read-only.
13280 cm = settings.load_best_module("portdbapi.metadbmodule")(
13281 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13283 from portage.cache import util
# Progress reporter that doubles as the cpv source iterator; prints an
# updating percentage based on how far through cp_all() iteration we are.
13285 class percentage_noise_maker(util.quiet_mirroring):
13286 def __init__(self, dbapi):
13288 self.cp_all = dbapi.cp_all()
13289 l = len(self.cp_all)
13290 self.call_update_min = 100000000
# 1% of the category/package list, used as the update granularity.
13291 self.min_cp_all = l/100.0
13295 def __iter__(self):
13296 for x in self.cp_all:
13298 if self.count > self.min_cp_all:
13299 self.call_update_min = 0
13301 for y in self.dbapi.cp_list(x):
# NOTE(review): "call_update_mine" does not match the "call_update_min"
# attribute used elsewhere in this class — possible typo; confirm against
# the full source.
13303 self.call_update_mine = 0
13305 def update(self, *arg):
13307 self.pstr = int(self.pstr) + 1
# Backspace over the previous percentage before printing the new one.
13310 sys.stdout.write("%s%i%%" % \
13311 ("\b" * (len(str(self.pstr))+1), self.pstr))
13313 self.call_update_min = 10000000
13315 def finish(self, *arg):
13316 sys.stdout.write("\b\b\b\b100%\n")
13319 if "--quiet" in myopts:
# Quiet mode: plain cpv generator and a no-op progress reporter.
13320 def quicky_cpv_generator(cp_all_list):
13321 for x in cp_all_list:
13322 for y in portdb.cp_list(x):
13324 source = quicky_cpv_generator(portdb.cp_all())
13325 noise_maker = portage.cache.util.quiet_mirroring()
13327 noise_maker = source = percentage_noise_maker(portdb)
13328 portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
13329 eclass_cache=ec, verbose_instance=noise_maker)
# Restore the caller's umask before returning.
13332 os.umask(old_umask)
# NOTE(review): line-sampled listing — the try/except frame around the
# stdin close and the regen.run() call are among the missing lines.
#
# Implement `emerge --regen`: regenerate metadata cache entries for the
# whole tree using the parallel MetadataRegen scheduler.
13334 def action_regen(settings, portdb, max_jobs, max_load):
13335 xterm_titles = "notitles" not in settings.features
13336 emergelog(xterm_titles, " === regen")
13337 #regenerate cache entries
13338 portage.writemsg_stdout("Regenerating cache entries...\n")
# Close stdin so spawned ebuild processes cannot block waiting on input.
13340 os.close(sys.stdin.fileno())
13341 except SystemExit, e:
13342 raise # Needed else can't exit
13347 regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13350 portage.writemsg_stdout("done!\n")
13351 return regen.returncode
13353 def action_config(settings, trees, myopts, myfiles):
13354 if len(myfiles) != 1:
13355 print red("!!! config can only take a single package atom at this time\n")
13357 if not is_valid_package_atom(myfiles[0]):
13358 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13360 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13361 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
13365 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13366 except portage.exception.AmbiguousPackageName, e:
13367 # Multiple matches thrown from cpv_expand
13370 print "No packages found.\n"
13372 elif len(pkgs) > 1:
13373 if "--ask" in myopts:
13375 print "Please select a package to configure:"
13379 options.append(str(idx))
13380 print options[-1]+") "+pkg
13382 options.append("X")
13383 idx = userquery("Selection?", options)
13386 pkg = pkgs[int(idx)-1]
13388 print "The following packages available:"
13391 print "\nPlease use a specific atom or the --ask option."
13397 if "--ask" in myopts:
13398 if userquery("Ready to configure "+pkg+"?") == "No":
13401 print "Configuring pkg..."
13403 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13404 mysettings = portage.config(clone=settings)
13405 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13406 debug = mysettings.get("PORTAGE_DEBUG") == "1"
13407 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13409 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
13410 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13411 if retval == os.EX_OK:
13412 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13413 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
13416 def action_info(settings, trees, myopts, myfiles):
13417 print getportageversion(settings["PORTDIR"], settings["ROOT"],
13418 settings.profile_path, settings["CHOST"],
13419 trees[settings["ROOT"]]["vartree"].dbapi)
13421 header_title = "System Settings"
13423 print header_width * "="
13424 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13425 print header_width * "="
13426 print "System uname: "+platform.platform(aliased=1)
13428 lastSync = portage.grabfile(os.path.join(
13429 settings["PORTDIR"], "metadata", "timestamp.chk"))
13430 print "Timestamp of tree:",
13436 output=commands.getstatusoutput("distcc --version")
13438 print str(output[1].split("\n",1)[0]),
13439 if "distcc" in settings.features:
13444 output=commands.getstatusoutput("ccache -V")
13446 print str(output[1].split("\n",1)[0]),
13447 if "ccache" in settings.features:
13452 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13453 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
13454 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13455 myvars = portage.util.unique_array(myvars)
13459 if portage.isvalidatom(x):
13460 pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13461 pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13462 pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13464 for pn, ver, rev in pkg_matches:
13466 pkgs.append(ver + "-" + rev)
13470 pkgs = ", ".join(pkgs)
13471 print "%-20s %s" % (x+":", pkgs)
13473 print "%-20s %s" % (x+":", "[NOT VALID]")
13475 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
13477 if "--verbose" in myopts:
13478 myvars=settings.keys()
13480 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13481 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13482 'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13483 'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13485 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13487 myvars = portage.util.unique_array(myvars)
13493 print '%s="%s"' % (x, settings[x])
13495 use = set(settings["USE"].split())
13496 use_expand = settings["USE_EXPAND"].split()
13498 for varname in use_expand:
13499 flag_prefix = varname.lower() + "_"
13500 for f in list(use):
13501 if f.startswith(flag_prefix):
13505 print 'USE="%s"' % " ".join(use),
13506 for varname in use_expand:
13507 myval = settings.get(varname)
13509 print '%s="%s"' % (varname, myval),
13512 unset_vars.append(x)
13514 print "Unset: "+", ".join(unset_vars)
13517 if "--debug" in myopts:
13518 for x in dir(portage):
13519 module = getattr(portage, x)
13520 if "cvs_id_string" in dir(module):
13521 print "%s: %s" % (str(x), str(module.cvs_id_string))
13523 # See if we can find any packages installed matching the strings
13524 # passed on the command line
13526 vardb = trees[settings["ROOT"]]["vartree"].dbapi
13527 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13529 mypkgs.extend(vardb.match(x))
13531 # If some packages were found...
13533 # Get our global settings (we only print stuff if it varies from
13534 # the current config)
13535 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13536 auxkeys = mydesiredvars + [ "USE", "IUSE"]
13538 pkgsettings = portage.config(clone=settings)
13540 for myvar in mydesiredvars:
13541 global_vals[myvar] = set(settings.get(myvar, "").split())
13543 # Loop through each package
13544 # Only print settings if they differ from global settings
13545 header_title = "Package Settings"
13546 print header_width * "="
13547 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13548 print header_width * "="
13549 from portage.output import EOutput
13552 # Get all package specific variables
13553 auxvalues = vardb.aux_get(pkg, auxkeys)
13555 for i in xrange(len(auxkeys)):
13556 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
13558 for myvar in mydesiredvars:
13559 # If the package variable doesn't match the
13560 # current global variable, something has changed
13561 # so set diff_found so we know to print
13562 if valuesmap[myvar] != global_vals[myvar]:
13563 diff_values[myvar] = valuesmap[myvar]
13564 valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
13565 valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
13566 pkgsettings.reset()
13567 # If a matching ebuild is no longer available in the tree, maybe it
13568 # would make sense to compare against the flags for the best
13569 # available version with the same slot?
13571 if portdb.cpv_exists(pkg):
13573 pkgsettings.setcpv(pkg, mydb=mydb)
13574 if valuesmap["IUSE"].intersection(
13575 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
13576 diff_values["USE"] = valuesmap["USE"]
13577 # If a difference was found, print the info for
13580 # Print package info
13581 print "%s was built with the following:" % pkg
13582 for myvar in mydesiredvars + ["USE"]:
13583 if myvar in diff_values:
13584 mylist = list(diff_values[myvar])
13586 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13588 print ">>> Attempting to run pkg_info() for '%s'" % pkg
13589 ebuildpath = vardb.findname(pkg)
13590 if not ebuildpath or not os.path.exists(ebuildpath):
13591 out.ewarn("No ebuild found for '%s'" % pkg)
13593 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13594 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
13595 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
13598 def action_search(root_config, myopts, myfiles, spinner):
13600 print "emerge: no search terms provided."
13602 searchinstance = search(root_config,
13603 spinner, "--searchdesc" in myopts,
13604 "--quiet" not in myopts, "--usepkg" in myopts,
13605 "--usepkgonly" in myopts)
13606 for mysearch in myfiles:
13608 searchinstance.execute(mysearch)
13609 except re.error, comment:
13610 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13612 searchinstance.output()
def action_depclean(settings, trees, ldpath_mtimes,
	myopts, action, myfiles, spinner):
	"""Handle the --depclean and --prune actions: compute the set of
	installed packages that nothing in the system/world sets depends on
	(or, for prune, redundant slotted versions) and unmerge them in a
	dependency-safe order.

	NOTE(review): this excerpt appears to have many lines elided
	(orphaned `except` clauses, `if`/`for` headers with no visible body,
	and names such as `msg`, `required_sets`, `set_args`, `consumers`,
	`soname_cache`, `consumer_map`, `priority_map` used before any
	visible assignment). Comments below document intent without
	restoring the missing lines — confirm against the full source.
	"""
	# Kill packages that aren't explicitly merged or are required as a
	# dependency of another package. World file is explicit.

	# Global depclean or prune operations are not very safe when there are
	# missing dependencies since it's unknown how badly incomplete
	# the dependency graph is, and we might accidentally remove packages
	# that should have been pulled into the graph. On the other hand, it's
	# relatively safe to ignore missing deps when only asked to remove
	# specific packages.
	allow_missing_deps = len(myfiles) > 0

	# Safety notice shown before a global depclean.
	# NOTE(review): `msg = []` presumably precedes these appends — elided here.
	msg.append("Always study the list of packages to be cleaned for any obvious\n")
	msg.append("mistakes. Packages that are part of the world set will always\n")
	msg.append("be kept. They can be manually added to this set with\n")
	msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
	msg.append("package.provided (see portage(5)) will be removed by\n")
	msg.append("depclean, even if they are part of the world set.\n")
	msg.append("As a safety measure, depclean will not remove any packages\n")
	msg.append("unless *all* required dependencies have been resolved. As a\n")
	msg.append("consequence, it is often necessary to run %s\n" % \
		good("`emerge --update"))
	msg.append(good("--newuse --deep @system @world`") + \
		" prior to depclean.\n")

	# Only print the notice for an interactive, global depclean run.
	if action == "depclean" and "--quiet" not in myopts and not myfiles:
		portage.writemsg_stdout("\n")
		# NOTE(review): `x` here comes from an elided `for x in msg:` loop.
		portage.writemsg_stdout(colorize("WARN", " * ") + x)

	xterm_titles = "notitles" not in settings.features
	myroot = settings["ROOT"]
	root_config = trees[myroot]["root_config"]
	getSetAtoms = root_config.setconfig.getSetAtoms
	vardb = trees[myroot]["vartree"].dbapi

	# Resolve the system and world sets once up front; everything they
	# (transitively) require must be kept.
	required_set_names = ("system", "world")
	# NOTE(review): initialization of `required_sets` (presumably `{}`) is elided.
	for s in required_set_names:
		required_sets[s] = InternalPackageSet(
			initial_atoms=getSetAtoms(s))

	# When removing packages, use a temporary version of world
	# which excludes packages that are intended to be eligible for
	world_temp_set = required_sets["world"]
	system_set = required_sets["system"]

	if not system_set or not world_temp_set:
		writemsg_level("!!! You have no system list.\n",
			level=logging.ERROR, noiselevel=-1)
		if not world_temp_set:
			writemsg_level("!!! You have no world file.\n",
				level=logging.WARNING, noiselevel=-1)
		writemsg_level("!!! Proceeding is likely to " + \
			"break your installation.\n",
			level=logging.WARNING, noiselevel=-1)
		# Give the user a chance to abort before a risky clean.
		if "--pretend" not in myopts:
			countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")

	if action == "depclean":
		emergelog(xterm_titles, " >>> depclean")

	# Validate and expand any user-supplied atoms into args_set.
	# NOTE(review): the `for x in myfiles:` loop header and the `try:`
	# matching the AmbiguousPackageName handler below are elided.
	args_set = InternalPackageSet()
	if not is_valid_package_atom(x):
		writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
			level=logging.ERROR, noiselevel=-1)
		writemsg_level("!!! Please check ebuild(5) for full details.\n")
	atom = portage.dep_expand(x, mydb=vardb, settings=settings)
	except portage.exception.AmbiguousPackageName, e:
		msg = "The short ebuild name \"" + x + \
			"\" is ambiguous. Please specify " + \
			"one of the following " + \
			"fully-qualified ebuild names instead:"
		for line in textwrap.wrap(msg, 70):
			writemsg_level("!!! %s\n" % (line,),
				level=logging.ERROR, noiselevel=-1)
		# NOTE(review): `i` presumably iterates the candidate names from `e`.
		writemsg_level(" %s\n" % colorize("INFORM", i),
			level=logging.ERROR, noiselevel=-1)
		writemsg_level("\n", level=logging.ERROR, noiselevel=-1)

	# Bail out early when none of the argument atoms match anything installed.
	matched_packages = False
	matched_packages = True
	if not matched_packages:
		writemsg_level(">>> No packages selected for removal by %s\n" % \

	writemsg_level("\nCalculating dependencies ")
	resolver_params = create_depgraph_params(myopts, "remove")
	resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
	vardb = resolver.trees[myroot]["vartree"].dbapi

	if action == "depclean":
		# Pull in everything that's installed but not matched
		# by an argument atom since we don't want to clean any
		# package if something depends on it.
		world_temp_set.clear()
		# NOTE(review): the `for pkg in vardb:` loop and its `try:` are elided.
		if args_set.findAtomForPackage(pkg) is None:
			world_temp_set.add("=" + pkg.cpv)
		except portage.exception.InvalidDependString, e:
			show_invalid_depstring_notice(pkg,
				pkg.metadata["PROVIDE"], str(e))
			# Keep packages with broken PROVIDE rather than risk removal.
			world_temp_set.add("=" + pkg.cpv)
	elif action == "prune":
		# Pull in everything that's installed since we don't
		# to prune a package if something depends on it.
		world_temp_set.clear()
		world_temp_set.update(vardb.cp_all())

		# Try to prune everything that's slotted.
		for cp in vardb.cp_all():
			if len(vardb.cp_list(cp)) > 1:

		# Remove atoms from world that match installed packages
		# that are also matched by argument atoms, but do not remove
		# them if they match the highest installed version.
		# NOTE(review): the enclosing `for pkg ...` loop/`try:` are elided.
		pkgs_for_cp = vardb.match_pkgs(pkg.cp)
		if not pkgs_for_cp or pkg not in pkgs_for_cp:
			raise AssertionError("package expected in matches: " + \
				"cp = %s, cpv = %s matches = %s" % \
				(pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
		highest_version = pkgs_for_cp[-1]
		if pkg == highest_version:
			# pkg is the highest version
			world_temp_set.add("=" + pkg.cpv)
		if len(pkgs_for_cp) <= 1:
			raise AssertionError("more packages expected: " + \
				"cp = %s, cpv = %s matches = %s" % \
				(pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
		if args_set.findAtomForPackage(pkg) is None:
			world_temp_set.add("=" + pkg.cpv)
		except portage.exception.InvalidDependString, e:
			show_invalid_depstring_notice(pkg,
				pkg.metadata["PROVIDE"], str(e))
			world_temp_set.add("=" + pkg.cpv)

	# Seed the resolver with the system/world sets so their dependencies
	# are marked as required.
	# NOTE(review): initialization of `set_args` is elided.
	for s, package_set in required_sets.iteritems():
		set_atom = SETPREFIX + s
		set_arg = SetArg(arg=set_atom, set=package_set,
			root_config=resolver.roots[myroot])
		set_args[s] = set_arg
		for atom in set_arg.set:
			resolver._dep_stack.append(
				Dependency(atom=atom, root=myroot, parent=set_arg))
		resolver.digraph.add(set_arg, None)

	success = resolver._complete_graph()
	writemsg_level("\b\b... done!\n")

	resolver.display_problems()

	def unresolved_deps():
		# Collect hard (non-SOFT) dependencies the resolver could not
		# satisfy; returns truthy when cleaning would be unsafe.
		unresolvable = set()
		for dep in resolver._initially_unsatisfied_deps:
			if isinstance(dep.parent, Package) and \
				(dep.priority > UnmergeDepPriority.SOFT):
				unresolvable.add((dep.atom, dep.parent.cpv))
		if not unresolvable:
		if unresolvable and not allow_missing_deps:
			prefix = bad(" * ")
			msg.append("Dependencies could not be completely resolved due to")
			msg.append("the following required packages not being installed:")
			for atom, parent in unresolvable:
				msg.append(" %s pulled in by:" % (atom,))
				msg.append(" %s" % (parent,))
			msg.append("Have you forgotten to run " + \
				good("`emerge --update --newuse --deep @system @world`") + " prior")
			msg.append(("to %s? It may be necessary to manually " + \
				"uninstall packages that no longer") % action)
			msg.append("exist in the portage tree since " + \
				"it may not be possible to satisfy their")
			msg.append("dependencies. Also, be aware of " + \
				"the --with-bdeps option that is documented")
			msg.append("in " + good("`man emerge`") + ".")
			if action == "prune":
				msg.append("If you would like to ignore " + \
					"dependencies then use %s." % good("--nodeps"))
			writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
				level=logging.ERROR, noiselevel=-1)

	if unresolved_deps():

	graph = resolver.digraph.copy()
	# Count packages that must stay installed (used in the summary below).
	required_pkgs_total = 0
	if isinstance(node, Package):
		required_pkgs_total += 1

	def show_parents(child_node):
		# Print the reverse dependencies that keep child_node installed.
		parent_nodes = graph.parent_nodes(child_node)
		if not parent_nodes:
			# With --prune, the highest version can be pulled in without any
			# real parent since all installed packages are pulled in. In that
			# case there's nothing to show here.
		for node in parent_nodes:
			parent_strs.append(str(getattr(node, "cpv", node)))
		msg.append(" %s pulled in by:\n" % (child_node.cpv,))
		for parent_str in parent_strs:
			msg.append(" %s\n" % (parent_str,))
		portage.writemsg_stdout("".join(msg), noiselevel=-1)

	def cmp_pkg_cpv(pkg1, pkg2):
		"""Sort Package instances by cpv."""
		# NOTE(review): the return statements for each branch are elided.
		if pkg1.cpv > pkg2.cpv:
		elif pkg1.cpv == pkg2.cpv:

	def create_cleanlist():
		# Select every installed package that is not reachable in the
		# dependency graph (depclean) or matches an argument atom (prune).
		pkgs_to_remove = []

		if action == "depclean":
			for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
				arg_atom = args_set.findAtomForPackage(pkg)
				except portage.exception.InvalidDependString:
					# this error has already been displayed by now
				if pkg not in graph:
					pkgs_to_remove.append(pkg)
				elif "--verbose" in myopts:
			for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
				if pkg not in graph:
					pkgs_to_remove.append(pkg)
				elif "--verbose" in myopts:

		elif action == "prune":
			# Prune really uses all installed instead of world. It's not
			# a real reverse dependency so don't display it as such.
			graph.remove(set_args["world"])
			for atom in args_set:
				for pkg in vardb.match_pkgs(atom):
					if pkg not in graph:
						pkgs_to_remove.append(pkg)
					elif "--verbose" in myopts:

		if not pkgs_to_remove:
			# NOTE(review): the writemsg_stdout( call heads for these
			# string fragments are elided in this excerpt.
			">>> No packages selected for removal by %s\n" % action)
			if "--verbose" not in myopts:
				">>> To see reverse dependencies, use %s\n" % \
			if action == "prune":
				">>> To ignore dependencies, use %s\n" % \

		return pkgs_to_remove

	cleanlist = create_cleanlist()

	clean_set = set(cleanlist)

	# Check if any of these package are the sole providers of libraries
	# with consumers that have not been selected for removal. If so, these
	# packages and any dependencies need to be added to the graph.
	real_vardb = trees[myroot]["vartree"].dbapi
	linkmap = real_vardb.linkmap
	liblist = linkmap.listLibraryObjects()
	consumer_cache = {}
	provider_cache = {}
	writemsg_level(">>> Checking for lib consumers...\n")

	for pkg in cleanlist:
		pkg_dblink = real_vardb._dblink(pkg.cpv)
		provided_libs = set()

		# Which of the known library objects does this package own?
		for lib in liblist:
			if pkg_dblink.isowner(lib, myroot):
				provided_libs.add(lib)

		if not provided_libs:

		# Map each provided lib to its consumers (cached across packages).
		for lib in provided_libs:
			lib_consumers = consumer_cache.get(lib)
			if lib_consumers is None:
				lib_consumers = linkmap.findConsumers(lib)
				consumer_cache[lib] = lib_consumers
			consumers[lib] = lib_consumers

		# Ignore consumers that live inside the same package.
		for lib, lib_consumers in consumers.items():
			for consumer_file in list(lib_consumers):
				if pkg_dblink.isowner(consumer_file, myroot):
					lib_consumers.remove(consumer_file)
			if not lib_consumers:

		for lib, lib_consumers in consumers.iteritems():
			soname = soname_cache.get(lib)
			soname = linkmap.getSoname(lib)
			soname_cache[lib] = soname

			consumer_providers = []
			for lib_consumer in lib_consumers:
				providers = provider_cache.get(lib)
				if providers is None:
					providers = linkmap.findProviders(lib_consumer)
					provider_cache[lib_consumer] = providers
				if soname not in providers:
					# Why does this happen?
				consumer_providers.append(
					(lib_consumer, providers[soname]))

			consumers[lib] = consumer_providers

		consumer_map[pkg] = consumers

	# Gather every file we need ownership info for, in one batch.
	search_files = set()
	for consumers in consumer_map.itervalues():
		for lib, consumer_providers in consumers.iteritems():
			for lib_consumer, providers in consumer_providers:
				search_files.add(lib_consumer)
				search_files.update(providers)

	writemsg_level(">>> Assigning files to packages...\n")
	file_owners = real_vardb._owners.getFileOwnerMap(search_files)

	for pkg, consumers in consumer_map.items():
		for lib, consumer_providers in consumers.items():
			lib_consumers = set()

			for lib_consumer, providers in consumer_providers:
				owner_set = file_owners.get(lib_consumer)
				provider_dblinks = set()
				provider_pkgs = set()

				if len(providers) > 1:
					for provider in providers:
						provider_set = file_owners.get(provider)
						if provider_set is not None:
							provider_dblinks.update(provider_set)

				# A lib with a surviving alternative provider is safe.
				if len(provider_dblinks) > 1:
					for provider_dblink in provider_dblinks:
						pkg_key = ("installed", myroot,
							provider_dblink.mycpv, "nomerge")
						if pkg_key not in clean_set:
							provider_pkgs.add(vardb.get(pkg_key))

				if owner_set is not None:
					lib_consumers.update(owner_set)

			# Consumers that are themselves being removed don't matter.
			for consumer_dblink in list(lib_consumers):
				if ("installed", myroot, consumer_dblink.mycpv,
					"nomerge") in clean_set:
					lib_consumers.remove(consumer_dblink)

			consumers[lib] = lib_consumers

		del consumer_map[pkg]

	# TODO: Implement a package set for rebuilding consumer packages.

	msg = "In order to avoid breakage of link level " + \
		"dependencies, one or more packages will not be removed. " + \
		"This can be solved by rebuilding " + \
		"the packages that pulled them in."

	prefix = bad(" * ")
	from textwrap import wrap
	writemsg_level("".join(prefix + "%s\n" % line for \
		line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)

	# List the retained packages together with their lib consumers.
	for pkg, consumers in consumer_map.iteritems():
		unique_consumers = set(chain(*consumers.values()))
		unique_consumers = sorted(consumer.mycpv \
			for consumer in unique_consumers)
		msg.append(" %s pulled in by:" % (pkg.cpv,))
		for consumer in unique_consumers:
			msg.append(" %s" % (consumer,))
	writemsg_level("".join(prefix + "%s\n" % line for line in msg),
		level=logging.WARNING, noiselevel=-1)

	# Add lib providers to the graph as children of lib consumers,
	# and also add any dependencies pulled in by the provider.
	writemsg_level(">>> Adding lib providers to graph...\n")

	for pkg, consumers in consumer_map.iteritems():
		for consumer_dblink in set(chain(*consumers.values())):
			consumer_pkg = vardb.get(("installed", myroot,
				consumer_dblink.mycpv, "nomerge"))
			if not resolver._add_pkg(pkg,
				Dependency(parent=consumer_pkg,
					priority=UnmergeDepPriority(runtime=True),
				resolver.display_problems()

	# Re-resolve now that the retained providers are in the graph.
	writemsg_level("\nCalculating dependencies ")
	success = resolver._complete_graph()
	writemsg_level("\b\b... done!\n")
	resolver.display_problems()
	if unresolved_deps():

	graph = resolver.digraph.copy()
	required_pkgs_total = 0
	if isinstance(node, Package):
		required_pkgs_total += 1
	cleanlist = create_cleanlist()
	clean_set = set(cleanlist)

	# Use a topological sort to create an unmerge order such that
	# each package is unmerged before it's dependencies. This is
	# necessary to avoid breaking things that may need to run
	# during pkg_prerm or pkg_postrm phases.

	# Create a new graph to account for dependencies between the
	# packages being unmerged.

	dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
	runtime = UnmergeDepPriority(runtime=True)
	runtime_post = UnmergeDepPriority(runtime_post=True)
	buildtime = UnmergeDepPriority(buildtime=True)
	# NOTE(review): the `priority_map = {` header for these entries is elided.
		"RDEPEND": runtime,
		"PDEPEND": runtime_post,
		"DEPEND": buildtime,

	for node in clean_set:
		graph.add(node, None)
		node_use = node.metadata["USE"].split()
		for dep_type in dep_keys:
			depstr = node.metadata[dep_type]
			# Temporarily relax strict dep checking: invalid deps of
			# packages that are being removed anyway are not fatal.
			portage.dep._dep_check_strict = False
			success, atoms = portage.dep_check(depstr, None, settings,
				myuse=node_use, trees=resolver._graph_trees,
			portage.dep._dep_check_strict = True
			# Ignore invalid deps of packages that will
			# be uninstalled anyway.
			priority = priority_map[dep_type]
			if not isinstance(atom, portage.dep.Atom):
				# Ignore invalid atoms returned from dep_check().
			matches = vardb.match_pkgs(atom)
			# Only edges between two packages both being removed matter
			# for the unmerge ordering.
			for child_node in matches:
				if child_node in clean_set:
					graph.add(child_node, node, priority=priority)

	if len(graph.order) == len(graph.root_nodes()):
		# If there are no dependencies between packages
		# let unmerge() group them by cat/pn.
		cleanlist = [pkg.cpv for pkg in graph.order]

	# Order nodes from lowest to highest overall reference count for
	# optimal root node selection.
	node_refcounts = {}
	for node in graph.order:
		node_refcounts[node] = len(graph.parent_nodes(node))
	def cmp_reference_count(node1, node2):
		# Ascending by number of parents (reverse dependencies).
		return node_refcounts[node1] - node_refcounts[node2]
	graph.order.sort(key=cmp_sort_key(cmp_reference_count))

	# Pop root nodes repeatedly, progressively ignoring weaker edge
	# priorities so circular dependencies cannot deadlock the sort.
	ignore_priority_range = [None]
	ignore_priority_range.extend(
		xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
	while not graph.empty():
		for ignore_priority in ignore_priority_range:
			nodes = graph.root_nodes(ignore_priority=ignore_priority)
			raise AssertionError("no root nodes")
		if ignore_priority is not None:
			# Some deps have been dropped due to circular dependencies,
			# so only pop one node in order do minimize the number that
		cleanlist.append(node.cpv)

	unmerge(root_config, myopts, "unmerge", cleanlist,
		ldpath_mtimes, ordered=ordered)

	if action == "prune":

	if not cleanlist and "--quiet" in myopts:

	# Final summary counts.
	print "Packages installed: "+str(len(vardb.cpv_all()))
	print "Packages in world: " + \
		str(len(root_config.sets["world"].getAtoms()))
	print "Packages in system: " + \
		str(len(root_config.sets["system"].getAtoms()))
	print "Required packages: "+str(required_pkgs_total)
	if "--pretend" in myopts:
		print "Number to remove: "+str(len(cleanlist))
	# NOTE(review): an `else:` presumably precedes the next line — the
	# "Number removed" message belongs to the non-pretend path.
	print "Number removed: "+str(len(cleanlist))
def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
	"""
	Construct a depgraph for the given resume list. This will raise
	PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
	@returns: (success, depgraph, dropped_tasks)

	NOTE(review): this excerpt appears to have lines elided — the `try:`
	matching the visible `except depgraph.UnsatisfiedResumeDep` clause,
	the definition of `skip_masked`, and (presumably) a retry loop that
	rebuilds the depgraph after pruning unsatisfied entries. Comments
	below document intent without restoring the missing lines.
	"""
	skip_unsatisfied = True
	mergelist = mtimedb["resume"]["mergelist"]
	dropped_tasks = set()
	mydepgraph = depgraph(settings, trees,
		myopts, myparams, spinner)
	success = mydepgraph.loadResumeCommand(mtimedb["resume"],
		skip_masked=skip_masked)
	except depgraph.UnsatisfiedResumeDep, e:
		# When allowed, drop the unsatisfied packages (and anything that
		# depended on them) from the resume list instead of failing.
		if not skip_unsatisfied:
		graph = mydepgraph.digraph
		unsatisfied_parents = dict((dep.parent, dep.parent) \
			for dep in e.value)
		traversed_nodes = set()
		unsatisfied_stack = list(unsatisfied_parents)
		# Walk reverse dependencies: removing a package can make its
		# parents unsatisfied too, so propagate upward transitively.
		while unsatisfied_stack:
			pkg = unsatisfied_stack.pop()
			if pkg in traversed_nodes:
			traversed_nodes.add(pkg)

			# If this package was pulled in by a parent
			# package scheduled for merge, removing this
			# package may cause the the parent package's
			# dependency to become unsatisfied.
			for parent_node in graph.parent_nodes(pkg):
				if not isinstance(parent_node, Package) \
					or parent_node.operation not in ("merge", "nomerge"):
				graph.child_nodes(parent_node,
					ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
				if pkg in unsatisfied:
					unsatisfied_parents[parent_node] = parent_node
					unsatisfied_stack.append(parent_node)

		# Rebuild the mergelist without the unsatisfied entries.
		pruned_mergelist = []
		for x in mergelist:
			if isinstance(x, list) and \
				tuple(x) not in unsatisfied_parents:
				pruned_mergelist.append(x)

		# If the mergelist doesn't shrink then this loop is infinite.
		if len(pruned_mergelist) == len(mergelist):
			# This happens if a package can't be dropped because
			# it's already installed, but it has unsatisfied PDEPEND.
		mergelist[:] = pruned_mergelist

		# Exclude installed packages that have been removed from the graph due
		# to failure to build/install runtime dependencies after the dependent
		# package has already been installed.
		dropped_tasks.update(pkg for pkg in \
			unsatisfied_parents if pkg.operation != "nomerge")
		mydepgraph.break_refs(unsatisfied_parents)

		# Release references before the (elided) retry rebuilds the graph.
		del e, graph, traversed_nodes, \
			unsatisfied_parents, unsatisfied_stack

	return (success, mydepgraph, dropped_tasks)
14313 def action_build(settings, trees, mtimedb,
14314 myopts, myaction, myfiles, spinner):
14316 # validate the state of the resume data
14317 # so that we can make assumptions later.
14318 for k in ("resume", "resume_backup"):
14319 if k not in mtimedb:
14321 resume_data = mtimedb[k]
14322 if not isinstance(resume_data, dict):
14325 mergelist = resume_data.get("mergelist")
14326 if not isinstance(mergelist, list):
14329 for x in mergelist:
14330 if not (isinstance(x, list) and len(x) == 4):
14332 pkg_type, pkg_root, pkg_key, pkg_action = x
14333 if pkg_root not in trees:
14334 # Current $ROOT setting differs,
14335 # so the list must be stale.
14341 resume_opts = resume_data.get("myopts")
14342 if not isinstance(resume_opts, (dict, list)):
14345 favorites = resume_data.get("favorites")
14346 if not isinstance(favorites, list):
14351 if "--resume" in myopts and \
14352 ("resume" in mtimedb or
14353 "resume_backup" in mtimedb):
14355 if "resume" not in mtimedb:
14356 mtimedb["resume"] = mtimedb["resume_backup"]
14357 del mtimedb["resume_backup"]
14359 # "myopts" is a list for backward compatibility.
14360 resume_opts = mtimedb["resume"].get("myopts", [])
14361 if isinstance(resume_opts, list):
14362 resume_opts = dict((k,True) for k in resume_opts)
14363 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14364 resume_opts.pop(opt, None)
14365 myopts.update(resume_opts)
14367 if "--debug" in myopts:
14368 writemsg_level("myopts %s\n" % (myopts,))
14370 # Adjust config according to options of the command being resumed.
14371 for myroot in trees:
14372 mysettings = trees[myroot]["vartree"].settings
14373 mysettings.unlock()
14374 adjust_config(myopts, mysettings)
14376 del myroot, mysettings
14378 ldpath_mtimes = mtimedb["ldpath"]
14381 buildpkgonly = "--buildpkgonly" in myopts
14382 pretend = "--pretend" in myopts
14383 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14384 ask = "--ask" in myopts
14385 nodeps = "--nodeps" in myopts
14386 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14387 tree = "--tree" in myopts
14388 if nodeps and tree:
14390 del myopts["--tree"]
14391 portage.writemsg(colorize("WARN", " * ") + \
14392 "--tree is broken with --nodeps. Disabling...\n")
14393 debug = "--debug" in myopts
14394 verbose = "--verbose" in myopts
14395 quiet = "--quiet" in myopts
14396 if pretend or fetchonly:
14397 # make the mtimedb readonly
14398 mtimedb.filename = None
14399 if '--digest' in myopts or 'digest' in settings.features:
14400 if '--digest' in myopts:
14401 msg = "The --digest option"
14403 msg = "The FEATURES=digest setting"
14405 msg += " can prevent corruption from being" + \
14406 " noticed. The `repoman manifest` command is the preferred" + \
14407 " way to generate manifests and it is capable of doing an" + \
14408 " entire repository or category at once."
14409 prefix = bad(" * ")
14410 writemsg(prefix + "\n")
14411 from textwrap import wrap
14412 for line in wrap(msg, 72):
14413 writemsg("%s%s\n" % (prefix, line))
14414 writemsg(prefix + "\n")
14416 if "--quiet" not in myopts and \
14417 ("--pretend" in myopts or "--ask" in myopts or \
14418 "--tree" in myopts or "--verbose" in myopts):
14420 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14422 elif "--buildpkgonly" in myopts:
14426 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14428 print darkgreen("These are the packages that would be %s, in reverse order:") % action
14432 print darkgreen("These are the packages that would be %s, in order:") % action
14435 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14436 if not show_spinner:
14437 spinner.update = spinner.update_quiet
14440 favorites = mtimedb["resume"].get("favorites")
14441 if not isinstance(favorites, list):
14445 print "Calculating dependencies ",
14446 myparams = create_depgraph_params(myopts, myaction)
14448 resume_data = mtimedb["resume"]
14449 mergelist = resume_data["mergelist"]
14450 if mergelist and "--skipfirst" in myopts:
14451 for i, task in enumerate(mergelist):
14452 if isinstance(task, list) and \
14453 task and task[-1] == "merge":
14460 success, mydepgraph, dropped_tasks = resume_depgraph(
14461 settings, trees, mtimedb, myopts, myparams, spinner)
14462 except (portage.exception.PackageNotFound,
14463 depgraph.UnsatisfiedResumeDep), e:
14464 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14465 mydepgraph = e.depgraph
14468 from textwrap import wrap
14469 from portage.output import EOutput
14472 resume_data = mtimedb["resume"]
14473 mergelist = resume_data.get("mergelist")
14474 if not isinstance(mergelist, list):
14476 if mergelist and debug or (verbose and not quiet):
14477 out.eerror("Invalid resume list:")
14480 for task in mergelist:
14481 if isinstance(task, list):
14482 out.eerror(indent + str(tuple(task)))
14485 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14486 out.eerror("One or more packages are either masked or " + \
14487 "have missing dependencies:")
14490 for dep in e.value:
14491 if dep.atom is None:
14492 out.eerror(indent + "Masked package:")
14493 out.eerror(2 * indent + str(dep.parent))
14496 out.eerror(indent + str(dep.atom) + " pulled in by:")
14497 out.eerror(2 * indent + str(dep.parent))
14499 msg = "The resume list contains packages " + \
14500 "that are either masked or have " + \
14501 "unsatisfied dependencies. " + \
14502 "Please restart/continue " + \
14503 "the operation manually, or use --skipfirst " + \
14504 "to skip the first package in the list and " + \
14505 "any other packages that may be " + \
14506 "masked or have missing dependencies."
14507 for line in wrap(msg, 72):
14509 elif isinstance(e, portage.exception.PackageNotFound):
14510 out.eerror("An expected package is " + \
14511 "not available: %s" % str(e))
14513 msg = "The resume list contains one or more " + \
14514 "packages that are no longer " + \
14515 "available. Please restart/continue " + \
14516 "the operation manually."
14517 for line in wrap(msg, 72):
14521 print "\b\b... done!"
14525 portage.writemsg("!!! One or more packages have been " + \
14526 "dropped due to\n" + \
14527 "!!! masking or unsatisfied dependencies:\n\n",
14529 for task in dropped_tasks:
14530 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
14531 portage.writemsg("\n", noiselevel=-1)
14534 if mydepgraph is not None:
14535 mydepgraph.display_problems()
14536 if not (ask or pretend):
14537 # delete the current list and also the backup
14538 # since it's probably stale too.
14539 for k in ("resume", "resume_backup"):
14540 mtimedb.pop(k, None)
14545 if ("--resume" in myopts):
14546 print darkgreen("emerge: It seems we have nothing to resume...")
14549 myparams = create_depgraph_params(myopts, myaction)
14550 if "--quiet" not in myopts and "--nodeps" not in myopts:
14551 print "Calculating dependencies ",
14553 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14555 retval, favorites = mydepgraph.select_files(myfiles)
14556 except portage.exception.PackageNotFound, e:
14557 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14559 except portage.exception.PackageSetNotFound, e:
14560 root_config = trees[settings["ROOT"]]["root_config"]
14561 display_missing_pkg_set(root_config, e.value)
14564 print "\b\b... done!"
14566 mydepgraph.display_problems()
14569 if "--pretend" not in myopts and \
14570 ("--ask" in myopts or "--tree" in myopts or \
14571 "--verbose" in myopts) and \
14572 not ("--quiet" in myopts and "--ask" not in myopts):
14573 if "--resume" in myopts:
14574 mymergelist = mydepgraph.altlist()
14575 if len(mymergelist) == 0:
14576 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14578 favorites = mtimedb["resume"]["favorites"]
14579 retval = mydepgraph.display(
14580 mydepgraph.altlist(reversed=tree),
14581 favorites=favorites)
14582 mydepgraph.display_problems()
14583 if retval != os.EX_OK:
14585 prompt="Would you like to resume merging these packages?"
14587 retval = mydepgraph.display(
14588 mydepgraph.altlist(reversed=("--tree" in myopts)),
14589 favorites=favorites)
14590 mydepgraph.display_problems()
14591 if retval != os.EX_OK:
14594 for x in mydepgraph.altlist():
14595 if isinstance(x, Package) and x.operation == "merge":
14599 sets = trees[settings["ROOT"]]["root_config"].sets
14600 world_candidates = None
14601 if "--noreplace" in myopts and \
14602 not oneshot and favorites:
14603 # Sets that are not world candidates are filtered
14604 # out here since the favorites list needs to be
14605 # complete for depgraph.loadResumeCommand() to
14606 # operate correctly.
14607 world_candidates = [x for x in favorites \
14608 if not (x.startswith(SETPREFIX) and \
14609 not sets[x[1:]].world_candidate)]
14610 if "--noreplace" in myopts and \
14611 not oneshot and world_candidates:
14613 for x in world_candidates:
14614 print " %s %s" % (good("*"), x)
14615 prompt="Would you like to add these packages to your world favorites?"
14616 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14617 prompt="Nothing to merge; would you like to auto-clean packages?"
14620 print "Nothing to merge; quitting."
14623 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14624 prompt="Would you like to fetch the source files for these packages?"
14626 prompt="Would you like to merge these packages?"
14628 if "--ask" in myopts and userquery(prompt) == "No":
14633 # Don't ask again (e.g. when auto-cleaning packages after merge)
14634 myopts.pop("--ask", None)
14636 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14637 if ("--resume" in myopts):
14638 mymergelist = mydepgraph.altlist()
14639 if len(mymergelist) == 0:
14640 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14642 favorites = mtimedb["resume"]["favorites"]
14643 retval = mydepgraph.display(
14644 mydepgraph.altlist(reversed=tree),
14645 favorites=favorites)
14646 mydepgraph.display_problems()
14647 if retval != os.EX_OK:
14650 retval = mydepgraph.display(
14651 mydepgraph.altlist(reversed=("--tree" in myopts)),
14652 favorites=favorites)
14653 mydepgraph.display_problems()
14654 if retval != os.EX_OK:
14656 if "--buildpkgonly" in myopts:
14657 graph_copy = mydepgraph.digraph.clone()
14658 removed_nodes = set()
14659 for node in graph_copy:
14660 if not isinstance(node, Package) or \
14661 node.operation == "nomerge":
14662 removed_nodes.add(node)
14663 graph_copy.difference_update(removed_nodes)
14664 if not graph_copy.hasallzeros(ignore_priority = \
14665 DepPrioritySatisfiedRange.ignore_medium):
14666 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14667 print "!!! You have to merge the dependencies before you can build this package.\n"
14670 if "--buildpkgonly" in myopts:
14671 graph_copy = mydepgraph.digraph.clone()
14672 removed_nodes = set()
14673 for node in graph_copy:
14674 if not isinstance(node, Package) or \
14675 node.operation == "nomerge":
14676 removed_nodes.add(node)
14677 graph_copy.difference_update(removed_nodes)
14678 if not graph_copy.hasallzeros(ignore_priority = \
14679 DepPrioritySatisfiedRange.ignore_medium):
14680 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14681 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
14684 if ("--resume" in myopts):
14685 favorites=mtimedb["resume"]["favorites"]
14686 mymergelist = mydepgraph.altlist()
14687 mydepgraph.break_refs(mymergelist)
14688 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14689 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14690 del mydepgraph, mymergelist
14691 clear_caches(trees)
14693 retval = mergetask.merge()
14694 merge_count = mergetask.curval
14696 if "resume" in mtimedb and \
14697 "mergelist" in mtimedb["resume"] and \
14698 len(mtimedb["resume"]["mergelist"]) > 1:
14699 mtimedb["resume_backup"] = mtimedb["resume"]
14700 del mtimedb["resume"]
14702 mtimedb["resume"]={}
14703 # Stored as a dict starting with portage-2.1.6_rc1, and supported
14704 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14705 # a list type for options.
14706 mtimedb["resume"]["myopts"] = myopts.copy()
14708 # Convert Atom instances to plain str.
14709 mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14711 pkglist = mydepgraph.altlist()
14712 mydepgraph.saveNomergeFavorites()
14713 mydepgraph.break_refs(pkglist)
14714 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14715 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14716 del mydepgraph, pkglist
14717 clear_caches(trees)
14719 retval = mergetask.merge()
14720 merge_count = mergetask.curval
14722 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14723 if "yes" == settings.get("AUTOCLEAN"):
14724 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14725 unmerge(trees[settings["ROOT"]]["root_config"],
14726 myopts, "clean", [],
14727 ldpath_mtimes, autoclean=1)
14729 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14730 + " AUTOCLEAN is disabled. This can cause serious"
14731 + " problems due to overlapping packages.\n")
14732 trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
def multiple_actions(action1, action2):
    """Report that two mutually exclusive emerge actions were requested.

    Writes a two-line diagnostic naming both conflicting actions to stderr.
    NOTE(review): no exit/raise is visible in this excerpt; callers appear
    to be responsible for terminating afterwards -- confirm.
    """
    diagnostic = (
        "\n!!! Multiple actions requested... Please choose one only.\n"
        "!!! '%s' or '%s'\n\n" % (action1, action2)
    )
    sys.stderr.write(diagnostic)
# NOTE(review): this excerpt is a line-sampled dump -- the leading numbers are
# residual source line numbers, and several statements (the initialization of
# new_args/job_count/saved_opts, the argument loop header, and the final
# return) are not visible here.  Comments describe only the visible logic.
14741 def insert_optional_args(args):
14743 	Parse optional arguments and insert a value if one has
14744 	not been provided. This is done before feeding the args
14745 	to the optparse parser since that parser does not support
14746 	this feature natively.
# Both spellings of the optional-value jobs option.
14750 	jobs_opts = ("-j", "--jobs")
# Work on a reversed copy so arguments can be popped front-to-back.
14751 	arg_stack = args[:]
14752 	arg_stack.reverse()
14754 		arg = arg_stack.pop()
# A clustered short option such as "-vj" also counts as a jobs option.
14756 		short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
14757 		if not (short_job_opt or arg in jobs_opts):
14758 			new_args.append(arg)
14761 		# Insert an empty placeholder in order to
14762 		# satisfy the requirements of optparse.
14764 		new_args.append("--jobs")
# Split a short cluster like "-j4" or "-vj" into the attached job count
# and the remaining short options.
14767 		if short_job_opt and len(arg) > 2:
14768 			if arg[:2] == "-j":
14770 				job_count = int(arg[2:])
14772 				saved_opts = arg[2:]
14775 				saved_opts = arg[1:].replace("j", "")
# No count attached: try to consume the following argument as the count.
14777 		if job_count is None and arg_stack:
14779 				job_count = int(arg_stack[-1])
14783 				# Discard the job count from the stack
14784 				# since we're consuming it here.
14787 		if job_count is None:
14788 			# unlimited number of jobs
14789 			new_args.append("True")
14791 			new_args.append(str(job_count))
# Re-append any short options that were clustered together with "j".
14793 		if saved_opts is not None:
14794 			new_args.append("-" + saved_opts)
# NOTE(review): sampled excerpt -- the argument_options dict bodies are
# truncated and several initializations (myopts, myaction, myfiles, the
# try: headers for the int()/float() conversions) are not visible here.
# Parse the emerge command line into (action, option-dict, file/atom list).
14798 def parse_opts(tmpcmdline, silent=False):
# Relies on the module-level option tables defined elsewhere in this file.
14803 	global actions, options, shortmapping
14805 	longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
# Options that take an argument, mapped to their optparse add_option kwargs.
14806 	argument_options = {
14808 		"help":"specify the location for portage configuration files",
14812 		"help":"enable or disable color output",
14814 		"choices":("y", "n")
14819 		"help" : "Specifies the number of packages to build " + \
14825 		"--load-average": {
14827 		"help" :"Specifies that no new builds should be started " + \
14828 		"if there are other builds running and the load average " + \
14829 		"is at least LOAD (a floating-point number).",
14835 		"help":"include unnecessary build time dependencies",
14837 		"choices":("y", "n")
14840 		"help":"specify conditions to trigger package reinstallation",
14842 		"choices":["changed-use"]
14845 		"help" : "specify the target root filesystem for merging packages",
14850 	from optparse import OptionParser
14851 	parser = OptionParser()
# emerge implements its own --help action, so drop optparse's builtin.
14852 	if parser.has_option("--help"):
14853 		parser.remove_option("--help")
# Register every known action, flag option, short option and alias as a
# store_true option; argument-taking options get their kwargs dicts.
14855 	for action_opt in actions:
14856 		parser.add_option("--" + action_opt, action="store_true",
14857 			dest=action_opt.replace("-", "_"), default=False)
14858 	for myopt in options:
14859 		parser.add_option(myopt, action="store_true",
14860 			dest=myopt.lstrip("--").replace("-", "_"), default=False)
14861 	for shortopt, longopt in shortmapping.iteritems():
14862 		parser.add_option("-" + shortopt, action="store_true",
14863 			dest=longopt.lstrip("--").replace("-", "_"), default=False)
14864 	for myalias, myopt in longopt_aliases.iteritems():
14865 		parser.add_option(myalias, action="store_true",
14866 			dest=myopt.lstrip("--").replace("-", "_"), default=False)
14868 	for myopt, kwargs in argument_options.iteritems():
14869 		parser.add_option(myopt,
14870 			dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
# Normalize optional-value options (e.g. bare --jobs) before parsing.
14872 	tmpcmdline = insert_optional_args(tmpcmdline)
14874 	myoptions, myargs = parser.parse_args(args=tmpcmdline)
# "--jobs True" is the placeholder meaning "unlimited jobs".
14878 	if myoptions.jobs == "True":
14882 			jobs = int(myoptions.jobs)
14886 	if jobs is not True and \
14890 			writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
14891 				(myoptions.jobs,), noiselevel=-1)
14893 		myoptions.jobs = jobs
# Validate --load-average: non-positive values disable the limit.
14895 	if myoptions.load_average:
14897 			load_average = float(myoptions.load_average)
14901 		if load_average <= 0.0:
14902 			load_average = None
14904 			writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
14905 				(myoptions.load_average,), noiselevel=-1)
14907 		myoptions.load_average = load_average
# Copy parsed values back into the string-keyed myopts dict.
14909 	for myopt in options:
14910 		v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
14912 			myopts[myopt] = True
14914 	for myopt in argument_options:
14915 		v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
# --searchdesc implies --search.
14919 	if myoptions.searchdesc:
14920 		myoptions.search = True
# Exactly one action may be requested; report conflicts otherwise.
14922 	for action_opt in actions:
14923 		v = getattr(myoptions, action_opt.replace("-", "_"))
14926 				multiple_actions(myaction, action_opt)
14928 			myaction = action_opt
14932 	return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
    """Validate the vartree configuration of every root in *trees*.

    Delegates to each settings object's own validate() so that profile or
    environment problems are reported before any action proceeds.
    """
    for root_key in trees:
        trees[root_key]["vartree"].settings.validate()
def clear_caches(trees):
    """Drop memoized/cached state from every tree database to free memory."""
    for tree_group in trees.itervalues():
        porttree_db = tree_group["porttree"].dbapi
        # melt() reverses freeze(): discard memoized match results.
        porttree_db.melt()
        porttree_db._aux_cache.clear()
        bintree_db = tree_group["bintree"].dbapi
        bintree_db._aux_cache.clear()
        bintree_db._clear_cache()
        tree_group["vartree"].dbapi.linkmap._clear_cache()
    # The directory-listing cache is global to portage, so clear it once.
    portage.dircache.clear()
# NOTE(review): sampled excerpt -- the initialization of kwargs and the
# assignment kwargs[k] = v are not visible here.
# Build (settings, trees, mtimedb) from the environment; reuses *trees*
# when given so existing tree objects are refreshed rather than recreated.
14949 def load_emerge_config(trees=None):
# Honor PORTAGE_CONFIGROOT and ROOT overrides from the environment.
14951 	for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
14952 		v = os.environ.get(envvar, None)
14953 		if v and v.strip():
14955 	trees = portage.create_trees(trees=trees, **kwargs)
# Attach a RootConfig (settings + set configuration) to each root.
14957 	for root, root_trees in trees.iteritems():
14958 		settings = root_trees["vartree"].settings
14959 		setconfig = load_default_config(settings, root_trees)
14960 		root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
14962 	settings = trees["/"]["vartree"].settings
14964 	for myroot in trees:
14966 			settings = trees[myroot]["vartree"].settings
# Load the shared modification-time database used for resume state etc.
14969 	mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
14970 	mtimedb = portage.MtimeDB(mtimedbfile)
14972 	return settings, trees, mtimedb
# NOTE(review): sampled excerpt -- several try: headers, default-value
# initializations and else: lines are not visible here.
14974 def adjust_config(myopts, settings):
14975 	"""Make emerge specific adjustments to the config."""
14977 	# To enhance usability, make some vars case insensitive by forcing them to
# lower case, then persist the change past config reloads.
14979 	for myvar in ("AUTOCLEAN", "NOCOLOR"):
14980 		if myvar in settings:
14981 			settings[myvar] = settings[myvar].lower()
14982 			settings.backup_changes(myvar)
14985 	# Kill noauto as it will break merges otherwise.
14986 	if "noauto" in settings.features:
14987 		settings.features.remove('noauto')
14988 		settings['FEATURES'] = ' '.join(sorted(settings.features))
14989 		settings.backup_changes("FEATURES")
# Parse CLEAN_DELAY as an integer, falling back to the prior default and
# warning on invalid values.
14993 		CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
14994 	except ValueError, e:
14995 		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14996 		portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
14997 			settings["CLEAN_DELAY"], noiselevel=-1)
14998 	settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
14999 	settings.backup_changes("CLEAN_DELAY")
# Same integer parsing/fallback treatment for EMERGE_WARNING_DELAY.
15001 	EMERGE_WARNING_DELAY = 10
15003 		EMERGE_WARNING_DELAY = int(settings.get(
15004 			"EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
15005 	except ValueError, e:
15006 		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15007 		portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
15008 			settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
15009 	settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
15010 	settings.backup_changes("EMERGE_WARNING_DELAY")
# Mirror the relevant command-line flags into persistent config variables.
15012 	if "--quiet" in myopts:
15013 		settings["PORTAGE_QUIET"]="1"
15014 		settings.backup_changes("PORTAGE_QUIET")
15016 	if "--verbose" in myopts:
15017 		settings["PORTAGE_VERBOSE"] = "1"
15018 		settings.backup_changes("PORTAGE_VERBOSE")
15020 	# Set so that configs will be merged regardless of remembered status
15021 	if ("--noconfmem" in myopts):
15022 		settings["NOCONFMEM"]="1"
15023 		settings.backup_changes("NOCONFMEM")
15025 	# Set various debug markers... They should be merged somehow.
# PORTAGE_DEBUG must parse as an integer and be exactly 0 or 1.
15028 		PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
15029 		if PORTAGE_DEBUG not in (0, 1):
15030 			portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
15031 				PORTAGE_DEBUG, noiselevel=-1)
15032 			portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
15035 	except ValueError, e:
15036 		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15037 		portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
15038 			settings["PORTAGE_DEBUG"], noiselevel=-1)
15040 	if "--debug" in myopts:
15042 	settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
15043 	settings.backup_changes("PORTAGE_DEBUG")
# Enable color by default unless NOCOLOR says otherwise.
15045 	if settings.get("NOCOLOR") not in ("yes","true"):
15046 		portage.output.havecolor = 1
15048 	"""The explicit --color < y | n > option overrides the NOCOLOR environment
15049 	variable and stdout auto-detection."""
15050 	if "--color" in myopts:
15051 		if "y" == myopts["--color"]:
15052 			portage.output.havecolor = 1
15053 			settings["NOCOLOR"] = "false"
15055 			portage.output.havecolor = 0
15056 			settings["NOCOLOR"] = "true"
15057 		settings.backup_changes("NOCOLOR")
# No explicit --color: disable color for non-tty output unless overridden.
15058 	elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
15059 		portage.output.havecolor = 0
15060 		settings["NOCOLOR"] = "true"
15061 		settings.backup_changes("NOCOLOR")
# Apply process scheduling priorities from the given config.
# NOTE(review): the function body is not visible in this sampled excerpt
# (presumably it invokes nice() and ionice() defined below -- confirm).
15063 def apply_priorities(settings):
# Renice this process to PORTAGE_NICENESS (default "0"); a failure to
# change the nice value is reported but not fatal.
# NOTE(review): the try: line preceding os.nice() is not visible in this
# sampled excerpt.
15067 def nice(settings):
15069 		os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
# OSError: os.nice failed; ValueError: PORTAGE_NICENESS not an integer.
15070 	except (OSError, ValueError), e:
15071 		out = portage.output.EOutput()
15072 		out.eerror("Failed to change nice value to '%s'" % \
15073 			settings["PORTAGE_NICENESS"])
15074 		out.eerror("%s\n" % str(e))
# Run the user-configured PORTAGE_IONICE_COMMAND (with ${PID} expanded to
# this process id) to set the I/O priority of this process.
# NOTE(review): sampled excerpt -- the early return for an unset command
# and the try:/return lines around spawn() are not visible here.
15076 def ionice(settings):
15078 	ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
15080 		ionice_cmd = shlex.split(ionice_cmd)
15084 	from portage.util import varexpand
# Substitute ${PID} in the configured command with our own pid.
15085 	variables = {"PID" : str(os.getpid())}
15086 	cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
15089 		rval = portage.process.spawn(cmd, env=os.environ)
15090 	except portage.exception.CommandNotFound:
15091 		# The OS kernel probably doesn't support ionice,
15092 		# so return silently.
# A non-zero exit status from the ionice command is reported to the user.
15095 	if rval != os.EX_OK:
15096 		out = portage.output.EOutput()
15097 		out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
15098 		out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
# Report at ERROR level that *set_name* does not exist, listing the set
# names that are defined for this root.
# NOTE(review): the msg = [] initialization is not visible in this
# sampled excerpt.
15100 def display_missing_pkg_set(root_config, set_name):
15103 	msg.append(("emerge: There are no sets to satisfy '%s'. " + \
15104 		"The following sets exist:") % \
15105 		colorize("INFORM", set_name))
15108 	for s in sorted(root_config.sets):
15109 		msg.append(" %s" % s)
15112 	writemsg_level("".join("%s\n" % l for l in msg),
15113 		level=logging.ERROR, noiselevel=-1)
# Expand "@set" arguments in myfiles into concrete atoms (or keep them as
# set references for actions that expand sets themselves), evaluating
# simple set expressions and per-set option arguments along the way.
# NOTE(review): sampled excerpt -- several initializations (newargs,
# missing_sets, options, retval, ARG_START/ARG_END, IS_OPERATOR) and some
# loop/else lines are not visible here.
15115 def expand_set_arguments(myfiles, myaction, root_config):
15117 	setconfig = root_config.setconfig
15119 	sets = setconfig.getSets()
15121 	# In order to know exactly which atoms/sets should be added to the
15122 	# world file, the depgraph performs set expansion later. It will get
15123 	# confused about where the atoms came from if it's not allowed to
15124 	# expand them itself.
15125 	do_not_expand = (None, )
# Accept bare "system"/"world" as shorthand for the prefixed set names.
15128 		if a in ("system", "world"):
15129 			newargs.append(SETPREFIX+a)
15136 	# separators for set arguments
15140 	# WARNING: all operators must be of equal length
15142 	DIFF_OPERATOR = "-@"
15143 	UNION_OPERATOR = "+@"
# First pass: parse per-set option arguments of the form @name[k=v,...]
# and push them into the set configuration.
15145 	for i in range(0, len(myfiles)):
15146 		if myfiles[i].startswith(SETPREFIX):
15149 			x = myfiles[i][len(SETPREFIX):]
15152 				start = x.find(ARG_START)
15153 				end = x.find(ARG_END)
15154 				if start > 0 and start < end:
15155 					namepart = x[:start]
15156 					argpart = x[start+1:end]
15158 					# TODO: implement proper quoting
15159 					args = argpart.split(",")
15163 							k, v = a.split("=", 1)
15166 							options[a] = "True"
15167 					setconfig.update(namepart, options)
15168 					newset += (x[:start-len(namepart)]+namepart)
15169 					x = x[end+len(ARG_END):]
15173 			myfiles[i] = SETPREFIX+newset
# Reload sets now that per-set options may have changed.
15175 	sets = setconfig.getSets()
15177 	# display errors that occurred while loading the SetConfig instance
15178 	for e in setconfig.errors:
15179 		print colorize("BAD", "Error during set creation: %s" % e)
15181 	# emerge relies on the existence of sets with names "world" and "system"
15182 	required_sets = ("world", "system")
15185 	for s in required_sets:
15187 			missing_sets.append(s)
# Build a human-readable, correctly punctuated list of missing set names.
15189 	if len(missing_sets) > 2:
15190 		missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
15191 		missing_sets_str += ', and "%s"' % missing_sets[-1]
15192 	elif len(missing_sets) == 2:
15193 		missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
15195 		missing_sets_str = '"%s"' % missing_sets[-1]
15196 	msg = ["emerge: incomplete set configuration, " + \
15197 		"missing set(s): %s" % missing_sets_str]
15199 		msg.append("        sets defined: %s" % ", ".join(sets))
15200 	msg.append("        This usually means that '%s'" % \
15201 		(os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
15202 	msg.append("        is missing or corrupt.")
15204 		writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
15206 	unmerge_actions = ("unmerge", "prune", "clean", "depclean")
# Second pass: evaluate each @set argument.
15209 		if a.startswith(SETPREFIX):
15210 			# support simple set operations (intersection, difference and union)
15211 			# on the commandline. Expressions are evaluated strictly left-to-right
15212 			if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
15213 				expression = a[len(SETPREFIX):]
# Peel operators off right-to-left to obtain a left-to-right op list.
15216 				while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15217 					is_pos = expression.rfind(IS_OPERATOR)
15218 					diff_pos = expression.rfind(DIFF_OPERATOR)
15219 					union_pos = expression.rfind(UNION_OPERATOR)
15220 					op_pos = max(is_pos, diff_pos, union_pos)
15221 					s1 = expression[:op_pos]
15222 					s2 = expression[op_pos+len(IS_OPERATOR):]
15223 					op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15225 						display_missing_pkg_set(root_config, s2)
15227 					expr_sets.insert(0, s2)
15228 					expr_ops.insert(0, op)
15230 				if not expression in sets:
15231 					display_missing_pkg_set(root_config, expression)
15233 				expr_sets.insert(0, expression)
# Apply the collected operators left-to-right over the atom sets.
15234 				result = set(setconfig.getSetAtoms(expression))
15235 				for i in range(0, len(expr_ops)):
15236 					s2 = setconfig.getSetAtoms(expr_sets[i+1])
15237 					if expr_ops[i] == IS_OPERATOR:
15238 						result.intersection_update(s2)
15239 					elif expr_ops[i] == DIFF_OPERATOR:
15240 						result.difference_update(s2)
15241 					elif expr_ops[i] == UNION_OPERATOR:
15244 						raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15245 				newargs.extend(result)
# Plain @set argument (no operators).
15247 				s = a[len(SETPREFIX):]
15249 					display_missing_pkg_set(root_config, s)
15251 				setconfig.active.append(s)
15253 					set_atoms = setconfig.getSetAtoms(s)
15254 				except portage.exception.PackageSetNotFound, e:
15255 					writemsg_level(("emerge: the given set '%s' " + \
15256 						"contains a non-existent set named '%s'.\n") % \
15257 						(s, e), level=logging.ERROR, noiselevel=-1)
15259 				if myaction in unmerge_actions and \
15260 						not sets[s].supportsOperation("unmerge"):
15261 					sys.stderr.write("emerge: the given set '%s' does " % s + \
15262 						"not support unmerge operations\n")
15264 				elif not set_atoms:
15265 					print "emerge: '%s' is an empty set" % s
# Expand into atoms unless the action handles sets itself.
15266 				elif myaction not in do_not_expand:
15267 					newargs.extend(set_atoms)
15269 					newargs.append(SETPREFIX+s)
15270 				for e in sets[s].errors:
15274 	return (newargs, retval)
# Warn about repositories that lack a profiles/repo_name entry; returns
# True when at least one repository is missing one.
# NOTE(review): sampled excerpt -- the msg = [] initialization and the
# "for r in repos:" loop header are not visible here.
15276 def repo_name_check(trees):
15277 	missing_repo_names = set()
15278 	for root, root_trees in trees.iteritems():
15279 		if "porttree" in root_trees:
15280 			portdb = root_trees["porttree"].dbapi
# Start with all porttrees, then discard those with known repo names.
15281 			missing_repo_names.update(portdb.porttrees)
15282 			repos = portdb.getRepositories()
15284 				missing_repo_names.discard(portdb.getRepositoryPath(r))
15285 			if portdb.porttree_root in missing_repo_names and \
15286 				not os.path.exists(os.path.join(
15287 				portdb.porttree_root, "profiles")):
15288 				# This is normal if $PORTDIR happens to be empty,
15289 				# so don't warn about it.
15290 				missing_repo_names.remove(portdb.porttree_root)
15292 	if missing_repo_names:
15294 		msg.append("WARNING: One or more repositories " + \
15295 			"have missing repo_name entries:")
15297 		for p in missing_repo_names:
15298 			msg.append("\t%s/profiles/repo_name" % (p,))
15300 		msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
15301 			"should be a plain text file containing a unique " + \
15302 			"name for the repository on the first line.", 70))
15303 		writemsg_level("".join("%s\n" % l for l in msg),
15304 			level=logging.WARNING, noiselevel=-1)
15306 	return bool(missing_repo_names)
# Warn (per root) when CONFIG_PROTECT is unset/empty in that root's config.
# NOTE(review): sampled excerpt -- the line guarding the "for '%s'" suffix
# (presumably a root != "/" check) is not visible here.
15308 def config_protect_check(trees):
15309 	for root, root_trees in trees.iteritems():
15310 		if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
15311 			msg = "!!! CONFIG_PROTECT is empty"
15313 				msg += " for '%s'" % root
15314 			writemsg_level(msg, level=logging.WARN, noiselevel=-1)
# Report an ambiguous short ebuild name: in --quiet mode just list the
# matching fully-qualified names; otherwise show search-style output for
# each candidate.
# NOTE(review): sampled excerpt -- the return after the quiet branch and
# parts of the search-output loop are not visible here.
15316 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
15318 	if "--quiet" in myopts:
15319 		print "!!! The short ebuild name \"%s\" is ambiguous.  Please specify" % arg
15320 		print "!!! one of the following fully-qualified ebuild names instead:\n"
# De-duplicate the category/package keys of the matching atoms.
15321 		for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15322 			print "    " + colorize("INFORM", cp)
# Verbose mode: reuse the search machinery to describe each candidate.
15325 	s = search(root_config, spinner, "--searchdesc" in myopts,
15326 		"--quiet" not in myopts, "--usepkg" in myopts,
15327 		"--usepkgonly" in myopts)
15328 	null_cp = portage.dep_getkey(insert_category_into_atom(
15330 	cat, atom_pn = portage.catsplit(null_cp)
15331 	s.searchkey = atom_pn
15332 	for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15335 	print "!!! The short ebuild name \"%s\" is ambiguous.  Please specify" % arg
15336 	print "!!! one of the above fully-qualified ebuild names instead.\n"
# Verify that at least one root has a valid profile; certain actions and
# options are exempt from the check.  Emits an ERROR-level explanation
# when no valid profile is found.
# NOTE(review): sampled excerpt -- the return statements (early exemption
# returns, per-root success, and the failure return) are not visible here.
15338 def profile_check(trees, myaction, myopts):
15339 	if myaction in ("info", "sync"):
15341 	elif "--version" in myopts or "--help" in myopts:
15343 	for root, root_trees in trees.iteritems():
15344 		if root_trees["root_config"].settings.profiles:
15346 	# generate some profile related warning messages
15347 	validate_ebuild_environment(trees)
15348 	msg = "If you have just changed your profile configuration, you " + \
15349 		"should revert back to the previous configuration. Due to " + \
15350 		"your current profile being invalid, allowed actions are " + \
15351 		"limited to --help, --info, --sync, and --version."
15352 	writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
15353 		level=logging.ERROR, noiselevel=-1)
15358 global portage # NFC why this is necessary now - genone
15359 portage._disable_legacy_globals()
15360 # Disable color until we're sure that it should be enabled (after
15361 # EMERGE_DEFAULT_OPTS has been parsed).
15362 portage.output.havecolor = 0
15363 # This first pass is just for options that need to be known as early as
15364 # possible, such as --config-root. They will be parsed again later,
15365 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15366 # the value of --config-root).
15367 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15368 if "--debug" in myopts:
15369 os.environ["PORTAGE_DEBUG"] = "1"
15370 if "--config-root" in myopts:
15371 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15372 if "--root" in myopts:
15373 os.environ["ROOT"] = myopts["--root"]
15375 # Portage needs to ensure a sane umask for the files it creates.
15377 settings, trees, mtimedb = load_emerge_config()
15378 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15379 rval = profile_check(trees, myaction, myopts)
15380 if rval != os.EX_OK:
15383 if portage._global_updates(trees, mtimedb["updates"]):
15385 # Reload the whole config from scratch.
15386 settings, trees, mtimedb = load_emerge_config(trees=trees)
15387 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15389 xterm_titles = "notitles" not in settings.features
15392 if "--ignore-default-opts" not in myopts:
15393 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15394 tmpcmdline.extend(sys.argv[1:])
15395 myaction, myopts, myfiles = parse_opts(tmpcmdline)
15397 if "--digest" in myopts:
15398 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15399 # Reload the whole config from scratch so that the portdbapi internal
15400 # config is updated with new FEATURES.
15401 settings, trees, mtimedb = load_emerge_config(trees=trees)
15402 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15404 for myroot in trees:
15405 mysettings = trees[myroot]["vartree"].settings
15406 mysettings.unlock()
15407 adjust_config(myopts, mysettings)
15408 if '--pretend' not in myopts and myaction in \
15409 (None, 'clean', 'depclean', 'prune', 'unmerge'):
15410 mysettings["PORTAGE_COUNTER_HASH"] = \
15411 trees[myroot]["vartree"].dbapi._counter_hash()
15412 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15414 del myroot, mysettings
15416 apply_priorities(settings)
15418 spinner = stdout_spinner()
15419 if "candy" in settings.features:
15420 spinner.update = spinner.update_scroll
15422 if "--quiet" not in myopts:
15423 portage.deprecated_profile_check(settings=settings)
15424 repo_name_check(trees)
15425 config_protect_check(trees)
15427 eclasses_overridden = {}
15428 for mytrees in trees.itervalues():
15429 mydb = mytrees["porttree"].dbapi
15430 # Freeze the portdbapi for performance (memoize all xmatch results).
15432 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
15435 if eclasses_overridden and \
15436 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
15437 prefix = bad(" * ")
15438 if len(eclasses_overridden) == 1:
15439 writemsg(prefix + "Overlay eclass overrides " + \
15440 "eclass from PORTDIR:\n", noiselevel=-1)
15442 writemsg(prefix + "Overlay eclasses override " + \
15443 "eclasses from PORTDIR:\n", noiselevel=-1)
15444 writemsg(prefix + "\n", noiselevel=-1)
15445 for eclass_name in sorted(eclasses_overridden):
15446 writemsg(prefix + " '%s/%s.eclass'\n" % \
15447 (eclasses_overridden[eclass_name], eclass_name),
15449 writemsg(prefix + "\n", noiselevel=-1)
15450 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
15451 "because it will trigger invalidation of cached ebuild metadata " + \
15452 "that is distributed with the portage tree. If you must " + \
15453 "override eclasses from PORTDIR then you are advised to add " + \
15454 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
15455 "`emerge --regen` after each time that you run `emerge --sync`. " + \
15456 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
15457 "you would like to disable this warning."
15458 from textwrap import wrap
15459 for line in wrap(msg, 72):
15460 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
15462 if "moo" in myfiles:
15465 Larry loves Gentoo (""" + platform.system() + """)
15467 _______________________
15468 < Have you mooed today? >
15469 -----------------------
15479 ext = os.path.splitext(x)[1]
15480 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15481 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15484 root_config = trees[settings["ROOT"]]["root_config"]
15485 if myaction == "list-sets":
15486 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15490 # only expand sets for actions taking package arguments
15491 oldargs = myfiles[:]
15492 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15493 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15494 if retval != os.EX_OK:
15497 # Need to handle empty sets specially, otherwise emerge will react
15498 # with the help message for empty argument lists
15499 if oldargs and not myfiles:
15500 print "emerge: no targets left after set expansion"
15503 if ("--tree" in myopts) and ("--columns" in myopts):
15504 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15507 if ("--quiet" in myopts):
15508 spinner.update = spinner.update_quiet
15509 portage.util.noiselimit = -1
15511 # Always create packages if FEATURES=buildpkg
15512 # Imply --buildpkg if --buildpkgonly
15513 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15514 if "--buildpkg" not in myopts:
15515 myopts["--buildpkg"] = True
15517 # Always try and fetch binary packages if FEATURES=getbinpkg
15518 if ("getbinpkg" in settings.features):
15519 myopts["--getbinpkg"] = True
15521 if "--buildpkgonly" in myopts:
15522 # --buildpkgonly will not merge anything, so
15523 # it cancels all binary package options.
15524 for opt in ("--getbinpkg", "--getbinpkgonly",
15525 "--usepkg", "--usepkgonly"):
15526 myopts.pop(opt, None)
15528 if "--fetch-all-uri" in myopts:
15529 myopts["--fetchonly"] = True
15531 if "--skipfirst" in myopts and "--resume" not in myopts:
15532 myopts["--resume"] = True
15534 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15535 myopts["--usepkgonly"] = True
15537 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15538 myopts["--getbinpkg"] = True
15540 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15541 myopts["--usepkg"] = True
15543 # Also allow -K to apply --usepkg/-k
15544 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15545 myopts["--usepkg"] = True
15547 # Allow -p to remove --ask
15548 if ("--pretend" in myopts) and ("--ask" in myopts):
15549 print ">>> --pretend disables --ask... removing --ask from options."
15550 del myopts["--ask"]
15552 # forbid --ask when not in a terminal
15553 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15554 if ("--ask" in myopts) and (not sys.stdin.isatty()):
15555 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15559 if settings.get("PORTAGE_DEBUG", "") == "1":
15560 spinner.update = spinner.update_quiet
15562 if "python-trace" in settings.features:
15563 import portage.debug
15564 portage.debug.set_trace(True)
15566 if not ("--quiet" in myopts):
15567 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15568 spinner.update = spinner.update_basic
15570 if myaction == 'version':
15571 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15572 settings.profile_path, settings["CHOST"],
15573 trees[settings["ROOT"]]["vartree"].dbapi)
15575 elif "--help" in myopts:
15576 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15579 if "--debug" in myopts:
15580 print "myaction", myaction
15581 print "myopts", myopts
15583 if not myaction and not myfiles and "--resume" not in myopts:
15584 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15587 pretend = "--pretend" in myopts
15588 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15589 buildpkgonly = "--buildpkgonly" in myopts
15591 # check if root user is the current user for the actions where emerge needs this
15592 if portage.secpass < 2:
15593 # We've already allowed "--version" and "--help" above.
15594 if "--pretend" not in myopts and myaction not in ("search","info"):
15595 need_superuser = not \
15597 (buildpkgonly and secpass >= 1) or \
15598 myaction in ("metadata", "regen") or \
15599 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15600 if portage.secpass < 1 or \
15603 access_desc = "superuser"
15605 access_desc = "portage group"
15606 # Always show portage_group_warning() when only portage group
15607 # access is required but the user is not in the portage group.
15608 from portage.data import portage_group_warning
15609 if "--ask" in myopts:
15610 myopts["--pretend"] = True
15611 del myopts["--ask"]
15612 print ("%s access is required... " + \
15613 "adding --pretend to options.\n") % access_desc
15614 if portage.secpass < 1 and not need_superuser:
15615 portage_group_warning()
15617 sys.stderr.write(("emerge: %s access is " + \
15618 "required.\n\n") % access_desc)
15619 if portage.secpass < 1 and not need_superuser:
15620 portage_group_warning()
15623 disable_emergelog = False
15624 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15626 disable_emergelog = True
15628 if myaction in ("search", "info"):
15629 disable_emergelog = True
15630 if disable_emergelog:
15631 """ Disable emergelog for everything except build or unmerge
15632 operations. This helps minimize parallel emerge.log entries that can
15633 confuse log parsers. We especially want it disabled during
15634 parallel-fetch, which uses --resume --fetchonly."""
15636 def emergelog(*pargs, **kargs):
15639 if not "--pretend" in myopts:
15640 emergelog(xterm_titles, "Started emerge on: "+\
15641 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15644 myelogstr=" ".join(myopts)
15646 myelogstr+=" "+myaction
15648 myelogstr += " " + " ".join(oldargs)
15649 emergelog(xterm_titles, " *** emerge " + myelogstr)
15652 def emergeexitsig(signum, frame):
15653 signal.signal(signal.SIGINT, signal.SIG_IGN)
15654 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15655 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15656 sys.exit(100+signum)
15657 signal.signal(signal.SIGINT, emergeexitsig)
15658 signal.signal(signal.SIGTERM, emergeexitsig)
15661 """This gets out final log message in before we quit."""
15662 if "--pretend" not in myopts:
15663 emergelog(xterm_titles, " *** terminating.")
15664 if "notitles" not in settings.features:
15666 portage.atexit_register(emergeexit)
15668 if myaction in ("config", "metadata", "regen", "sync"):
15669 if "--pretend" in myopts:
15670 sys.stderr.write(("emerge: The '%s' action does " + \
15671 "not support '--pretend'.\n") % myaction)
15674 if "sync" == myaction:
15675 return action_sync(settings, trees, mtimedb, myopts, myaction)
15676 elif "metadata" == myaction:
15677 action_metadata(settings, portdb, myopts)
15678 elif myaction=="regen":
15679 validate_ebuild_environment(trees)
15680 return action_regen(settings, portdb, myopts.get("--jobs"),
15681 myopts.get("--load-average"))
15683 elif "config"==myaction:
15684 validate_ebuild_environment(trees)
15685 action_config(settings, trees, myopts, myfiles)
15688 elif "search"==myaction:
15689 validate_ebuild_environment(trees)
15690 action_search(trees[settings["ROOT"]]["root_config"],
15691 myopts, myfiles, spinner)
15692 elif myaction in ("clean", "unmerge") or \
15693 (myaction == "prune" and "--nodeps" in myopts):
15694 validate_ebuild_environment(trees)
15696 # Ensure atoms are valid before calling unmerge().
15697 # For backward compat, leading '=' is not required.
15699 if is_valid_package_atom(x) or \
15700 is_valid_package_atom("=" + x):
15703 msg.append("'%s' is not a valid package atom." % (x,))
15704 msg.append("Please check ebuild(5) for full details.")
15705 writemsg_level("".join("!!! %s\n" % line for line in msg),
15706 level=logging.ERROR, noiselevel=-1)
15709 # When given a list of atoms, unmerge
15710 # them in the order given.
15711 ordered = myaction == "unmerge"
15712 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15713 mtimedb["ldpath"], ordered=ordered):
15714 if not (buildpkgonly or fetchonly or pretend):
15715 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15717 elif myaction in ("depclean", "info", "prune"):
15719 # Ensure atoms are valid before calling unmerge().
15720 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15723 if is_valid_package_atom(x):
15725 valid_atoms.append(
15726 portage.dep_expand(x, mydb=vardb, settings=settings))
15727 except portage.exception.AmbiguousPackageName, e:
15728 msg = "The short ebuild name \"" + x + \
15729 "\" is ambiguous. Please specify " + \
15730 "one of the following " + \
15731 "fully-qualified ebuild names instead:"
15732 for line in textwrap.wrap(msg, 70):
15733 writemsg_level("!!! %s\n" % (line,),
15734 level=logging.ERROR, noiselevel=-1)
15736 writemsg_level(" %s\n" % colorize("INFORM", i),
15737 level=logging.ERROR, noiselevel=-1)
15738 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15742 msg.append("'%s' is not a valid package atom." % (x,))
15743 msg.append("Please check ebuild(5) for full details.")
15744 writemsg_level("".join("!!! %s\n" % line for line in msg),
15745 level=logging.ERROR, noiselevel=-1)
15748 if myaction == "info":
15749 return action_info(settings, trees, myopts, valid_atoms)
15751 validate_ebuild_environment(trees)
15752 action_depclean(settings, trees, mtimedb["ldpath"],
15753 myopts, myaction, valid_atoms, spinner)
15754 if not (buildpkgonly or fetchonly or pretend):
15755 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15756 # "update", "system", or just process files:
15758 validate_ebuild_environment(trees)
15761 if x.startswith(SETPREFIX) or \
15762 is_valid_package_atom(x):
15764 if x[:1] == os.sep:
15772 msg.append("'%s' is not a valid package atom." % (x,))
15773 msg.append("Please check ebuild(5) for full details.")
15774 writemsg_level("".join("!!! %s\n" % line for line in msg),
15775 level=logging.ERROR, noiselevel=-1)
15778 if "--pretend" not in myopts:
15779 display_news_notification(root_config, myopts)
15780 retval = action_build(settings, trees, mtimedb,
15781 myopts, myaction, myfiles, spinner)
15782 root_config = trees[settings["ROOT"]]["root_config"]
15783 post_emerge(root_config, myopts, mtimedb, retval)