2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
8 from collections import deque
28 from os import path as osp
29 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
32 from portage import digraph
33 from portage.const import NEWS_LIB_PATH
36 import portage.xpak, commands, errno, re, socket, time
37 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
38 nc_len, red, teal, turquoise, xtermTitle, \
39 xtermTitleReset, yellow
40 from portage.output import create_color_func
41 good = create_color_func("GOOD")
42 bad = create_color_func("BAD")
43 # white looks bad on terminals with white background
44 from portage.output import bold as white
48 portage.dep._dep_check_strict = True
51 import portage.exception
52 from portage.data import secpass
53 from portage.elog.messages import eerror
54 from portage.util import normalize_path as normpath
55 from portage.util import cmp_sort_key, writemsg, writemsg_level
56 from portage.sets import load_default_config, SETPREFIX
57 from portage.sets.base import InternalPackageSet
59 from itertools import chain, izip
62 import cPickle as pickle
67 from cStringIO import StringIO
69 from StringIO import StringIO
# Progress spinner written to stdout. Visible modes: basic dots, a scrolling
# humorous message, a twirling bar, and (presumably) a quiet no-op.
# NOTE(review): the embedded original line numbers skip values, so several
# statements are elided from this chunk (the scroll_msgs list header, the
# `return` lines after each _return_early() check, flush calls, __init__
# header). Code below is preserved as-is; do not assume it is complete.
71 class stdout_spinner(object):
# Messages rotated through by the scrolling display mode.
73 "Gentoo Rocks ("+platform.system()+")",
74 "Thank you for using Gentoo. :)",
75 "Are you actually trying to read this?",
76 "How many times have you stared at this?",
77 "We are generating the cache right now",
78 "You are paying too much attention.",
79 "A theory is better than its explanation.",
80 "Phasers locked on target, Captain.",
81 "Thrashing is just virtual crashing.",
82 "To be is to program.",
83 "Real Users hate Real Programmers.",
84 "When all else fails, read the instructions.",
85 "Functionality breeds Contempt.",
86 "The future lies ahead.",
87 "3.1415926535897932384626433832795028841971694",
88 "Sometimes insanity is the only alternative.",
89 "Inaccuracy saves a world of explanation.",
# Characters cycled through by update_twirl().
92 twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
# Default mode is the twirl; the scroll message is chosen pseudo-randomly
# from the current time so a different message appears on each run.
96 self.update = self.update_twirl
97 self.scroll_sequence = self.scroll_msgs[
98 int(time.time() * 100) % len(self.scroll_msgs)]
# Minimum seconds between visible updates (throttle, see _return_early).
100 self.min_display_latency = 0.05
102 def _return_early(self):
# Docstring fragment; the rest (and the return statements) are elided.
104 Flushing output to the tty too frequently wastes cpu time. Therefore,
105 each update* method should return without doing any output when this
108 cur_time = time.time()
# Throttle: skip the update if the last one was too recent.
109 if cur_time - self.last_update < self.min_display_latency:
111 self.last_update = cur_time
# Prints a dot for every 100th position; restarts the line at position 0.
114 def update_basic(self):
115 self.spinpos = (self.spinpos + 1) % 500
116 if self._return_early():
118 if (self.spinpos % 100) == 0:
119 if self.spinpos == 0:
120 sys.stdout.write(". ")
122 sys.stdout.write(".")
# Scrolls the chosen message across the line: forward (green) for the first
# half of the cycle, backward (darkgreen) for the second half.
125 def update_scroll(self):
126 if self._return_early():
128 if(self.spinpos >= len(self.scroll_sequence)):
129 sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
130 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
132 sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
# Full cycle is twice the sequence length (forward + backward pass).
134 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
# Rotates through the twirl characters in place using backspaces.
136 def update_twirl(self):
137 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
138 if self._return_early():
140 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
# No-op spinner used when output is suppressed (body elided).
143 def update_quiet(self):
# Interactive yes/no (or custom-choice) prompt.
# NOTE(review): lines are elided from this chunk (the prompt printing,
# default-colours assignment header, the return statements, and the
# SystemExit conversion); fragment preserved as-is.
146 def userquery(prompt, responses=None, colours=None):
147 """Displays a prompt and a set of responses, then waits for a response
148 which is checked against the responses and the first to match is
149 returned. An empty response will match the first value in responses. The
150 input buffer is *not* cleared prior to the prompt!
153 responses: a List of Strings.
154 colours: a List of Functions taking and returning a String, used to
155 process the responses for display. Typically these will be functions
156 like red() but could be e.g. lambda x: "DisplayString".
157 If responses is omitted, defaults to ["Yes", "No"], [green, red].
158 If only colours is omitted, defaults to [bold, ...].
160 Returns a member of the List responses. (If called without optional
161 arguments, returns "Yes" or "No".)
162 KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
164 if responses is None:
165 responses = ["Yes", "No"]
# Default colour functions come from the user's PROMPT_CHOICE_* colour config.
167 create_color_func("PROMPT_CHOICE_DEFAULT"),
168 create_color_func("PROMPT_CHOICE_OTHER")
170 elif colours is None:
# Repeat the colour list so it is at least as long as responses, then trim.
172 colours=(colours*len(responses))[:len(responses)]
# Loop (header elided): show the coloured choices and read a line.
176 response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
177 for key in responses:
178 # An empty response will match the first value in responses.
# Case-insensitive prefix match against each candidate response.
179 if response.upper()==key[:len(response)].upper():
181 print "Sorry, response '%s' not understood." % response,
182 except (EOFError, KeyboardInterrupt):
# Valid emerge action names.
186 actions = frozenset([
187 "clean", "config", "depclean",
188 "info", "list-sets", "metadata",
189 "prune", "regen", "search",
190 "sync", "unmerge", "version",
# Long command-line options. NOTE(review): the list header and several
# entries are elided in this chunk (embedded numbering skips values).
193 "--ask", "--alphabetical",
194 "--buildpkg", "--buildpkgonly",
195 "--changelog", "--columns",
200 "--fetchonly", "--fetch-all-uri",
201 "--getbinpkg", "--getbinpkgonly",
202 "--help", "--ignore-default-opts",
205 "--newuse", "--nocolor",
206 "--nodeps", "--noreplace",
207 "--nospinner", "--oneshot",
208 "--onlydeps", "--pretend",
209 "--quiet", "--resume",
210 "--rdeps-only", "--root-deps",
211 "--searchdesc", "--selective",
215 "--usepkg", "--usepkgonly",
# Single-letter short option -> long option mapping (dict header elided).
222 "b":"--buildpkg", "B":"--buildpkgonly",
223 "c":"--clean", "C":"--unmerge",
224 "d":"--debug", "D":"--deep",
226 "f":"--fetchonly", "F":"--fetch-all-uri",
227 "g":"--getbinpkg", "G":"--getbinpkgonly",
229 "k":"--usepkg", "K":"--usepkgonly",
231 "n":"--noreplace", "N":"--newuse",
232 "o":"--onlydeps", "O":"--nodeps",
233 "p":"--pretend", "P":"--prune",
235 "s":"--search", "S":"--searchdesc",
238 "v":"--verbose", "V":"--version"
# Append a timestamped entry to the emerge log and optionally update the
# xterm title with a short status message.
# NOTE(review): lines are elided (the surrounding try/finally that pairs with
# lockfile/unlockfile, the seek call, close calls); fragment preserved as-is.
241 def emergelog(xterm_titles, mystr, short_msg=None):
242 if xterm_titles and short_msg:
# Prefix the hostname so multiple machines' titles are distinguishable.
243 if "HOSTNAME" in os.environ:
244 short_msg = os.environ["HOSTNAME"]+": "+short_msg
245 xtermTitle(short_msg)
247 file_path = "/var/log/emerge.log"
248 mylogfile = open(file_path, "a")
# Ensure the log stays owned/permissioned for the portage user/group.
249 portage.util.apply_secpass_permissions(file_path,
250 uid=portage.portage_uid, gid=portage.portage_gid,
251 mylock = portage.locks.lockfile(mylogfile)
255 # seek because we may have gotten held up by the lock.
256 # if so, we may not be positioned at the end of the file.
# Timestamp truncated to whole seconds ([:10] of str(time.time())).
258 mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
262 portage.locks.unlockfile(mylock)
# Best-effort logging: failures are reported to stderr, not raised.
264 except (IOError,OSError,portage.exception.PortageException), e:
266 print >> sys.stderr, "emergelog():",e
# Print a visible countdown before proceeding, giving the user a chance to
# hit Control-C. NOTE(review): the loop and sleep lines are elided here.
268 def countdown(secs=5, doing="Starting"):
270 print ">>> Waiting",secs,"seconds before starting..."
271 print ">>> (Control-C to abort)...\n"+doing+" in: ",
# Loop body fragment: prints the remaining seconds in warning colour.
275 sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
280 # formats a size given in bytes nicely
# Returns a human-readable kB string. NOTE(review): the basestring branch
# body, the comma-insertion loop, and the return are elided in this chunk.
281 def format_size(mysize):
# Presumably passes non-numeric input through unchanged — body elided.
282 if isinstance(mysize, basestring):
284 if 0 != mysize % 1024:
285 # Always round up to the next kB so that it doesn't show 0 kB when
286 # some small file still needs to be fetched.
287 mysize += 1024 - mysize % 1024
288 mystr=str(mysize/1024)
# Thousands separator inserted at position mycount (defined on elided lines).
292 mystr=mystr[:mycount]+","+mystr[mycount:]
# Determine the active gcc version, trying progressively less specific
# sources: gcc-config, the CHOST-prefixed gcc, then plain `gcc -dumpversion`.
# NOTE(review): docstring header and some connective lines are elided.
296 def getgccversion(chost):
299 return: the current in-use gcc version
302 gcc_ver_command = 'gcc -dumpversion'
303 gcc_ver_prefix = 'gcc-'
305 gcc_not_found_error = red(
306 "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
307 "!!! to update the environment of this terminal and possibly\n" +
308 "!!! other terminals also.\n"
# Preferred source: gcc-config reports the selected profile as CHOST-version.
311 mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
312 if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
313 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
# Fallback: ask the CHOST-prefixed compiler directly.
315 mystatus, myoutput = commands.getstatusoutput(
316 chost + "-" + gcc_ver_command)
317 if mystatus == os.EX_OK:
318 return gcc_ver_prefix + myoutput
# Last resort: unprefixed gcc on PATH.
320 mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
321 if mystatus == os.EX_OK:
322 return gcc_ver_prefix + myoutput
# Nothing worked: warn the user and report an unavailable version.
324 portage.writemsg(gcc_not_found_error, noiselevel=-1)
325 return "[unavailable]"
# Build the "Portage VERSION (profile, gcc, libc, kernel/arch)" banner string
# shown by `emerge --version` / `emerge --info`.
# NOTE(review): try/except scaffolding and the libc loop header are elided.
327 def getportageversion(portdir, target_root, profile, chost, vardb):
328 profilever = "unavailable"
# Report the profile as its path relative to $PORTDIR/profiles when possible.
330 realpath = os.path.realpath(profile)
331 basepath = os.path.realpath(os.path.join(portdir, "profiles"))
332 if realpath.startswith(basepath):
333 profilever = realpath[1 + len(basepath):]
# Otherwise fall back to the raw symlink target, marked with "!".
336 profilever = "!" + os.readlink(profile)
339 del realpath, basepath
# Collect installed libc providers (old virtual/glibc kept for compatibility).
342 libclist = vardb.match("virtual/libc")
343 libclist += vardb.match("virtual/glibc")
344 libclist = portage.util.unique_array(libclist)
# Loop body fragment (header elided): accumulate "name-version" strings.
346 xs=portage.catpkgsplit(x)
348 libcver+=","+"-".join(xs[1:])
350 libcver="-".join(xs[1:])
352 libcver="unavailable"
354 gccver = getgccversion(chost)
355 unameout=platform.release()+" "+platform.machine()
357 return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
# Translate emerge command-line options/action into the set of depgraph
# parameter flags documented below.
# NOTE(review): a few lines are elided (e.g. the "deep" add after the --deep
# check and the return statement); fragment preserved as-is.
359 def create_depgraph_params(myopts, myaction):
360 #configure emerge engine parameters
362 # self: include _this_ package regardless of if it is merged.
363 # selective: exclude the package if it is merged
364 # recurse: go into the dependencies
365 # deep: go into the dependencies of already merged packages
366 # empty: pretend nothing is merged
367 # complete: completely account for all known dependencies
368 # remove: build graph for use in removing packages
369 myparams = set(["recurse"])
# depclean/prune build a removal graph and need the complete dep picture.
371 if myaction == "remove":
372 myparams.add("remove")
373 myparams.add("complete")
# Options that should skip already-merged packages.
376 if "--update" in myopts or \
377 "--newuse" in myopts or \
378 "--reinstall" in myopts or \
379 "--noreplace" in myopts:
380 myparams.add("selective")
# --emptytree overrides selectivity: everything is treated as unmerged.
381 if "--emptytree" in myopts:
382 myparams.add("empty")
383 myparams.discard("selective")
384 if "--nodeps" in myopts:
385 myparams.discard("recurse")
# Body of this branch (presumably myparams.add("deep")) is elided.
386 if "--deep" in myopts:
388 if "--complete-graph" in myopts:
389 myparams.add("complete")
392 # search functionality
# Implements `emerge --search` / `--searchdesc` across the configured package
# databases (port tree, binary packages, installed packages) and package sets.
# NOTE(review): many lines are elided throughout this class (the fake_portdb
# setup, loop headers, try/except scaffolding, return statements, and the
# output() method header); all fragments are preserved as-is.
393 class search(object):
404 def __init__(self, root_config, spinner, searchdesc,
405 verbose, usepkg, usepkgonly):
406 """Searches the available and installed packages for the supplied search key.
407 The list of available and installed packages is created at object instantiation.
408 This makes successive searches faster."""
409 self.settings = root_config.settings
410 self.vartree = root_config.trees["vartree"]
411 self.spinner = spinner
412 self.verbose = verbose
413 self.searchdesc = searchdesc
414 self.root_config = root_config
415 self.setconfig = root_config.setconfig
416 self.matches = {"pkg" : []}
# A fake portdb object (construction elided) delegates the portdb interface
# to this instance's _-prefixed methods, which query all databases in _dbs.
421 self.portdb = fake_portdb
422 for attrib in ("aux_get", "cp_all",
423 "xmatch", "findname", "getFetchMap"):
424 setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
428 portdb = root_config.trees["porttree"].dbapi
429 bindb = root_config.trees["bintree"].dbapi
430 vardb = root_config.trees["vartree"].dbapi
# Choose which databases to search based on --usepkg/--usepkgonly.
432 if not usepkgonly and portdb._have_root_eclass_dir:
433 self._dbs.append(portdb)
435 if (usepkg or usepkgonly) and bindb.cp_all():
436 self._dbs.append(bindb)
438 self._dbs.append(vardb)
439 self._portdb = portdb
# _cp_all fragment: union of category/package names from every db, sorted.
444 cp_all.update(db.cp_all())
445 return list(sorted(cp_all))
# aux_get that falls through the db list (loop/except scaffolding elided).
447 def _aux_get(self, *args, **kwargs):
450 return db.aux_get(*args, **kwargs)
455 def _findname(self, *args, **kwargs):
457 if db is not self._portdb:
458 # We don't want findname to return anything
459 # unless it's an ebuild in a portage tree.
460 # Otherwise, it's already built and we don't
463 func = getattr(db, "findname", None)
465 value = func(*args, **kwargs)
# getFetchMap that falls through the db list, like _findname above.
470 def _getFetchMap(self, *args, **kwargs):
472 func = getattr(db, "getFetchMap", None)
474 value = func(*args, **kwargs)
# Visibility check: installed packages and binary packages count as built.
479 def _visible(self, db, cpv, metadata):
480 installed = db is self.vartree.dbapi
481 built = installed or db is not self._portdb
484 pkg_type = "installed"
487 return visible(self.settings,
488 Package(type_name=pkg_type, root_config=self.root_config,
489 cpv=cpv, built=built, installed=installed, metadata=metadata))
491 def _xmatch(self, level, atom):
493 This method does not expand old-style virtuals because it
494 is restricted to returning matches for a single ${CATEGORY}/${PN}
495 and old-style virtual matches are unreliable for that when querying
496 multiple package databases. If necessary, old-style virtuals
497 can be performed on atoms prior to calling this method.
499 cp = portage.dep_getkey(atom)
# match-all: union of matches from every db, filtered to this cp, sorted.
500 if level == "match-all":
503 if hasattr(db, "xmatch"):
504 matches.update(db.xmatch(level, atom))
506 matches.update(db.match(atom))
507 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
508 db._cpv_sort_ascending(result)
# match-visible: same, but dbs without xmatch are filtered via _visible().
509 elif level == "match-visible":
512 if hasattr(db, "xmatch"):
513 matches.update(db.xmatch(level, atom))
515 db_keys = list(db._aux_cache_keys)
516 for cpv in db.match(atom):
517 metadata = izip(db_keys,
518 db.aux_get(cpv, db_keys))
519 if not self._visible(db, cpv, metadata):
522 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
523 db._cpv_sort_ascending(result)
# bestmatch-visible: keep the single best visible cpv across all dbs.
524 elif level == "bestmatch-visible":
527 if hasattr(db, "xmatch"):
528 cpv = db.xmatch("bestmatch-visible", atom)
529 if not cpv or portage.cpv_getkey(cpv) != cp:
531 if not result or cpv == portage.best([cpv, result]):
534 db_keys = Package.metadata_keys
535 # break out of this loop with highest visible
536 # match, checked in descending order
537 for cpv in reversed(db.match(atom)):
538 if portage.cpv_getkey(cpv) != cp:
540 metadata = izip(db_keys,
541 db.aux_get(cpv, db_keys))
542 if not self._visible(db, cpv, metadata):
544 if not result or cpv == portage.best([cpv, result]):
548 raise NotImplementedError(level)
551 def execute(self,searchkey):
552 """Performs the search for the supplied search key"""
554 self.searchkey=searchkey
555 self.packagematches = []
# "desc" bucket only exists when description search is enabled.
558 self.matches = {"pkg":[], "desc":[], "set":[]}
561 self.matches = {"pkg":[], "set":[]}
562 print "Searching... ",
# Leading '%' marks the key as a regex; leading '@' restricts to sets
# (flag assignments on the elided lines, presumably).
565 if self.searchkey.startswith('%'):
567 self.searchkey = self.searchkey[1:]
568 if self.searchkey.startswith('@'):
570 self.searchkey = self.searchkey[1:]
572 self.searchre=re.compile(self.searchkey,re.I)
# Non-regex keys are escaped so they match literally, case-insensitively.
574 self.searchre=re.compile(re.escape(self.searchkey), re.I)
575 for package in self.portdb.cp_all():
576 self.spinner.update()
# Match against full category/package or just the package name
# (the condition selecting between these is elided).
579 match_string = package[:]
581 match_string = package.split("/")[-1]
584 if self.searchre.search(match_string):
# No visible match means every version is masked.
585 if not self.portdb.xmatch("match-visible", package):
587 self.matches["pkg"].append([package,masked])
588 elif self.searchdesc: # DESCRIPTION searching
589 full_package = self.portdb.xmatch("bestmatch-visible", package)
591 #no match found; we don't want to query description
592 full_package = portage.best(
593 self.portdb.xmatch("match-all", package))
599 full_desc = self.portdb.aux_get(
600 full_package, ["DESCRIPTION"])[0]
602 print "emerge: search: aux_get() failed, skipping"
604 if self.searchre.search(full_desc):
605 self.matches["desc"].append([full_package,masked])
# Search package sets by name (and description when --searchdesc).
607 self.sdict = self.setconfig.getSets()
608 for setname in self.sdict:
609 self.spinner.update()
611 match_string = setname
613 match_string = setname.split("/")[-1]
615 if self.searchre.search(match_string):
616 self.matches["set"].append([setname, False])
617 elif self.searchdesc:
618 if self.searchre.search(
619 self.sdict[setname].getMetadata("DESCRIPTION")):
620 self.matches["set"].append([setname, False])
623 for mtype in self.matches:
624 self.matches[mtype].sort()
625 self.mlen += len(self.matches[mtype])
# addCP fragment (method header elided): add one exact cp to the results.
628 if not self.portdb.xmatch("match-all", cp):
631 if not self.portdb.xmatch("bestmatch-visible", cp):
633 self.matches["pkg"].append([cp, masked])
# output() — header elided; prints the collected matches.
637 """Outputs the results of the search."""
638 print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
639 print "[ Applications found : "+white(str(self.mlen))+" ]"
641 vardb = self.vartree.dbapi
642 for mtype in self.matches:
643 for match,masked in self.matches[mtype]:
647 full_package = self.portdb.xmatch(
648 "bestmatch-visible", match)
650 #no match found; we don't want to query description
652 full_package = portage.best(
653 self.portdb.xmatch("match-all",match))
654 elif mtype == "desc":
656 match = portage.cpv_getkey(match)
# Set results: name plus the set's DESCRIPTION metadata.
658 print green("*")+" "+white(match)
659 print " ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION")
663 desc, homepage, license = self.portdb.aux_get(
664 full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
666 print "emerge: search: aux_get() failed, skipping"
669 print green("*")+" "+white(match)+" "+red("[ Masked ]")
671 print green("*")+" "+white(match)
672 myversion = self.getVersion(full_package, search.VERSION_RELEASE)
676 mycat = match.split("/")[0]
677 mypkg = match.split("/")[1]
678 mycpv = match + "-" + myversion
679 myebuild = self.portdb.findname(mycpv)
# For ebuilds, compute the download size from the Manifest digests.
681 pkgdir = os.path.dirname(myebuild)
682 from portage import manifest
683 mf = manifest.Manifest(
684 pkgdir, self.settings["DISTDIR"])
686 uri_map = self.portdb.getFetchMap(mycpv)
687 except portage.exception.InvalidDependString, e:
688 file_size_str = "Unknown (%s)" % (e,)
692 mysum[0] = mf.getDistfilesSize(uri_map)
694 file_size_str = "Unknown (missing " + \
695 "digest for %s)" % (e,)
# For binary packages, fall back to the .tbz2 file size on disk.
700 if db is not vardb and \
701 db.cpv_exists(mycpv):
703 if not myebuild and hasattr(db, "bintree"):
704 myebuild = db.bintree.getname(mycpv)
706 mysum[0] = os.stat(myebuild).st_size
# Format the size in kB with a thousands separator.
711 if myebuild and file_size_str is None:
712 mystr = str(mysum[0] / 1024)
716 mystr = mystr[:mycount] + "," + mystr[mycount:]
717 file_size_str = mystr + " kB"
721 print " ", darkgreen("Latest version available:"),myversion
722 print " ", self.getInstallationStatus(mycat+'/'+mypkg)
725 (darkgreen("Size of files:"), file_size_str)
726 print " ", darkgreen("Homepage:")+" ",homepage
727 print " ", darkgreen("Description:")+" ",desc
728 print " ", darkgreen("License:")+" ",license
# Returns a coloured "Latest version installed" status line for a package.
733 def getInstallationStatus(self,package):
734 installed_package = self.vartree.dep_bestmatch(package)
736 version = self.getVersion(installed_package,search.VERSION_RELEASE)
738 result = darkgreen("Latest version installed:")+" "+version
740 result = darkgreen("Latest version installed:")+" [ Not Installed ]"
# Extracts the version (optionally with -rN revision) from a full cpv.
743 def getVersion(self,full_package,detail):
744 if len(full_package) > 1:
745 package_parts = portage.catpkgsplit(full_package)
# Only append the revision when it is meaningful (not the implicit r0).
746 if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
747 result = package_parts[2]+ "-" + package_parts[3]
749 result = package_parts[2]
# Per-ROOT configuration bundle used by depgraph.
# NOTE(review): class body lines are elided (rest of docstring, the
# pkg_tree_map assignment header, the reverse-map construction).
754 class RootConfig(object):
755 """This is used internally by depgraph to track information about a
# Maps package type names to the tree that stores them.
759 "ebuild" : "porttree",
760 "binary" : "bintree",
761 "installed" : "vartree"
# Fragment: builds the reverse (tree -> type) mapping.
765 for k, v in pkg_tree_map.iteritems():
768 def __init__(self, settings, trees, setconfig):
770 self.settings = settings
771 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
772 self.root = self.settings["ROOT"]
773 self.setconfig = setconfig
774 self.sets = self.setconfig.getSets()
# Virtual dbapi tracking packages already determined to be visible.
775 self.visible_pkgs = PackageVirtualDbapi(self.settings)
# Decide which atom (if any) should be recorded in the world file for pkg.
# NOTE(review): several connective lines are elided (early returns, the
# mydb selection, loop headers); fragment preserved as-is.
777 def create_world_atom(pkg, args_set, root_config):
778 """Create a new atom for the world file if one does not exist. If the
779 argument atom is precise enough to identify a specific slot then a slot
780 atom will be returned. Atoms that are in the system set may also be stored
781 in world since system atoms can only match one slot while world atoms can
782 be greedy with respect to slots. Unslotted system packages will not be
785 arg_atom = args_set.findAtomForPackage(pkg)
788 cp = portage.dep_getkey(arg_atom)
790 sets = root_config.sets
791 portdb = root_config.trees["porttree"].dbapi
792 vardb = root_config.trees["vartree"].dbapi
# A package is "slotted" if it has multiple slots, or one non-"0" slot.
793 available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
794 for cpv in portdb.match(cp))
795 slotted = len(available_slots) > 1 or \
796 (len(available_slots) == 1 and "0" not in available_slots)
798 # check the vdb in case this is multislot
799 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
800 for cpv in vardb.match(cp))
801 slotted = len(available_slots) > 1 or \
802 (len(available_slots) == 1 and "0" not in available_slots)
803 if slotted and arg_atom != cp:
804 # If the user gave a specific atom, store it as a
805 # slot atom in the world file.
806 slot_atom = pkg.slot_atom
808 # For USE=multislot, there are a couple of cases to
811 # 1) SLOT="0", but the real SLOT spontaneously changed to some
812 # unknown value, so just record an unslotted atom.
814 # 2) SLOT comes from an installed package and there is no
815 # matching SLOT in the portage tree.
817 # Make sure that the slot atom is available in either the
818 # portdb or the vardb, since otherwise the user certainly
819 # doesn't want the SLOT atom recorded in the world file
820 # (case 1 above). If it's only available in the vardb,
821 # the user may be trying to prevent a USE=multislot
822 # package from being removed by --depclean (case 2 above).
825 if not portdb.match(slot_atom):
826 # SLOT seems to come from an installed multislot package
828 # If there is no installed package matching the SLOT atom,
829 # it probably changed SLOT spontaneously due to USE=multislot,
830 # so just record an unslotted atom.
831 if vardb.match(slot_atom):
832 # Now verify that the argument is precise
833 # enough to identify a specific slot.
# mydb is assigned on elided lines; presumably portdb or vardb above.
834 matches = mydb.match(arg_atom)
835 matched_slots = set()
837 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
# Only use the slot atom when the argument pins exactly one slot.
838 if len(matched_slots) == 1:
839 new_world_atom = slot_atom
841 if new_world_atom == sets["world"].findAtomForPackage(pkg):
842 # Both atoms would be identical, so there's nothing to add.
845 # Unlike world atoms, system atoms are not greedy for slots, so they
846 # can't be safely excluded from world if they are slotted.
847 system_atom = sets["system"].findAtomForPackage(pkg)
849 if not portage.dep_getkey(system_atom).startswith("virtual/"):
851 # System virtuals aren't safe to exclude from world since they can
852 # match multiple old-style virtuals but only one of them will be
853 # pulled in by update or depclean.
854 providers = portdb.mysettings.getvirtuals().get(
855 portage.dep_getkey(system_atom))
856 if providers and len(providers) == 1 and providers[0] == cp:
858 return new_world_atom
# Generator fragment: strips the +/- default markers from IUSE flags.
# NOTE(review): the loop header and yield statements are elided here.
860 def filter_iuse_defaults(iuse):
862 if flag.startswith("+") or flag.startswith("-"):
# Base class whose __init__ assigns keyword arguments to __slots__ attributes
# collected from the whole class hierarchy; also provides a copy method.
# NOTE(review): loop headers and the copy() def line are elided in this chunk.
867 class SlotObject(object):
868 __slots__ = ("__weakref__",)
870 def __init__(self, **kwargs):
# Walk the class and all its bases, gathering every declared slot.
871 classes = [self.__class__]
876 classes.extend(c.__bases__)
877 slots = getattr(c, "__slots__", None)
# Missing kwargs default to None.
881 myvalue = kwargs.get(myattr, None)
882 setattr(self, myattr, myvalue)
# copy() — header elided; docstring fragment follows.
886 Create a new instance and copy all attributes
887 defined from __slots__ (including those from
890 obj = self.__class__()
892 classes = [self.__class__]
897 classes.extend(c.__bases__)
898 slots = getattr(c, "__slots__", None)
902 setattr(obj, myattr, getattr(self, myattr))
# Dependency priority base: orders priorities by their integer value, so
# subclasses only need to define __int__ (not visible in this chunk).
906 class AbstractDepPriority(SlotObject):
907 __slots__ = ("buildtime", "runtime", "runtime_post")
# Rich comparisons all delegate to the subclass-provided __int__().
909 def __lt__(self, other):
910 return self.__int__() < other
912 def __le__(self, other):
913 return self.__int__() <= other
915 def __eq__(self, other):
916 return self.__int__() == other
918 def __ne__(self, other):
919 return self.__int__() != other
921 def __gt__(self, other):
922 return self.__int__() > other
924 def __ge__(self, other):
925 return self.__int__() >= other
# copy method fragment (def line elided): shallow copy is sufficient here.
929 return copy.copy(self)
# Priority for normal dependency edges. NOTE(review): __int__ and most of
# __str__ are elided; only a __str__ fragment is visible below.
931 class DepPriority(AbstractDepPriority):
933 __slots__ = ("satisfied", "optional", "rebuild")
945 if self.runtime_post:
946 return "runtime_post"
# Priority for blocker edges (body elided); a shared singleton is attached
# to the class after definition.
949 class BlockerDepPriority(DepPriority):
957 BlockerDepPriority.instance = BlockerDepPriority()
# Priority used when ordering unmerges. NOTE(review): the docstring table,
# __int__, and most of __str__ are elided; fragments preserved as-is.
959 class UnmergeDepPriority(AbstractDepPriority):
960 __slots__ = ("optional", "satisfied",)
962 Combination of properties Priority Category
967 (none of the above) -2 SOFT
977 if self.runtime_post:
# __str__ fragment: values above SOFT render differently (elided).
984 myvalue = self.__int__()
985 if myvalue > self.SOFT:
# Maps DepPriority values to ignore-predicates of increasing permissiveness,
# exposed as the ignore_priority tuple below (index = softness level).
# NOTE(review): docstring header/footer, decorators, and the early-return
# lines of each predicate are elided in this chunk.
989 class DepPriorityNormalRange(object):
991 DepPriority properties Index Category
995 runtime_post 2 MEDIUM_SOFT
997 (none of the above) 0 NONE
# Each predicate returns True when the given priority may be ignored.
1005 def _ignore_optional(cls, priority):
1006 if priority.__class__ is not DepPriority:
1008 return bool(priority.optional)
1011 def _ignore_runtime_post(cls, priority):
1012 if priority.__class__ is not DepPriority:
1014 return bool(priority.optional or priority.runtime_post)
1017 def _ignore_runtime(cls, priority):
1018 if priority.__class__ is not DepPriority:
1020 return not priority.buildtime
# Named aliases for the common softness levels.
1022 ignore_medium = _ignore_runtime
1023 ignore_medium_soft = _ignore_runtime_post
1024 ignore_soft = _ignore_optional
# Ordered from least to most permissive (first tuple entry elided).
1026 DepPriorityNormalRange.ignore_priority = (
1028 DepPriorityNormalRange._ignore_optional,
1029 DepPriorityNormalRange._ignore_runtime_post,
1030 DepPriorityNormalRange._ignore_runtime
# Like DepPriorityNormalRange, but distinguishes satisfied dependencies so
# they can be ignored earlier; ignore_priority tuple attached below.
# NOTE(review): decorators and the early-return lines of each predicate are
# elided in this chunk; fragments preserved as-is.
1033 class DepPrioritySatisfiedRange(object):
1035 DepPriority Index Category
1037 not satisfied and buildtime HARD
1038 not satisfied and runtime 7 MEDIUM
1039 not satisfied and runtime_post 6 MEDIUM_SOFT
1040 satisfied and buildtime and rebuild 5 SOFT
1041 satisfied and buildtime 4 SOFT
1042 satisfied and runtime 3 SOFT
1043 satisfied and runtime_post 2 SOFT
1045 (none of the above) 0 NONE
# Each predicate returns True when the given priority may be ignored.
1053 def _ignore_optional(cls, priority):
1054 if priority.__class__ is not DepPriority:
1056 return bool(priority.optional)
1059 def _ignore_satisfied_runtime_post(cls, priority):
1060 if priority.__class__ is not DepPriority:
1062 if priority.optional:
1064 if not priority.satisfied:
1066 return bool(priority.runtime_post)
1069 def _ignore_satisfied_runtime(cls, priority):
1070 if priority.__class__ is not DepPriority:
1072 if priority.optional:
1074 if not priority.satisfied:
1076 return not priority.buildtime
1079 def _ignore_satisfied_buildtime(cls, priority):
1080 if priority.__class__ is not DepPriority:
1082 if priority.optional:
1084 if not priority.satisfied:
1086 if priority.buildtime:
1087 return not priority.rebuild
1091 def _ignore_satisfied_buildtime_rebuild(cls, priority):
1092 if priority.__class__ is not DepPriority:
1094 if priority.optional:
1096 return bool(priority.satisfied)
1099 def _ignore_runtime_post(cls, priority):
1100 if priority.__class__ is not DepPriority:
1102 return bool(priority.optional or \
1103 priority.satisfied or \
1104 priority.runtime_post)
1107 def _ignore_runtime(cls, priority):
1108 if priority.__class__ is not DepPriority:
1110 return bool(priority.satisfied or \
1111 not priority.buildtime)
# Named aliases for the common softness levels.
1113 ignore_medium = _ignore_runtime
1114 ignore_medium_soft = _ignore_runtime_post
1115 ignore_soft = _ignore_satisfied_buildtime_rebuild
# Ordered from least to most permissive (first tuple entry elided).
1117 DepPrioritySatisfiedRange.ignore_priority = (
1119 DepPrioritySatisfiedRange._ignore_optional,
1120 DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1121 DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1122 DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1123 DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1124 DepPrioritySatisfiedRange._ignore_runtime_post,
1125 DepPrioritySatisfiedRange._ignore_runtime
# Depth-first walk over the dependency graph collecting Package nodes in the
# system set plus everything reachable from them via runtime edges.
# NOTE(review): the seed-loop header, `continue` lines, and the `while`
# header of the traversal are elided in this chunk.
1128 def _find_deep_system_runtime_deps(graph):
1129 deep_system_deps = set()
# Seed the stack with system-set Package nodes (uninstalls excluded).
1132 if not isinstance(node, Package) or \
1133 node.operation == 'uninstall':
1135 if node.root_config.sets['system'].findAtomForPackage(node):
1136 node_stack.append(node)
1138 def ignore_priority(priority):
1140 Ignore non-runtime priorities.
1142 if isinstance(priority, DepPriority) and \
1143 (priority.runtime or priority.runtime_post):
# Traversal loop (while header elided): pop, mark, push runtime children.
1148 node = node_stack.pop()
1149 if node in deep_system_deps:
1151 deep_system_deps.add(node)
1152 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1153 if not isinstance(child, Package) or \
1154 child.operation == 'uninstall':
1156 node_stack.append(child)
1158 return deep_system_deps
1160 class FakeVartree(portage.vartree):
1161 """This is implements an in-memory copy of a vartree instance that provides
1162 all the interfaces required for use by the depgraph. The vardb is locked
1163 during the constructor call just long enough to read a copy of the
1164 installed package information. This allows the depgraph to do it's
1165 dependency calculations without holding a lock on the vardb. It also
1166 allows things like vardb global updates to be done in memory so that the
1167 user doesn't necessarily need write access to the vardb in cases where
1168 global updates are necessary (updates are performed when necessary if there
1169 is not a matching ebuild in the tree)."""
1170 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1171 self._root_config = root_config
1172 if pkg_cache is None:
1174 real_vartree = root_config.trees["vartree"]
1175 portdb = root_config.trees["porttree"].dbapi
1176 self.root = real_vartree.root
1177 self.settings = real_vartree.settings
1178 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1179 if "_mtime_" not in mykeys:
1180 mykeys.append("_mtime_")
1181 self._db_keys = mykeys
1182 self._pkg_cache = pkg_cache
1183 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1184 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1186 # At least the parent needs to exist for the lock file.
1187 portage.util.ensure_dirs(vdb_path)
1188 except portage.exception.PortageException:
1192 if acquire_lock and os.access(vdb_path, os.W_OK):
1193 vdb_lock = portage.locks.lockdir(vdb_path)
1194 real_dbapi = real_vartree.dbapi
1196 for cpv in real_dbapi.cpv_all():
1197 cache_key = ("installed", self.root, cpv, "nomerge")
1198 pkg = self._pkg_cache.get(cache_key)
1200 metadata = pkg.metadata
1202 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1203 myslot = metadata["SLOT"]
1204 mycp = portage.dep_getkey(cpv)
1205 myslot_atom = "%s:%s" % (mycp, myslot)
1207 mycounter = long(metadata["COUNTER"])
1210 metadata["COUNTER"] = str(mycounter)
1211 other_counter = slot_counters.get(myslot_atom, None)
1212 if other_counter is not None:
1213 if other_counter > mycounter:
1215 slot_counters[myslot_atom] = mycounter
1217 pkg = Package(built=True, cpv=cpv,
1218 installed=True, metadata=metadata,
1219 root_config=root_config, type_name="installed")
1220 self._pkg_cache[pkg] = pkg
1221 self.dbapi.cpv_inject(pkg)
1222 real_dbapi.flush_cache()
1225 portage.locks.unlockdir(vdb_lock)
1226 # Populate the old-style virtuals using the cached values.
1227 if not self.settings.treeVirtuals:
1228 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1229 portage.getCPFromCPV, self.get_all_provides())
1231 # Intialize variables needed for lazy cache pulls of the live ebuild
1232 # metadata. This ensures that the vardb lock is released ASAP, without
1233 # being delayed in case cache generation is triggered.
1234 self._aux_get = self.dbapi.aux_get
1235 self.dbapi.aux_get = self._aux_get_wrapper
1236 self._match = self.dbapi.match
1237 self.dbapi.match = self._match_wrapper
1238 self._aux_get_history = set()
1239 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1240 self._portdb = portdb
1241 self._global_updates = None
1243 def _match_wrapper(self, cpv, use_cache=1):
1245 Make sure the metadata in Package instances gets updated for any
1246 cpv that is returned from a match() call, since the metadata can
1247 be accessed directly from the Package instance instead of via
1250 matches = self._match(cpv, use_cache=use_cache)
1252 if cpv in self._aux_get_history:
1254 self._aux_get_wrapper(cpv, [])
1257 def _aux_get_wrapper(self, pkg, wants):
1258 if pkg in self._aux_get_history:
1259 return self._aux_get(pkg, wants)
1260 self._aux_get_history.add(pkg)
1262 # Use the live ebuild metadata if possible.
1263 live_metadata = dict(izip(self._portdb_keys,
1264 self._portdb.aux_get(pkg, self._portdb_keys)))
1265 if not portage.eapi_is_supported(live_metadata["EAPI"]):
1267 self.dbapi.aux_update(pkg, live_metadata)
1268 except (KeyError, portage.exception.PortageException):
1269 if self._global_updates is None:
1270 self._global_updates = \
1271 grab_global_updates(self._portdb.porttree_root)
1272 perform_global_updates(
1273 pkg, self.dbapi, self._global_updates)
1274 return self._aux_get(pkg, wants)
def sync(self, acquire_lock=1):
    """
    Call this method to synchronize state with the real vardb
    after one or more packages may have been installed or
    uninstalled.
    """
    # NOTE(review): the lock/try-finally scaffolding of this method
    # appears partially elided in this chunk.
    vdb_path = os.path.join(self.root, portage.VDB_PATH)
    # At least the parent needs to exist for the lock file.
    portage.util.ensure_dirs(vdb_path)
    except portage.exception.PortageException:
    if acquire_lock and os.access(vdb_path, os.W_OK):
        vdb_lock = portage.locks.lockdir(vdb_path)
    portage.locks.unlockdir(vdb_lock)
    real_vardb = self._root_config.trees["vartree"].dbapi
    current_cpv_set = frozenset(real_vardb.cpv_all())
    pkg_vardb = self.dbapi
    aux_get_history = self._aux_get_history

    # Remove any packages that have been uninstalled.
    for pkg in list(pkg_vardb):
        if pkg.cpv not in current_cpv_set:
            pkg_vardb.cpv_remove(pkg)
            aux_get_history.discard(pkg.cpv)

    # Validate counters and timestamps.
    validation_keys = ["COUNTER", "_mtime_"]
    for cpv in current_cpv_set:
        pkg_hash_key = ("installed", root, cpv, "nomerge")
        pkg = pkg_vardb.get(pkg_hash_key)
        counter, mtime = real_vardb.aux_get(cpv, validation_keys)
        counter = long(counter)
        # Drop the cached Package when its counter/mtime disagree
        # with the real vardb, then re-create it below.
        if counter != pkg.counter or \
            pkg_vardb.cpv_remove(pkg)
            aux_get_history.discard(pkg.cpv)
        pkg = self._pkg(cpv)
        # Track the highest counter seen per slot atom.
        other_counter = slot_counters.get(pkg.slot_atom)
        if other_counter is not None:
            if other_counter > pkg.counter:
        slot_counters[pkg.slot_atom] = pkg.counter
        pkg_vardb.cpv_inject(pkg)

    real_vardb.flush_cache()
def _pkg(self, cpv):
    """
    Construct an installed-type Package instance for *cpv*, populated
    from the real vardb's metadata.
    """
    root_config = self._root_config
    real_vardb = root_config.trees["vartree"].dbapi
    pkg = Package(cpv=cpv, installed=True,
        metadata=izip(self._db_keys,
            real_vardb.aux_get(cpv, self._db_keys)),
        root_config=root_config,
        type_name="installed")
    # Normalize COUNTER to a plain integer string.
    # NOTE(review): the try/except around this parse and the return
    # statement appear elided in this chunk.
    mycounter = long(pkg.metadata["COUNTER"])
    pkg.metadata["COUNTER"] = str(mycounter)
def grab_global_updates(portdir):
    """
    Read and parse profiles/updates entries (package moves/slotmoves)
    under *portdir*, returning the accumulated update commands.
    """
    from portage.update import grab_updates, parse_updates
    updpath = os.path.join(portdir, "profiles", "updates")
    # NOTE(review): accumulator initialization and the return statement
    # appear elided in this chunk.
    try:
        rawupdates = grab_updates(updpath)
    except portage.exception.DirectoryNotFound:
    for mykey, mystat, mycontent in rawupdates:
        commands, errors = parse_updates(mycontent)
        upd_commands.extend(commands)
def perform_global_updates(mycpv, mydb, mycommands):
    """
    Apply the given update commands (package moves) to the dependency
    metadata of *mycpv* in *mydb*, rewriting its *DEPEND entries.
    """
    from portage.update import update_dbentries
    aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
    aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
    updates = update_dbentries(mycommands, aux_dict)
    # NOTE(review): an "if updates:" guard appears elided just above
    # this write-back in the original source.
    mydb.aux_update(mycpv, updates)
def visible(pkgsettings, pkg):
    """
    Check if a package is visible. This can raise an InvalidDependString
    exception if LICENSE is invalid.
    TODO: optionally generate a list of masking reasons
    @returns: True if the package is visible, False otherwise.
    """
    # NOTE(review): the "return False" / "return True" lines for each
    # check appear elided in this chunk; only the conditions remain.
    if not pkg.metadata["SLOT"]:
    if not pkg.installed:
        if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
    eapi = pkg.metadata["EAPI"]
    if not portage.eapi_is_supported(eapi):
    if not pkg.installed:
        if portage._eapi_is_deprecated(eapi):
    if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
    if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
    if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
    # LICENSE check may raise InvalidDependString (see docstring).
    if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
    except portage.exception.InvalidDependString:
def get_masking_status(pkg, pkgsettings, root_config):
    """
    Collect human-readable masking reasons for *pkg*, extending the
    list from portage.getmaskingstatus() with CHOST and SLOT checks.
    """
    mreasons = portage.getmaskingstatus(
        pkg, settings=pkgsettings,
        portdb=root_config.trees["porttree"].dbapi)
    # Installed packages keep their original CHOST; only check new ones.
    if not pkg.installed:
        if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
            mreasons.append("CHOST: %s" % \
                pkg.metadata["CHOST"])
    if not pkg.metadata["SLOT"]:
        mreasons.append("invalid: SLOT is undefined")
    # NOTE(review): the return statement appears elided in this chunk.
def get_mask_info(root_config, cpv, pkgsettings,
    db, pkg_type, built, installed, db_keys):
    """
    Fetch metadata for *cpv* from *db* and compute its masking reasons.
    Returns (metadata, mreasons); metadata is None on fetch failure.
    """
    # NOTE(review): the try/except around aux_get appears elided in
    # this chunk; on failure metadata presumably stays None — verify.
    metadata = dict(izip(db_keys,
        db.aux_get(cpv, db_keys)))
    # For unbuilt ebuilds, compute USE/CHOST from current settings.
    if metadata and not built:
        pkgsettings.setcpv(cpv, mydb=metadata)
        metadata["USE"] = pkgsettings["PORTAGE_USE"]
        metadata['CHOST'] = pkgsettings.get('CHOST', '')
    if metadata is None:
        mreasons = ["corruption"]
        eapi = metadata['EAPI']
    if not portage.eapi_is_supported(eapi):
        mreasons = ['EAPI %s' % eapi]
        pkg = Package(type_name=pkg_type, root_config=root_config,
            cpv=cpv, built=built, installed=installed, metadata=metadata)
        mreasons = get_masking_status(pkg, pkgsettings, root_config)
    return metadata, mreasons
def show_masked_packages(masked_packages):
    """
    Print each masked package with its mask reasons, package.mask
    comments and license file locations, de-duplicating output.
    Returns True if any package was masked by an unsupported EAPI.
    """
    # NOTE(review): several continue/print lines appear elided in this
    # chunk.
    shown_licenses = set()
    shown_comments = set()
    # Maybe there is both an ebuild and a binary. Only
    # show one of them to avoid redundant appearance.
    have_eapi_mask = False
    for (root_config, pkgsettings, cpv,
        metadata, mreasons) in masked_packages:
        if cpv in shown_cpvs:
        comment, filename = None, None
        if "package.mask" in mreasons:
            comment, filename = \
                portage.getmaskingreason(
                cpv, metadata=metadata,
                settings=pkgsettings,
                portdb=root_config.trees["porttree"].dbapi,
                return_location=True)
        missing_licenses = []
        if not portage.eapi_is_supported(metadata["EAPI"]):
            have_eapi_mask = True
            missing_licenses = \
                pkgsettings._getMissingLicenses(
        except portage.exception.InvalidDependString:
            # This will have already been reported
            # above via mreasons.
        print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
        if comment and comment not in shown_comments:
            shown_comments.add(comment)
        portdb = root_config.trees["porttree"].dbapi
        for l in missing_licenses:
            l_path = portdb.findLicensePath(l)
            if l in shown_licenses:
            msg = ("A copy of the '%s' license" + \
                " is located at '%s'.") % (l, l_path)
            shown_licenses.add(l)
    return have_eapi_mask
class Task(SlotObject):
    """
    Base class for hashable task objects. Identity is defined by the
    tuple a subclass stores in _hash_key; comparison, hashing, length,
    indexing, iteration and containment all delegate to that tuple.
    """
    __slots__ = ("_hash_key", "_hash_value")

    def _get_hash_key(self):
        # Subclasses are responsible for assigning self._hash_key.
        hash_key = getattr(self, "_hash_key", None)
        if hash_key is None:
            raise NotImplementedError(self)
        # NOTE(review): the return statement appears elided in this chunk.

    def __eq__(self, other):
        return self._get_hash_key() == other

    def __ne__(self, other):
        return self._get_hash_key() != other

    # NOTE(review): the __hash__ method header appears elided here;
    # the hash of the key tuple is cached in _hash_value.
        hash_value = getattr(self, "_hash_value", None)
        if hash_value is None:
            self._hash_value = hash(self._get_hash_key())
        return self._hash_value

    # NOTE(review): __len__ header elided.
        return len(self._get_hash_key())

    def __getitem__(self, key):
        return self._get_hash_key()[key]

    # NOTE(review): __iter__ header elided.
        return iter(self._get_hash_key())

    def __contains__(self, key):
        return key in self._get_hash_key()

    # NOTE(review): __str__ header elided.
        return str(self._get_hash_key())
class Blocker(Task):
    """
    Represents a blocker ("!atom") dependency. Hash identity is the
    tuple ("blocks", root, atom, eapi).
    """
    # Defining __eq__ in Task would otherwise clear inherited __hash__.
    __hash__ = Task.__hash__
    __slots__ = ("root", "atom", "cp", "eapi", "satisfied")

    def __init__(self, **kwargs):
        Task.__init__(self, **kwargs)
        # Category/package key derived from the blocker atom.
        self.cp = portage.dep_getkey(self.atom)

    def _get_hash_key(self):
        hash_key = getattr(self, "_hash_key", None)
        if hash_key is None:
            # NOTE(review): the "self._hash_key = \" assignment line
            # appears elided in this chunk.
            ("blocks", self.root, self.atom, self.eapi)
        return self._hash_key
class Package(Task):
    """
    Represents a single ebuild/binary/installed package instance.
    Hash identity is (type_name, root, cpv, operation); ordering
    compares versions of packages sharing the same cp.
    """
    __hash__ = Task.__hash__
    __slots__ = ("built", "cpv", "depth",
        "installed", "metadata", "onlydeps", "operation",
        "root_config", "type_name",
        "category", "counter", "cp", "cpv_split",
        "inherited", "iuse", "mtime",
        "pf", "pv_split", "root", "slot", "slot_atom", "use")

    # NOTE(review): the "metadata_keys = [" opening line appears elided
    # in this chunk.
        "CHOST", "COUNTER", "DEPEND", "EAPI",
        "INHERITED", "IUSE", "KEYWORDS",
        "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
        "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]

    def __init__(self, **kwargs):
        Task.__init__(self, **kwargs)
        self.root = self.root_config.root
        # Wrap metadata so assignments keep derived attributes in sync.
        self.metadata = _PackageMetadataWrapper(self, self.metadata)
        self.cp = portage.cpv_getkey(self.cpv)
        # Avoid an InvalidAtom exception when creating slot_atom.
        # This package instance will be masked due to empty SLOT.
        self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
        self.category, self.pf = portage.catsplit(self.cpv)
        self.cpv_split = portage.catpkgsplit(self.cpv)
        self.pv_split = self.cpv_split[1:]

    # NOTE(review): the nested "_use" class header appears elided here;
    # it is an immutable view of the enabled USE flags.
        __slots__ = ("__weakref__", "enabled")

        def __init__(self, use):
            self.enabled = frozenset(use)

    class _iuse(object):
        # Parsed IUSE: tokens split into enabled (+flag) / disabled
        # (-flag) / all sets, plus a lazily-built validity regex.
        __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")

        def __init__(self, tokens, iuse_implicit):
            self.tokens = tuple(tokens)
            self.iuse_implicit = iuse_implicit
            # NOTE(review): the token-classification loop is partially
            # elided in this chunk.
                    enabled.append(x[1:])
                    disabled.append(x[1:])
            self.enabled = frozenset(enabled)
            self.disabled = frozenset(disabled)
            self.all = frozenset(chain(enabled, disabled, other))

        def __getattribute__(self, name):
            # Lazily compile self.regex on first access.
                return object.__getattribute__(self, "regex")
            except AttributeError:
                all = object.__getattribute__(self, "all")
                iuse_implicit = object.__getattribute__(self, "iuse_implicit")
                # Escape anything except ".*" which is supposed
                # to pass through from _get_implicit_iuse()
                regex = (re.escape(x) for x in chain(all, iuse_implicit))
                regex = "^(%s)$" % "|".join(regex)
                regex = regex.replace("\\.\\*", ".*")
                self.regex = re.compile(regex)
            return object.__getattribute__(self, name)

    def _get_hash_key(self):
        hash_key = getattr(self, "_hash_key", None)
        if hash_key is None:
            # Default the operation: installed/onlydeps packages are
            # "nomerge", everything else is "merge".
            if self.operation is None:
                self.operation = "merge"
                if self.onlydeps or self.installed:
                    self.operation = "nomerge"
            (self.type_name, self.root, self.cpv, self.operation)
        return self._hash_key

    # Rich comparisons delegate to portage.pkgcmp on the version split.
    # NOTE(review): the return statements of all four comparison
    # methods appear elided in this chunk.
    def __lt__(self, other):
        if other.cp != self.cp:
        if portage.pkgcmp(self.pv_split, other.pv_split) < 0:

    def __le__(self, other):
        if other.cp != self.cp:
        if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:

    def __gt__(self, other):
        if other.cp != self.cp:
        if portage.pkgcmp(self.pv_split, other.pv_split) > 0:

    def __ge__(self, other):
        if other.cp != self.cp:
        if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
# Full set of metadata keys: portage.auxdbkeys minus UNUSED_* entries
# and the legacy CDEPEND key, plus the keys Package itself requires.
_all_metadata_keys = set(x for x in portage.auxdbkeys \
    if not x.startswith("UNUSED_"))
_all_metadata_keys.discard("CDEPEND")
_all_metadata_keys.update(Package.metadata_keys)

from portage.cache.mappings import slot_dict_class
# Slot-based dict class used as the base of _PackageMetadataWrapper.
_PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
    """
    Detect metadata updates and synchronize Package attributes.
    """

    __slots__ = ("_pkg",)
    # Keys whose assignment triggers the matching _set_<key>() hook.
    _wrapped_keys = frozenset(
        ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])

    def __init__(self, pkg, metadata):
        _PackageMetadataWrapperBase.__init__(self)
        # NOTE(review): the "self._pkg = pkg" assignment appears elided
        # in this chunk.
        self.update(metadata)

    def __setitem__(self, k, v):
        _PackageMetadataWrapperBase.__setitem__(self, k, v)
        if k in self._wrapped_keys:
            # Dispatch to the matching synchronization hook below.
            getattr(self, "_set_" + k.lower())(k, v)

    def _set_inherited(self, k, v):
        if isinstance(v, basestring):
            v = frozenset(v.split())
        self._pkg.inherited = v

    def _set_iuse(self, k, v):
        self._pkg.iuse = self._pkg._iuse(
            v.split(), self._pkg.root_config.iuse_implicit)

    def _set_slot(self, k, v):
        # NOTE(review): body elided in this chunk.

    def _set_use(self, k, v):
        self._pkg.use = self._pkg._use(v.split())

    def _set_counter(self, k, v):
        # NOTE(review): the string-to-long conversion body appears
        # elided in this chunk.
        if isinstance(v, basestring):
        self._pkg.counter = v

    def _set__mtime_(self, k, v):
        # NOTE(review): body partially elided in this chunk.
        if isinstance(v, basestring):
class EbuildFetchonly(SlotObject):
    """
    Fetches SRC_URI distfiles for a single package in the current
    process (used for --fetchonly), optionally in pretend/listonly
    mode.
    """
    __slots__ = ("fetch_all", "pkg", "pretend", "settings")

    # NOTE(review): the execute() method header appears elided here.
        settings = self.settings
        portdb = pkg.root_config.trees["porttree"].dbapi
        ebuild_path = portdb.findname(pkg.cpv)
        settings.setcpv(pkg)
        debug = settings.get("PORTAGE_DEBUG") == "1"
        use_cache = 1 # always true
        portage.doebuild_environment(ebuild_path, "fetch",
            settings["ROOT"], settings, debug, use_cache, portdb)
        restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
        # RESTRICT=fetch needs a builddir so pkg_nofetch can be spawned.
            rval = self._execute_with_builddir()
            rval = portage.doebuild(ebuild_path, "fetch",
                settings["ROOT"], settings, debug=debug,
                listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
                mydbapi=portdb, tree="porttree")

        if rval != os.EX_OK:
            msg = "Fetch failed for '%s'" % (pkg.cpv,)
            eerror(msg, phase="unpack", key=pkg.cpv)

    def _execute_with_builddir(self):
        # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
        # ensuring sane $PWD (bug #239560) and storing elog
        # messages. Use a private temp directory, in order
        # to avoid locking the main one.
        settings = self.settings
        global_tmpdir = settings["PORTAGE_TMPDIR"]
        from tempfile import mkdtemp
        # NOTE(review): the try/except around mkdtemp appears elided.
        private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
        if e.errno != portage.exception.PermissionDenied.errno:
        raise portage.exception.PermissionDenied(global_tmpdir)
        settings["PORTAGE_TMPDIR"] = private_tmpdir
        settings.backup_changes("PORTAGE_TMPDIR")
        retval = self._execute()
        # Restore the original tmpdir and discard the private one.
        settings["PORTAGE_TMPDIR"] = global_tmpdir
        settings.backup_changes("PORTAGE_TMPDIR")
        shutil.rmtree(private_tmpdir)

    # NOTE(review): the _execute() method header appears elided here.
        settings = self.settings
        root_config = pkg.root_config
        portdb = root_config.trees["porttree"].dbapi
        ebuild_path = portdb.findname(pkg.cpv)
        debug = settings.get("PORTAGE_DEBUG") == "1"
        retval = portage.doebuild(ebuild_path, "fetch",
            self.settings["ROOT"], self.settings, debug=debug,
            listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
            mydbapi=portdb, tree="porttree")

        if retval != os.EX_OK:
            msg = "Fetch failed for '%s'" % (pkg.cpv,)
            eerror(msg, phase="unpack", key=pkg.cpv)
        # Flush elog messages collected during the fetch phase.
        portage.elog.elog_process(self.pkg.cpv, self.settings)
class PollConstants(object):

    """
    Provides POLL* constants that are equivalent to those from the
    select module, for use by PollSelectAdapter.
    """

    names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
    # NOTE(review): the enclosing loop over names appears elided;
    # each constant is copied from select when available, with a
    # fallback default v otherwise.
        locals()[k] = getattr(select, k, v)
class AsynchronousTask(SlotObject):
    """
    Subclasses override _wait() and _poll() so that calls
    to public methods can be wrapped for implementing
    hooks such as exit listener notification.

    Sublasses should call self.wait() to notify exit listeners after
    the task is complete and self.returncode has been set.
    """

    __slots__ = ("background", "cancelled", "returncode") + \
        ("_exit_listeners", "_exit_listener_stack", "_start_listeners")

    # NOTE(review): the headers of start()/_start()/isAlive()/poll()/
    # wait()/cancel() appear elided in this chunk; only fragments of
    # their bodies remain below.
        Start an asynchronous task and then return as soon as possible.
        raise NotImplementedError(self)
        return self.returncode is None
        return self.returncode
        if self.returncode is None:
        return self.returncode
        return self.returncode
        self.cancelled = True

    def addStartListener(self, f):
        """
        The function will be called with one argument, a reference to self.
        """
        if self._start_listeners is None:
            self._start_listeners = []
        self._start_listeners.append(f)

    def removeStartListener(self, f):
        if self._start_listeners is None:
        self._start_listeners.remove(f)

    def _start_hook(self):
        if self._start_listeners is not None:
            # Consume the list before calling so no listener can be
            # invoked twice.
            start_listeners = self._start_listeners
            self._start_listeners = None

            for f in start_listeners:

    def addExitListener(self, f):
        """
        The function will be called with one argument, a reference to self.
        """
        if self._exit_listeners is None:
            self._exit_listeners = []
        self._exit_listeners.append(f)

    def removeExitListener(self, f):
        if self._exit_listeners is None:
            if self._exit_listener_stack is not None:
                self._exit_listener_stack.remove(f)
        self._exit_listeners.remove(f)

    def _wait_hook(self):
        """
        Call this method after the task completes, just before returning
        the returncode from wait() or poll(). This hook is
        used to trigger exit listeners when the returncode first
        becomes available.
        """
        if self.returncode is not None and \
            self._exit_listeners is not None:

            # This prevents recursion, in case one of the
            # exit handlers triggers this method again by
            # calling wait(). Use a stack that gives
            # removeExitListener() an opportunity to consume
            # listeners from the stack, before they can get
            # called below. This is necessary because a call
            # to one exit listener may result in a call to
            # removeExitListener() for another listener on
            # the stack. That listener needs to be removed
            # from the stack since it would be inconsistent
            # to call it after it has been been passed into
            # removeExitListener().
            self._exit_listener_stack = self._exit_listeners
            self._exit_listeners = None

            self._exit_listener_stack.reverse()
            while self._exit_listener_stack:
                self._exit_listener_stack.pop()(self)
class AbstractPollTask(AsynchronousTask):
    """
    Base class for tasks monitored via the scheduler's poll() loop;
    tracks registration state and shared poll event masks.
    """

    # NOTE(review): the continuation of this slot tuple appears elided.
    __slots__ = ("scheduler",) + \

    # Events that indicate an error condition on the descriptor.
    _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
    # NOTE(review): the continuation of this mask appears elided.
    _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \

    def _unregister(self):
        raise NotImplementedError(self)

    def _unregister_if_appropriate(self, event):
        # Unregister on error events or hangup (EOF).
        # NOTE(review): the bodies of both branches appear elided.
        if self._registered:
            if event & self._exceptional_events:
            elif event & PollConstants.POLLHUP:
class PipeReader(AbstractPollTask):

    """
    Reads output from one or more files and saves it in memory,
    for retrieval via the getvalue() method. This is driven by
    the scheduler's poll() loop, so it runs entirely within the
    main thread.
    """

    __slots__ = ("input_files",) + \
        ("_read_data", "_reg_ids")

    # NOTE(review): the _start() method header appears elided here.
        self._reg_ids = set()
        self._read_data = []
        for k, f in self.input_files.iteritems():
            # Non-blocking mode so a read can never stall the poll loop.
            fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
                fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
            self._reg_ids.add(self.scheduler.register(f.fileno(),
                self._registered_events, self._output_handler))
        self._registered = True

    # NOTE(review): isAlive()/cancel()/_wait() headers appear elided;
    # only fragments of their bodies remain below.
        return self._registered

        if self.returncode is None:
        self.cancelled = True

        if self.returncode is not None:
            return self.returncode
        if self._registered:
            # Run the poll loop until all inputs reach EOF.
            self.scheduler.schedule(self._reg_ids)
        self.returncode = os.EX_OK
        return self.returncode

    # NOTE(review): getvalue() header appears elided here.
        """Retrieve the entire contents"""
        if sys.hexversion >= 0x3000000:
            return bytes().join(self._read_data)
        return "".join(self._read_data)

    # NOTE(review): close() header appears elided here.
        """Free the memory buffer."""
        self._read_data = None

    def _output_handler(self, fd, event):
        # NOTE(review): parts of this handler (break/EOF handling)
        # appear elided in this chunk.
        if event & PollConstants.POLLIN:
            # Find the file object matching the ready descriptor.
            for f in self.input_files.itervalues():
                if fd == f.fileno():
            buf = array.array('B')
            buf.fromfile(f, self._bufsize)
            self._read_data.append(buf.tostring())

        self._unregister_if_appropriate(event)
        return self._registered

    def _unregister(self):
        """
        Unregister from the scheduler and close open files.
        """

        self._registered = False

        if self._reg_ids is not None:
            for reg_id in self._reg_ids:
                self.scheduler.unregister(reg_id)
            self._reg_ids = None

        if self.input_files is not None:
            for f in self.input_files.itervalues():
            self.input_files = None
class CompositeTask(AsynchronousTask):
    """
    A task implemented as a sequence of inner tasks; _current_task
    always points at the task currently running (None when idle or
    finished).
    """

    __slots__ = ("scheduler",) + ("_current_task",)

    # NOTE(review): isAlive()/cancel()/_poll()/_wait() headers appear
    # elided in this chunk; only fragments of their bodies remain.
        return self._current_task is not None

        self.cancelled = True
        if self._current_task is not None:
            self._current_task.cancel()

        """
        This does a loop calling self._current_task.poll()
        repeatedly as long as the value of self._current_task
        keeps changing. It calls poll() a maximum of one time
        for a given self._current_task instance. This is useful
        since calling poll() on a task can trigger advance to
        the next task could eventually lead to the returncode
        being set in cases when polling only a single task would
        not have the same effect.
        """
        task = self._current_task
        if task is None or task is prev:
            # don't poll the same task more than once
        return self.returncode

        task = self._current_task
        # don't wait for the same task more than once
        # Before the task.wait() method returned, an exit
        # listener should have set self._current_task to either
        # a different task or None. Something is wrong.
        raise AssertionError("self._current_task has not " + \
            "changed since calling wait", self, task)
        return self.returncode

    def _assert_current(self, task):
        """
        Raises an AssertionError if the given task is not the
        same one as self._current_task. This can be useful
        for detecting bugs.
        """
        if task is not self._current_task:
            raise AssertionError("Unrecognized task: %s" % (task,))

    def _default_exit(self, task):
        """
        Calls _assert_current() on the given task and then sets the
        composite returncode attribute if task.returncode != os.EX_OK.
        If the task failed then self._current_task will be set to None.
        Subclasses can use this as a generic task exit callback.

        @returns: The task.returncode attribute.
        """
        self._assert_current(task)
        if task.returncode != os.EX_OK:
            self.returncode = task.returncode
            self._current_task = None
        return task.returncode

    def _final_exit(self, task):
        """
        Assumes that task is the final task of this composite task.
        Calls _default_exit() and sets self.returncode to the task's
        returncode and sets self._current_task to None.
        """
        self._default_exit(task)
        self._current_task = None
        self.returncode = task.returncode
        return self.returncode

    def _default_final_exit(self, task):
        """
        This calls _final_exit() and then wait().

        Subclasses can use this as a generic final task exit callback.
        """
        self._final_exit(task)
        # NOTE(review): the wait() call appears elided in this chunk.

    def _start_task(self, task, exit_handler):
        """
        Register exit handler for the given task, set it
        as self._current_task, and call task.start().

        Subclasses can use this as a generic way to start
        a task.
        """
        task.addExitListener(exit_handler)
        self._current_task = task
        # NOTE(review): the task.start() call appears elided here.
class TaskSequence(CompositeTask):
    """
    A collection of tasks that executes sequentially. Each task
    must have a addExitListener() method that can be used as
    a means to trigger movement from one task to the next.
    """

    __slots__ = ("_task_queue",)

    def __init__(self, **kwargs):
        AsynchronousTask.__init__(self, **kwargs)
        # FIFO of pending tasks, consumed from the left.
        self._task_queue = deque()

    def add(self, task):
        self._task_queue.append(task)

    # NOTE(review): the _start() method header appears elided here.
        self._start_next_task()

    # NOTE(review): the cancel() method header appears elided here.
        self._task_queue.clear()
        CompositeTask.cancel(self)

    def _start_next_task(self):
        self._start_task(self._task_queue.popleft(),
            self._task_exit_handler)

    def _task_exit_handler(self, task):
        # On failure stop; otherwise advance to the next queued task,
        # finishing when the queue is empty.
        # NOTE(review): the wait() call and else: line appear elided.
        if self._default_exit(task) != os.EX_OK:
        elif self._task_queue:
            self._start_next_task()
            self._final_exit(task)
class SubProcess(AbstractPollTask):
    """
    A task backed by a forked child process, monitored through a file
    descriptor registered with the scheduler.
    """

    __slots__ = ("pid",) + \
        ("_files", "_reg_id")

    # A file descriptor is required for the scheduler to monitor changes from
    # inside a poll() loop. When logging is not enabled, create a pipe just to
    # serve this purpose alone.

    # NOTE(review): the _poll() method header and its try/except lines
    # appear elided in this chunk.
        if self.returncode is not None:
            return self.returncode
        if self.pid is None:
            return self.returncode
        if self._registered:
            return self.returncode

        # Reap the child without blocking.
        retval = os.waitpid(self.pid, os.WNOHANG)
        if e.errno != errno.ECHILD:
        retval = (self.pid, 1)

        if retval == (0, 0):
        self._set_returncode(retval)
        return self.returncode

    # NOTE(review): the cancel() method header and its try/except
    # scaffolding appear elided in this chunk.
        os.kill(self.pid, signal.SIGTERM)
        if e.errno != errno.ESRCH:
        self.cancelled = True
        if self.pid is not None:
        return self.returncode

    # NOTE(review): the isAlive() method header appears elided here.
        return self.pid is not None and \
            self.returncode is None

    # NOTE(review): the _wait() method header and its try/except lines
    # appear elided in this chunk.
        if self.returncode is not None:
            return self.returncode

        if self._registered:
            # Let the poll loop run until the child's pipe reaches EOF.
            self.scheduler.schedule(self._reg_id)
            if self.returncode is not None:
                return self.returncode

        # Blocking reap of the child process.
        wait_retval = os.waitpid(self.pid, 0)
        if e.errno != errno.ECHILD:
        self._set_returncode((self.pid, 1))
        self._set_returncode(wait_retval)

        return self.returncode

    def _unregister(self):
        """
        Unregister from the scheduler and close open files.
        """

        self._registered = False

        if self._reg_id is not None:
            self.scheduler.unregister(self._reg_id)

        if self._files is not None:
            for f in self._files.itervalues():

    def _set_returncode(self, wait_retval):
        # Decode the 16-bit waitpid status into a shell-style code.
        # NOTE(review): the if/else branch structure around the two
        # shift statements appears elided in this chunk.
        retval = wait_retval[1]

        if retval != os.EX_OK:
            retval = (retval & 0xff) << 8
            retval = retval >> 8

        self.returncode = retval
class SpawnProcess(SubProcess):

    """
    Constructor keyword args are passed into portage.process.spawn().
    The required "args" keyword argument will be passed as the first
    positional argument to spawn().
    """

    # Keyword attributes forwarded verbatim to portage.process.spawn().
    _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
        "uid", "gid", "groups", "umask", "logfile",
        "path_lookup", "pre_exec")

    # NOTE(review): the continuation of this slot tuple appears elided.
    __slots__ = ("args",) + \

    _file_names = ("log", "process", "stdout")
    _files_dict = slot_dict_class(_file_names, prefix="")

    # NOTE(review): the _start() method header and several scaffolding
    # lines appear elided in this chunk.
        if self.fd_pipes is None:
        fd_pipes = self.fd_pipes
        fd_pipes.setdefault(0, sys.stdin.fileno())
        fd_pipes.setdefault(1, sys.stdout.fileno())
        fd_pipes.setdefault(2, sys.stderr.fileno())

        # flush any pending output
        for fd in fd_pipes.itervalues():
            if fd == sys.stdout.fileno():
            if fd == sys.stderr.fileno():

        logfile = self.logfile
        self._files = self._files_dict()

        master_fd, slave_fd = self._pipe(fd_pipes)
        # Reads from the master end must never block the poll loop.
        fcntl.fcntl(master_fd, fcntl.F_SETFL,
            fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

        fd_pipes_orig = fd_pipes.copy()

        # TODO: Use job control functions like tcsetpgrp() to control
        # access to stdin. Until then, use /dev/null so that any
        # attempts to read from stdin will immediately return EOF
        # instead of blocking indefinitely.
        null_input = open('/dev/null', 'rb')
        fd_pipes[0] = null_input.fileno()
        fd_pipes[0] = fd_pipes_orig[0]

        files.process = os.fdopen(master_fd, 'rb')
        if logfile is not None:
            # Redirect child stdout/stderr into the pipe and tee to log.
            fd_pipes[1] = slave_fd
            fd_pipes[2] = slave_fd

            files.log = open(logfile, mode='ab')
            portage.util.apply_secpass_permissions(logfile,
                uid=portage.portage_uid, gid=portage.portage_gid,

            if not self.background:
                files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')

            output_handler = self._output_handler

            # Create a dummy pipe so the scheduler can monitor
            # the process from inside a poll() loop.
            fd_pipes[self._dummy_pipe_fd] = slave_fd
            fd_pipes[1] = slave_fd
            fd_pipes[2] = slave_fd
            output_handler = self._dummy_handler

        # Collect the spawn() keyword arguments from attributes.
        for k in self._spawn_kwarg_names:
            v = getattr(self, k)

        kwargs["fd_pipes"] = fd_pipes
        kwargs["returnpid"] = True
        kwargs.pop("logfile", None)

        self._reg_id = self.scheduler.register(files.process.fileno(),
            self._registered_events, output_handler)
        self._registered = True

        retval = self._spawn(self.args, **kwargs)

        if null_input is not None:

        if isinstance(retval, int):
            # An int return value indicates that spawn itself failed.
            self.returncode = retval

        self.pid = retval[0]
        # This task reaps the pid itself; remove it from the global list.
        portage.process.spawned_pids.remove(self.pid)

    def _pipe(self, fd_pipes):
        """
        @type fd_pipes: dict
        @param fd_pipes: pipes from which to copy terminal size if desired.
        """
        # NOTE(review): body (os.pipe() call) appears elided here.

    def _spawn(self, args, **kwargs):
        return portage.process.spawn(args, **kwargs)

    def _output_handler(self, fd, event):
        # Drain available output; tee it to stdout (unless running in
        # the background) and to the log file.
        # NOTE(review): EOF/exception handling appears elided here.
        if event & PollConstants.POLLIN:
            buf = array.array('B')
            buf.fromfile(files.process, self._bufsize)
            if not self.background:
                buf.tofile(files.stdout)
                files.stdout.flush()
            buf.tofile(files.log)

        self._unregister_if_appropriate(event)
        return self._registered

    def _dummy_handler(self, fd, event):
        """
        This method is mainly interested in detecting EOF, since
        the only purpose of the pipe is to allow the scheduler to
        monitor the process from inside a poll() loop.
        """
        if event & PollConstants.POLLIN:
            # Data is read and discarded.
            buf = array.array('B')
            buf.fromfile(self._files.process, self._bufsize)

        self._unregister_if_appropriate(event)
        return self._registered
class MiscFunctionsProcess(SpawnProcess):
    """
    Spawns misc-functions.sh with an existing ebuild environment.
    """

    __slots__ = ("commands", "phase", "pkg", "settings")

    # NOTE(review): the _start() method header appears elided here.
        settings = self.settings
        settings.pop("EBUILD_PHASE", None)
        portage_bin_path = settings["PORTAGE_BIN_PATH"]
        misc_sh_binary = os.path.join(portage_bin_path,
            os.path.basename(portage.const.MISC_SH_BINARY))

        self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
        self.logfile = settings.get("PORTAGE_LOG_FILE")

        # Remove a stale exit-status file before spawning.
        portage._doebuild_exit_status_unlink(
            settings.get("EBUILD_EXIT_STATUS_FILE"))

        SpawnProcess._start(self)

    def _spawn(self, args, **kwargs):
        settings = self.settings
        debug = settings.get("PORTAGE_DEBUG") == "1"
        # The args are joined into a single shell command string.
        return portage.spawn(" ".join(args), settings,
            debug=debug, **kwargs)

    def _set_returncode(self, wait_retval):
        SpawnProcess._set_returncode(self, wait_retval)
        # Fold the phase's exit-status-file check into the returncode.
        self.returncode = portage._doebuild_exit_status_check_and_log(
            self.settings, self.phase, self.returncode)
class EbuildFetcher(SpawnProcess):
    """
    Fetches SRC_URI distfiles for one package by spawning the ebuild(1)
    helper as a subprocess.
    """

    # NOTE(review): the continuation of this slot tuple appears elided.
    __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \

    # NOTE(review): the _start() method header and several scaffolding
    # lines (phase selection, debug guard) appear elided in this chunk.
        root_config = self.pkg.root_config
        portdb = root_config.trees["porttree"].dbapi
        ebuild_path = portdb.findname(self.pkg.cpv)
        # Borrow a config instance from the pool for this fetch.
        settings = self.config_pool.allocate()
        settings.setcpv(self.pkg)

        # In prefetch mode, logging goes to emerge-fetch.log and the builddir
        # should not be touched since otherwise it could interfere with
        # another instance of the same cpv concurrently being built for a
        # different $ROOT (currently, builds only cooperate with prefetchers
        # that are spawned for the same $ROOT).
        if not self.prefetch:
            self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
            self._build_dir.lock()
            self._build_dir.clean_log()
            portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
            if self.logfile is None:
                self.logfile = settings.get("PORTAGE_LOG_FILE")

        # If any incremental variables have been overridden
        # via the environment, those values need to be passed
        # along here so that they are correctly considered by
        # the config instance in the subproccess.
        fetch_env = os.environ.copy()

        nocolor = settings.get("NOCOLOR")
        if nocolor is not None:
            fetch_env["NOCOLOR"] = nocolor

        fetch_env["PORTAGE_NICENESS"] = "0"
        fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"

        ebuild_binary = os.path.join(
            settings["PORTAGE_BIN_PATH"], "ebuild")

        fetch_args = [ebuild_binary, ebuild_path, phase]
        debug = settings.get("PORTAGE_DEBUG") == "1"
        fetch_args.append("--debug")

        self.args = fetch_args
        self.env = fetch_env
        SpawnProcess._start(self)

    def _pipe(self, fd_pipes):
        """When appropriate, use a pty so that fetcher progress bars,
        like wget has, will work properly."""
        if self.background or not sys.stdout.isatty():
            # When the output only goes to a log file,
            # there's no point in creating a pty.
        stdout_pipe = fd_pipes.get(1)
        got_pty, master_fd, slave_fd = \
            portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
        return (master_fd, slave_fd)

    def _set_returncode(self, wait_retval):
        SpawnProcess._set_returncode(self, wait_retval)
        # Collect elog messages that might have been
        # created by the pkg_nofetch phase.
        # NOTE(review): some lines (elog_out init/close) appear elided.
        if self._build_dir is not None:
            # Skip elog messages for prefetch, in order to avoid duplicates.
            if not self.prefetch and self.returncode != os.EX_OK:
                if self.logfile is not None:
                    elog_out = open(self.logfile, 'a')
                msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
                if self.logfile is not None:
                    msg += ", Log file:"
                eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
                if self.logfile is not None:
                    eerror(" '%s'" % (self.logfile,),
                        phase="unpack", key=self.pkg.cpv, out=elog_out)
                if elog_out is not None:
            if not self.prefetch:
                portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
            features = self._build_dir.settings.features
            if self.returncode == os.EX_OK:
                self._build_dir.clean_log()
            # Return the config instance to the pool and release the dir.
            self._build_dir.unlock()
            self.config_pool.deallocate(self._build_dir.settings)
            self._build_dir = None
2592 class EbuildBuildDir(SlotObject):
2594 __slots__ = ("dir_path", "pkg", "settings",
2595 "locked", "_catdir", "_lock_obj")
2597 def __init__(self, **kwargs):
2598 SlotObject.__init__(self, **kwargs)
2603 This raises an AlreadyLocked exception if lock() is called
2604 while a lock is already held. In order to avoid this, call
2605 unlock() or check whether the "locked" attribute is True
2606 or False before calling lock().
2608 if self._lock_obj is not None:
2609 raise self.AlreadyLocked((self._lock_obj,))
2611 dir_path = self.dir_path
2612 if dir_path is None:
2613 root_config = self.pkg.root_config
2614 portdb = root_config.trees["porttree"].dbapi
2615 ebuild_path = portdb.findname(self.pkg.cpv)
2616 settings = self.settings
2617 settings.setcpv(self.pkg)
2618 debug = settings.get("PORTAGE_DEBUG") == "1"
2619 use_cache = 1 # always true
2620 portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2621 self.settings, debug, use_cache, portdb)
2622 dir_path = self.settings["PORTAGE_BUILDDIR"]
2624 catdir = os.path.dirname(dir_path)
2625 self._catdir = catdir
2627 portage.util.ensure_dirs(os.path.dirname(catdir),
2628 gid=portage.portage_gid,
2632 catdir_lock = portage.locks.lockdir(catdir)
2633 portage.util.ensure_dirs(catdir,
2634 gid=portage.portage_gid,
2636 self._lock_obj = portage.locks.lockdir(dir_path)
2638 self.locked = self._lock_obj is not None
2639 if catdir_lock is not None:
2640 portage.locks.unlockdir(catdir_lock)
2642 def clean_log(self):
2643 """Discard existing log."""
2644 settings = self.settings
2646 for x in ('.logid', 'temp/build.log'):
2648 os.unlink(os.path.join(settings["PORTAGE_BUILDDIR"], x))
2653 if self._lock_obj is None:
2656 portage.locks.unlockdir(self._lock_obj)
2657 self._lock_obj = None
2660 catdir = self._catdir
2663 catdir_lock = portage.locks.lockdir(catdir)
2669 if e.errno not in (errno.ENOENT,
2670 errno.ENOTEMPTY, errno.EEXIST):
2673 portage.locks.unlockdir(catdir_lock)
2675 class AlreadyLocked(portage.exception.PortageException):
class EbuildBuild(CompositeTask):
    """Composite task that fetches, builds and optionally packages a
    single ebuild, chaining fetch -> build -> binpkg sub-tasks."""

    __slots__ = ("args_set", "config_pool", "find_blockers",
        "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
        "prefetcher", "settings", "world_atom") + \
        ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")

    def _start(self):

        logger = self.logger
        opts = self.opts
        pkg = self.pkg
        settings = self.settings
        world_atom = self.world_atom
        root_config = pkg.root_config
        tree = "porttree"
        self._tree = tree
        portdb = root_config.trees[tree].dbapi
        settings.setcpv(pkg)
        settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
        ebuild_path = portdb.findname(self.pkg.cpv)
        self._ebuild_path = ebuild_path

        # If a background prefetcher is still running it holds locks on
        # the files being fetched, so wait for it before fetching here.
        prefetcher = self.prefetcher
        if prefetcher is None:
            pass
        elif not prefetcher.isAlive():
            prefetcher.cancel()
        elif prefetcher.poll() is None:

            waiting_msg = "Fetching files " + \
                "in the background. " + \
                "To view fetch progress, run `tail -f " + \
                "/var/log/emerge-fetch.log` in another " + \
                "terminal."
            msg_prefix = colorize("GOOD", " * ")
            from textwrap import wrap
            waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
                for line in wrap(waiting_msg, 65))
            if not self.background:
                writemsg(waiting_msg, noiselevel=-1)

            self._current_task = prefetcher
            prefetcher.addExitListener(self._prefetch_exit)
            return

        self._prefetch_exit(prefetcher)

    def _prefetch_exit(self, prefetcher):

        opts = self.opts
        pkg = self.pkg
        settings = self.settings

        if opts.fetchonly and opts.pretend:
            # Synchronous pretend-fetch path: just print the URIs.
            fetcher = EbuildFetchonly(
                fetch_all=opts.fetch_all_uri,
                pkg=pkg, pretend=opts.pretend,
                settings=settings)
            retval = fetcher.execute()
            self.returncode = retval
            self.wait()
            return

        fetcher = EbuildFetcher(config_pool=self.config_pool,
            fetchall=opts.fetch_all_uri,
            fetchonly=opts.fetchonly,
            background=self.background,
            pkg=pkg, scheduler=self.scheduler)

        self._start_task(fetcher, self._fetch_exit)

    def _fetch_exit(self, fetcher):
        opts = self.opts
        pkg = self.pkg

        fetch_failed = False
        if opts.fetchonly:
            fetch_failed = self._final_exit(fetcher) != os.EX_OK
        else:
            fetch_failed = self._default_exit(fetcher) != os.EX_OK

        if fetch_failed and fetcher.logfile is not None and \
            os.path.exists(fetcher.logfile):
            self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile

        if not fetch_failed and fetcher.logfile is not None:
            # Fetch was successful, so remove the fetch log.
            try:
                os.unlink(fetcher.logfile)
            except OSError:
                pass

        if fetch_failed or opts.fetchonly:
            self.wait()
            return

        logger = self.logger
        pkg_count = self.pkg_count
        scheduler = self.scheduler
        settings = self.settings
        features = settings.features
        ebuild_path = self._ebuild_path
        system_set = pkg.root_config.sets["system"]

        self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
        self._build_dir.lock()

        # Cleaning is triggered before the setup
        # phase, in portage.doebuild().
        msg = " === (%s of %s) Cleaning (%s::%s)" % \
            (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
        short_msg = "emerge: (%s of %s) %s Clean" % \
            (pkg_count.curval, pkg_count.maxval, pkg.cpv)
        logger.log(msg, short_msg=short_msg)

        #buildsyspkg: Check if we need to _force_ binary package creation
        self._issyspkg = "buildsyspkg" in features and \
            system_set.findAtomForPackage(pkg) and \
            not opts.buildpkg

        if opts.buildpkg or self._issyspkg:

            self._buildpkg = True

            msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
                (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
            short_msg = "emerge: (%s of %s) %s Compile" % \
                (pkg_count.curval, pkg_count.maxval, pkg.cpv)
            logger.log(msg, short_msg=short_msg)

        else:
            msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
                (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
            short_msg = "emerge: (%s of %s) %s Compile" % \
                (pkg_count.curval, pkg_count.maxval, pkg.cpv)
            logger.log(msg, short_msg=short_msg)

        build = EbuildExecuter(background=self.background, pkg=pkg,
            scheduler=scheduler, settings=settings)
        self._start_task(build, self._build_exit)

    def _unlock_builddir(self):
        # Flush any pending elog messages before dropping the lock.
        portage.elog.elog_process(self.pkg.cpv, self.settings)
        self._build_dir.unlock()

    def _build_exit(self, build):
        if self._default_exit(build) != os.EX_OK:
            self._unlock_builddir()
            self.wait()
            return

        buildpkg = self._buildpkg

        if not buildpkg:
            self._final_exit(build)
            self.wait()
            return

        if self._issyspkg:
            msg = ">>> This is a system package, " + \
                "let's pack a rescue tarball.\n"

            log_path = self.settings.get("PORTAGE_LOG_FILE")
            if log_path is not None:
                log_file = open(log_path, 'a')
                try:
                    log_file.write(msg)
                finally:
                    log_file.close()

            if not self.background:
                portage.writemsg_stdout(msg, noiselevel=-1)

        packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
            scheduler=self.scheduler, settings=self.settings)

        self._start_task(packager, self._buildpkg_exit)

    def _buildpkg_exit(self, packager):
        """
        Released build dir lock when there is a failure or
        when in buildpkgonly mode. Otherwise, the lock will
        be released when merge() is called.
        """

        if self._default_exit(packager) != os.EX_OK:
            self._unlock_builddir()
            self.wait()
            return

        if self.opts.buildpkgonly:
            # Need to call "clean" phase for buildpkgonly mode
            portage.elog.elog_process(self.pkg.cpv, self.settings)
            phase = "clean"
            clean_phase = EbuildPhase(background=self.background,
                pkg=self.pkg, phase=phase,
                scheduler=self.scheduler, settings=self.settings,
                tree=self._tree)
            self._start_task(clean_phase, self._clean_exit)
            return

        # Continue holding the builddir lock until
        # after the package has been installed.
        self._current_task = None
        self.returncode = packager.returncode
        self.wait()

    def _clean_exit(self, clean_phase):
        if self._final_exit(clean_phase) != os.EX_OK or \
            self.opts.buildpkgonly:
            self._unlock_builddir()
        self.wait()

    def install(self):
        """
        Install the package and then clean up and release locks.
        Only call this after the build has completed successfully
        and neither fetchonly nor buildpkgonly mode are enabled.
        """

        ldpath_mtimes = self.ldpath_mtimes
        logger = self.logger
        pkg = self.pkg
        pkg_count = self.pkg_count
        settings = self.settings
        world_atom = self.world_atom
        ebuild_path = self._ebuild_path
        tree = self._tree

        merge = EbuildMerge(find_blockers=self.find_blockers,
            ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
            pkg_count=pkg_count, pkg_path=ebuild_path,
            scheduler=self.scheduler,
            settings=settings, tree=tree, world_atom=world_atom)

        msg = " === (%s of %s) Merging (%s::%s)" % \
            (pkg_count.curval, pkg_count.maxval,
            pkg.cpv, ebuild_path)
        short_msg = "emerge: (%s of %s) %s Merge" % \
            (pkg_count.curval, pkg_count.maxval, pkg.cpv)
        logger.log(msg, short_msg=short_msg)

        try:
            rval = merge.execute()
        finally:
            # The lock must be released even if merge fails.
            self._unlock_builddir()

        return rval
class EbuildExecuter(CompositeTask):
    """Runs the build-time ebuild phases in order:
    clean -> setup -> unpack -> prepare/configure/compile/test/install."""

    __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)

    _phases = ("prepare", "configure", "compile", "test", "install")

    # Eclasses whose unpack phase touches shared $DISTDIR checkouts and
    # therefore must not run concurrently.
    _live_eclasses = frozenset([
        "bzr",
        "cvs",
        "darcs",
        "git",
        "mercurial",
        "subversion",
    ])

    def _start(self):
        self._tree = "porttree"
        pkg = self.pkg
        phase = "clean"
        clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
            scheduler=self.scheduler, settings=self.settings, tree=self._tree)
        self._start_task(clean_phase, self._clean_phase_exit)

    def _clean_phase_exit(self, clean_phase):

        if self._default_exit(clean_phase) != os.EX_OK:
            self.wait()
            return

        pkg = self.pkg
        scheduler = self.scheduler
        settings = self.settings
        cleanup = 1

        # This initializes PORTAGE_LOG_FILE.
        portage.prepare_build_dirs(pkg.root, settings, cleanup)

        setup_phase = EbuildPhase(background=self.background,
            pkg=pkg, phase="setup", scheduler=scheduler,
            settings=settings, tree=self._tree)

        # setup runs through the scheduler's serialized setup queue.
        setup_phase.addExitListener(self._setup_exit)
        self._current_task = setup_phase
        self.scheduler.scheduleSetup(setup_phase)

    def _setup_exit(self, setup_phase):

        if self._default_exit(setup_phase) != os.EX_OK:
            self.wait()
            return

        unpack_phase = EbuildPhase(background=self.background,
            pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
            settings=self.settings, tree=self._tree)

        if self._live_eclasses.intersection(self.pkg.inherited):
            # Serialize $DISTDIR access for live ebuilds since
            # otherwise they can interfere with eachother.
            unpack_phase.addExitListener(self._unpack_exit)
            self._current_task = unpack_phase
            self.scheduler.scheduleUnpack(unpack_phase)
        else:
            self._start_task(unpack_phase, self._unpack_exit)

    def _unpack_exit(self, unpack_phase):

        if self._default_exit(unpack_phase) != os.EX_OK:
            self.wait()
            return

        ebuild_phases = TaskSequence(scheduler=self.scheduler)

        pkg = self.pkg
        phases = self._phases
        eapi = pkg.metadata["EAPI"]
        if eapi in ("0", "1"):
            # skip src_prepare and src_configure
            phases = phases[2:]

        for phase in phases:
            ebuild_phases.add(EbuildPhase(background=self.background,
                pkg=self.pkg, phase=phase, scheduler=self.scheduler,
                settings=self.settings, tree=self._tree))

        self._start_task(ebuild_phases, self._default_final_exit)
class EbuildMetadataPhase(SubProcess):

    """
    Asynchronous interface for the ebuild "depend" phase which is
    used to extract metadata from the ebuild.
    """

    __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
        "ebuild_mtime", "metadata", "portdb", "repo_path", "settings") + \
        ("_raw_metadata",)

    _file_names = ("ebuild",)
    _files_dict = slot_dict_class(_file_names, prefix="")
    # Arbitrary high fd on which the child writes the metadata lines.
    _metadata_fd = 9

    def _start(self):
        settings = self.settings
        settings.setcpv(self.cpv)
        ebuild_path = self.ebuild_path

        # Try the cheap EAPI probes first; if the EAPI is unsupported we
        # can report metadata without spawning bash at all.
        eapi = None
        if 'parse-eapi-glep-55' in settings.features:
            pf, eapi = portage._split_ebuild_name_glep55(
                os.path.basename(ebuild_path))
        if eapi is None and \
            'parse-eapi-ebuild-head' in settings.features:
            eapi = portage._parse_eapi_ebuild_head(codecs.open(ebuild_path,
                mode='r', encoding='utf_8', errors='replace'))

        if eapi is not None:
            if not portage.eapi_is_supported(eapi):
                self.metadata_callback(self.cpv, self.ebuild_path,
                    self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
                self.returncode = os.EX_OK
                self.wait()
                return

            settings.configdict['pkg']['EAPI'] = eapi

        debug = settings.get("PORTAGE_DEBUG") == "1"
        if self.fd_pipes is not None:
            fd_pipes = self.fd_pipes.copy()
        else:
            fd_pipes = {}

        fd_pipes.setdefault(0, sys.stdin.fileno())
        fd_pipes.setdefault(1, sys.stdout.fileno())
        fd_pipes.setdefault(2, sys.stderr.fileno())

        # flush any pending output
        for fd in fd_pipes.itervalues():
            if fd == sys.stdout.fileno():
                sys.stdout.flush()
            if fd == sys.stderr.fileno():
                sys.stderr.flush()

        fd_pipes_orig = fd_pipes.copy()
        self._files = self._files_dict()
        files = self._files

        master_fd, slave_fd = os.pipe()
        # Non-blocking reads so the poll loop never stalls on the pipe.
        fcntl.fcntl(master_fd, fcntl.F_SETFL,
            fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

        fd_pipes[self._metadata_fd] = slave_fd

        self._raw_metadata = []
        files.ebuild = os.fdopen(master_fd, 'r')
        self._reg_id = self.scheduler.register(files.ebuild.fileno(),
            self._registered_events, self._output_handler)
        self._registered = True

        retval = portage.doebuild(ebuild_path, "depend",
            settings["ROOT"], settings, debug,
            mydbapi=self.portdb, tree="porttree",
            fd_pipes=fd_pipes, returnpid=True)

        # The child owns its copy; close ours so EOF is seen on master.
        os.close(slave_fd)

        if isinstance(retval, int):
            # doebuild failed before spawning
            self._unregister()
            self.returncode = retval
            self.wait()
            return

        self.pid = retval[0]
        # This class reaps the child itself via the scheduler.
        portage.process.spawned_pids.remove(self.pid)

    def _output_handler(self, fd, event):
        """Poll callback: accumulate raw metadata; an empty read means
        EOF, so unregister and finish."""
        if event & PollConstants.POLLIN:
            self._raw_metadata.append(self._files.ebuild.read())
            if not self._raw_metadata[-1]:
                self._unregister()
                self.wait()

        self._unregister_if_appropriate(event)
        return self._registered

    def _set_returncode(self, wait_retval):
        SubProcess._set_returncode(self, wait_retval)
        if self.returncode == os.EX_OK:
            metadata_lines = "".join(self._raw_metadata).splitlines()
            if len(portage.auxdbkeys) != len(metadata_lines):
                # Don't trust bash's returncode if the
                # number of lines is incorrect.
                self.returncode = 1
            else:
                metadata = izip(portage.auxdbkeys, metadata_lines)
                self.metadata = self.metadata_callback(self.cpv,
                    self.ebuild_path, self.repo_path, metadata,
                    self.ebuild_mtime)
class EbuildProcess(SpawnProcess):
    """Spawns a single ebuild phase via portage.doebuild()."""

    __slots__ = ("phase", "pkg", "settings", "tree")

    def _start(self):
        # Don't open the log file during the clean phase since the
        # open file can result in an nfs lock on $T/build.log which
        # prevents the clean phase from removing $T.
        if self.phase not in ("clean", "cleanrm"):
            self.logfile = self.settings.get("PORTAGE_LOG_FILE")
        SpawnProcess._start(self)

    def _pipe(self, fd_pipes):
        stdout_pipe = fd_pipes.get(1)
        got_pty, master_fd, slave_fd = \
            portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
        return (master_fd, slave_fd)

    def _spawn(self, args, **kwargs):
        """Delegate the actual spawn to portage.doebuild() and return
        its result (pid list / exit status)."""
        root_config = self.pkg.root_config
        tree = self.tree
        mydbapi = root_config.trees[tree].dbapi
        settings = self.settings
        ebuild_path = settings["EBUILD"]
        debug = settings.get("PORTAGE_DEBUG") == "1"

        rval = portage.doebuild(ebuild_path, self.phase,
            root_config.root, settings, debug,
            mydbapi=mydbapi, tree=tree, **kwargs)

        # Fix: the result must be returned to SpawnProcess.
        return rval

    def _set_returncode(self, wait_retval):
        SpawnProcess._set_returncode(self, wait_retval)

        if self.phase not in ("clean", "cleanrm"):
            self.returncode = portage._doebuild_exit_status_check_and_log(
                self.settings, self.phase, self.returncode)

        # test-fail-continue turns a failed src_test into a success.
        if self.phase == "test" and self.returncode != os.EX_OK and \
            "test-fail-continue" in self.settings.features:
            self.returncode = os.EX_OK

        portage._post_phase_userpriv_perms(self.settings)
class EbuildPhase(CompositeTask):
    """Runs one ebuild phase plus any associated post-phase commands."""

    __slots__ = ("background", "pkg", "phase",
        "scheduler", "settings", "tree")

    _post_phase_cmds = portage._post_phase_cmds

    def _start(self):

        ebuild_process = EbuildProcess(background=self.background,
            pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
            settings=self.settings, tree=self.tree)

        self._start_task(ebuild_process, self._ebuild_exit)

    def _ebuild_exit(self, ebuild_process):

        if self.phase == "install":
            out = None
            log_path = self.settings.get("PORTAGE_LOG_FILE")
            log_file = None
            if self.background and log_path is not None:
                log_file = open(log_path, 'a')
                out = log_file
            try:
                portage._check_build_log(self.settings, out=out)
            finally:
                # Fix: make sure the log handle is closed even if the
                # build-log check raises.
                if log_file is not None:
                    log_file.close()

        if self._default_exit(ebuild_process) != os.EX_OK:
            self.wait()
            return

        settings = self.settings

        if self.phase == "install":
            portage._post_src_install_chost_fix(settings)
            portage._post_src_install_uid_fix(settings)

        post_phase_cmds = self._post_phase_cmds.get(self.phase)
        if post_phase_cmds is not None:
            post_phase = MiscFunctionsProcess(background=self.background,
                commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
                scheduler=self.scheduler, settings=settings)
            self._start_task(post_phase, self._post_phase_exit)
            return

        self.returncode = ebuild_process.returncode
        self._current_task = None
        self.wait()

    def _post_phase_exit(self, post_phase):
        if self._final_exit(post_phase) != os.EX_OK:
            writemsg("!!! post %s failed; exiting.\n" % self.phase,
                noiselevel=-1)
        self._current_task = None
        self.wait()
class EbuildBinpkg(EbuildProcess):
    """
    This assumes that src_install() has successfully completed.
    Runs the "package" phase and injects the result into the bintree.
    """
    __slots__ = ("_binpkg_tmpfile",)

    def _start(self):
        self.phase = "package"
        self.tree = "porttree"
        pkg = self.pkg
        root_config = pkg.root_config
        portdb = root_config.trees["porttree"].dbapi
        bintree = root_config.trees["bintree"]
        ebuild_path = portdb.findname(self.pkg.cpv)
        settings = self.settings
        debug = settings.get("PORTAGE_DEBUG") == "1"

        bintree.prevent_collision(pkg.cpv)
        # Build into a pid-suffixed temp file; inject() moves it into
        # place only after the phase succeeds.
        binpkg_tmpfile = os.path.join(bintree.pkgdir,
            pkg.cpv + ".tbz2." + str(os.getpid()))
        self._binpkg_tmpfile = binpkg_tmpfile
        settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
        settings.backup_changes("PORTAGE_BINPKG_TMPFILE")

        try:
            EbuildProcess._start(self)
        finally:
            # Fix: the temp-file setting must not leak into later
            # phases, even if _start raises.
            settings.pop("PORTAGE_BINPKG_TMPFILE", None)

    def _set_returncode(self, wait_retval):
        EbuildProcess._set_returncode(self, wait_retval)

        pkg = self.pkg
        bintree = pkg.root_config.trees["bintree"]
        binpkg_tmpfile = self._binpkg_tmpfile
        if self.returncode == os.EX_OK:
            bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
class EbuildMerge(SlotObject):
    """Synchronously merges an already-built image into the vartree and
    records the result in the world file / logs."""

    __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
        "pkg", "pkg_count", "pkg_path", "pretend",
        "scheduler", "settings", "tree", "world_atom")

    def execute(self):
        """Run portage.merge(); on success update the world file and log.
        Returns the merge exit status."""
        root_config = self.pkg.root_config
        settings = self.settings
        retval = portage.merge(settings["CATEGORY"],
            settings["PF"], settings["D"],
            os.path.join(settings["PORTAGE_BUILDDIR"],
            "build-info"), root_config.root, settings,
            myebuild=settings["EBUILD"],
            mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
            vartree=root_config.trees["vartree"],
            prev_mtimes=self.ldpath_mtimes,
            scheduler=self.scheduler,
            blockers=self.find_blockers)

        if retval == os.EX_OK:
            self.world_atom(self.pkg)
            self._log_success()

        return retval

    def _log_success(self):
        pkg = self.pkg
        pkg_count = self.pkg_count
        pkg_path = self.pkg_path
        logger = self.logger
        if "noclean" not in self.settings.features:
            short_msg = "emerge: (%s of %s) %s Clean Post" % \
                (pkg_count.curval, pkg_count.maxval, pkg.cpv)
            logger.log((" === (%s of %s) " + \
                "Post-Build Cleaning (%s::%s)") % \
                (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
                short_msg=short_msg)
        logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
            (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3321 class PackageUninstall(AsynchronousTask):
3323 __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3327 unmerge(self.pkg.root_config, self.opts, "unmerge",
3328 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3329 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3330 writemsg_level=self._writemsg_level)
3331 except UninstallFailure, e:
3332 self.returncode = e.status
3334 self.returncode = os.EX_OK
3337 def _writemsg_level(self, msg, level=0, noiselevel=0):
3339 log_path = self.settings.get("PORTAGE_LOG_FILE")
3340 background = self.background
3342 if log_path is None:
3343 if not (background and level < logging.WARNING):
3344 portage.util.writemsg_level(msg,
3345 level=level, noiselevel=noiselevel)
3348 portage.util.writemsg_level(msg,
3349 level=level, noiselevel=noiselevel)
3351 f = open(log_path, 'a')
3357 class Binpkg(CompositeTask):
3359 __slots__ = ("find_blockers",
3360 "ldpath_mtimes", "logger", "opts",
3361 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3362 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3363 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3365 def _writemsg_level(self, msg, level=0, noiselevel=0):
3367 if not self.background:
3368 portage.util.writemsg_level(msg,
3369 level=level, noiselevel=noiselevel)
3371 log_path = self.settings.get("PORTAGE_LOG_FILE")
3372 if log_path is not None:
3373 f = open(log_path, 'a')
3382 settings = self.settings
3383 settings.setcpv(pkg)
3384 self._tree = "bintree"
3385 self._bintree = self.pkg.root_config.trees[self._tree]
3386 self._verify = not self.opts.pretend
3388 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3389 "portage", pkg.category, pkg.pf)
3390 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3391 pkg=pkg, settings=settings)
3392 self._image_dir = os.path.join(dir_path, "image")
3393 self._infloc = os.path.join(dir_path, "build-info")
3394 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3395 settings["EBUILD"] = self._ebuild_path
3396 debug = settings.get("PORTAGE_DEBUG") == "1"
3397 portage.doebuild_environment(self._ebuild_path, "setup",
3398 settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3399 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3401 # The prefetcher has already completed or it
3402 # could be running now. If it's running now,
3403 # wait for it to complete since it holds
3404 # a lock on the file being fetched. The
3405 # portage.locks functions are only designed
3406 # to work between separate processes. Since
3407 # the lock is held by the current process,
3408 # use the scheduler and fetcher methods to
3409 # synchronize with the fetcher.
3410 prefetcher = self.prefetcher
3411 if prefetcher is None:
3413 elif not prefetcher.isAlive():
3415 elif prefetcher.poll() is None:
3417 waiting_msg = ("Fetching '%s' " + \
3418 "in the background. " + \
3419 "To view fetch progress, run `tail -f " + \
3420 "/var/log/emerge-fetch.log` in another " + \
3421 "terminal.") % prefetcher.pkg_path
3422 msg_prefix = colorize("GOOD", " * ")
3423 from textwrap import wrap
3424 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3425 for line in wrap(waiting_msg, 65))
3426 if not self.background:
3427 writemsg(waiting_msg, noiselevel=-1)
3429 self._current_task = prefetcher
3430 prefetcher.addExitListener(self._prefetch_exit)
3433 self._prefetch_exit(prefetcher)
3435 def _prefetch_exit(self, prefetcher):
3438 pkg_count = self.pkg_count
3439 if not (self.opts.pretend or self.opts.fetchonly):
3440 self._build_dir.lock()
3441 # If necessary, discard old log so that we don't
3443 self._build_dir.clean_log()
3444 # Initialze PORTAGE_LOG_FILE.
3445 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3446 fetcher = BinpkgFetcher(background=self.background,
3447 logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3448 pretend=self.opts.pretend, scheduler=self.scheduler)
3449 pkg_path = fetcher.pkg_path
3450 self._pkg_path = pkg_path
3452 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3454 msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3455 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3456 short_msg = "emerge: (%s of %s) %s Fetch" % \
3457 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3458 self.logger.log(msg, short_msg=short_msg)
3459 self._start_task(fetcher, self._fetcher_exit)
3462 self._fetcher_exit(fetcher)
3464 def _fetcher_exit(self, fetcher):
3466 # The fetcher only has a returncode when
3467 # --getbinpkg is enabled.
3468 if fetcher.returncode is not None:
3469 self._fetched_pkg = True
3470 if self._default_exit(fetcher) != os.EX_OK:
3471 self._unlock_builddir()
3475 if self.opts.pretend:
3476 self._current_task = None
3477 self.returncode = os.EX_OK
3485 logfile = self.settings.get("PORTAGE_LOG_FILE")
3486 verifier = BinpkgVerifier(background=self.background,
3487 logfile=logfile, pkg=self.pkg)
3488 self._start_task(verifier, self._verifier_exit)
3491 self._verifier_exit(verifier)
3493 def _verifier_exit(self, verifier):
3494 if verifier is not None and \
3495 self._default_exit(verifier) != os.EX_OK:
3496 self._unlock_builddir()
3500 logger = self.logger
3502 pkg_count = self.pkg_count
3503 pkg_path = self._pkg_path
3505 if self._fetched_pkg:
3506 self._bintree.inject(pkg.cpv, filename=pkg_path)
3508 if self.opts.fetchonly:
3509 self._current_task = None
3510 self.returncode = os.EX_OK
3514 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3515 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3516 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3517 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3518 logger.log(msg, short_msg=short_msg)
3521 settings = self.settings
3522 ebuild_phase = EbuildPhase(background=self.background,
3523 pkg=pkg, phase=phase, scheduler=self.scheduler,
3524 settings=settings, tree=self._tree)
3526 self._start_task(ebuild_phase, self._clean_exit)
3528 def _clean_exit(self, clean_phase):
3529 if self._default_exit(clean_phase) != os.EX_OK:
3530 self._unlock_builddir()
3534 dir_path = self._build_dir.dir_path
3536 infloc = self._infloc
3538 pkg_path = self._pkg_path
3541 for mydir in (dir_path, self._image_dir, infloc):
3542 portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3543 gid=portage.data.portage_gid, mode=dir_mode)
3545 # This initializes PORTAGE_LOG_FILE.
3546 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3547 self._writemsg_level(">>> Extracting info\n")
3549 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3550 check_missing_metadata = ("CATEGORY", "PF")
3551 missing_metadata = set()
3552 for k in check_missing_metadata:
3553 v = pkg_xpak.getfile(k)
3555 missing_metadata.add(k)
3557 pkg_xpak.unpackinfo(infloc)
3558 for k in missing_metadata:
3566 f = open(os.path.join(infloc, k), 'wb')
3572 # Store the md5sum in the vdb.
3573 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3575 f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3579 # This gives bashrc users an opportunity to do various things
3580 # such as remove binary packages after they're installed.
3581 settings = self.settings
3582 settings.setcpv(self.pkg)
3583 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3584 settings.backup_changes("PORTAGE_BINPKG_FILE")
3587 setup_phase = EbuildPhase(background=self.background,
3588 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3589 settings=settings, tree=self._tree)
3591 setup_phase.addExitListener(self._setup_exit)
3592 self._current_task = setup_phase
3593 self.scheduler.scheduleSetup(setup_phase)
3595 def _setup_exit(self, setup_phase):
3596 if self._default_exit(setup_phase) != os.EX_OK:
3597 self._unlock_builddir()
3601 extractor = BinpkgExtractorAsync(background=self.background,
3602 image_dir=self._image_dir,
3603 pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3604 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3605 self._start_task(extractor, self._extractor_exit)
3607 def _extractor_exit(self, extractor):
3608 if self._final_exit(extractor) != os.EX_OK:
3609 self._unlock_builddir()
3610 writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3614 def _unlock_builddir(self):
3615 if self.opts.pretend or self.opts.fetchonly:
3617 portage.elog.elog_process(self.pkg.cpv, self.settings)
3618 self._build_dir.unlock()
3622 # This gives bashrc users an opportunity to do various things
3623 # such as remove binary packages after they're installed.
3624 settings = self.settings
3625 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3626 settings.backup_changes("PORTAGE_BINPKG_FILE")
3628 merge = EbuildMerge(find_blockers=self.find_blockers,
3629 ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3630 pkg=self.pkg, pkg_count=self.pkg_count,
3631 pkg_path=self._pkg_path, scheduler=self.scheduler,
3632 settings=settings, tree=self._tree, world_atom=self.world_atom)
3635 retval = merge.execute()
3637 settings.pop("PORTAGE_BINPKG_FILE", None)
3638 self._unlock_builddir()
class BinpkgFetcher(SpawnProcess):
    """Fetches a single .tbz2 from PORTAGE_BINHOST using the configured
    FETCHCOMMAND/RESUMECOMMAND, with optional distlocks locking."""

    __slots__ = ("pkg", "pretend",
        "locked", "pkg_path", "_lock_obj")

    def __init__(self, **kwargs):
        SpawnProcess.__init__(self, **kwargs)
        pkg = self.pkg
        self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)

    def _start(self):

        pkg = self.pkg
        pretend = self.pretend
        bintree = pkg.root_config.trees["bintree"]
        settings = bintree.settings
        use_locks = "distlocks" in settings.features
        pkg_path = self.pkg_path

        if not pretend:
            portage.util.ensure_dirs(os.path.dirname(pkg_path))
            if use_locks:
                self.lock()
        exists = os.path.exists(pkg_path)
        resume = exists and os.path.basename(pkg_path) in bintree.invalids
        if not (pretend or resume):
            # Remove existing file or broken symlink.
            try:
                os.unlink(pkg_path)
            except OSError:
                pass

        # urljoin doesn't work correctly with
        # unrecognized protocols like sftp
        if bintree._remote_has_index:
            rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
            if not rel_uri:
                rel_uri = pkg.cpv + ".tbz2"
            uri = bintree._remote_base_uri.rstrip("/") + \
                "/" + rel_uri.lstrip("/")
        else:
            uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
                "/" + pkg.pf + ".tbz2"

        if pretend:
            # Pretend mode just reports the URI that would be fetched.
            portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
            self.returncode = os.EX_OK
            self.wait()
            return

        protocol = urlparse.urlparse(uri)[0]
        fcmd_prefix = "FETCHCOMMAND"
        if resume:
            fcmd_prefix = "RESUMECOMMAND"
        # Prefer a protocol-specific command, e.g. FETCHCOMMAND_HTTP.
        fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
        if not fcmd:
            fcmd = settings.get(fcmd_prefix)

        fcmd_vars = {
            "DISTDIR" : os.path.dirname(pkg_path),
            "URI"     : uri,
            "FILE"    : os.path.basename(pkg_path)
        }

        fetch_env = dict(settings.iteritems())
        fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
            for x in shlex.split(fcmd)]

        if self.fd_pipes is None:
            self.fd_pipes = {}
        fd_pipes = self.fd_pipes

        # Redirect all output to stdout since some fetchers like
        # wget pollute stderr (if portage detects a problem then it
        # can send it's own message to stderr).
        fd_pipes.setdefault(0, sys.stdin.fileno())
        fd_pipes.setdefault(1, sys.stdout.fileno())
        fd_pipes.setdefault(2, sys.stdout.fileno())

        self.args = fetch_args
        self.env = fetch_env
        SpawnProcess._start(self)

    def _set_returncode(self, wait_retval):
        SpawnProcess._set_returncode(self, wait_retval)
        if self.returncode == os.EX_OK:
            # If possible, update the mtime to match the remote package if
            # the fetcher didn't already do it automatically.
            bintree = self.pkg.root_config.trees["bintree"]
            if bintree._remote_has_index:
                remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
                if remote_mtime is not None:
                    try:
                        remote_mtime = long(remote_mtime)
                    except ValueError:
                        pass
                    else:
                        try:
                            local_mtime = long(os.stat(self.pkg_path).st_mtime)
                        except OSError:
                            pass
                        else:
                            if remote_mtime != local_mtime:
                                try:
                                    os.utime(self.pkg_path,
                                        (remote_mtime, remote_mtime))
                                except OSError:
                                    # Best-effort mtime sync only.
                                    pass

        if self.locked:
            self.unlock()

    def lock(self):
        """
        This raises an AlreadyLocked exception if lock() is called
        while a lock is already held. In order to avoid this, call
        unlock() or check whether the "locked" attribute is True
        or False before calling lock().
        """
        if self._lock_obj is not None:
            raise self.AlreadyLocked((self._lock_obj,))

        self._lock_obj = portage.locks.lockfile(
            self.pkg_path, wantnewlockfile=1)
        self.locked = True

    class AlreadyLocked(portage.exception.PortageException):
        pass

    def unlock(self):
        if self._lock_obj is None:
            return
        portage.locks.unlockfile(self._lock_obj)
        self._lock_obj = None
        self.locked = False
3780 class BinpkgVerifier(AsynchronousTask):
3781 __slots__ = ("logfile", "pkg",)
3785 Note: Unlike a normal AsynchronousTask.start() method,
3786 this one does all work is synchronously. The returncode
3787 attribute will be set before it returns.
3791 root_config = pkg.root_config
3792 bintree = root_config.trees["bintree"]
3794 stdout_orig = sys.stdout
3795 stderr_orig = sys.stderr
3797 if self.background and self.logfile is not None:
3798 log_file = open(self.logfile, 'a')
3800 if log_file is not None:
3801 sys.stdout = log_file
3802 sys.stderr = log_file
3804 bintree.digestCheck(pkg)
3805 except portage.exception.FileNotFound:
3806 writemsg("!!! Fetching Binary failed " + \
3807 "for '%s'\n" % pkg.cpv, noiselevel=-1)
3809 except portage.exception.DigestException, e:
3810 writemsg("\n!!! Digest verification failed:\n",
3812 writemsg("!!! %s\n" % e.value[0],
3814 writemsg("!!! Reason: %s\n" % e.value[1],
3816 writemsg("!!! Got: %s\n" % e.value[2],
3818 writemsg("!!! Expected: %s\n" % e.value[3],
3821 if rval != os.EX_OK:
3822 pkg_path = bintree.getname(pkg.cpv)
3823 head, tail = os.path.split(pkg_path)
3824 temp_filename = portage._checksum_failure_temp_file(head, tail)
3825 writemsg("File renamed to '%s'\n" % (temp_filename,),
3828 sys.stdout = stdout_orig
3829 sys.stderr = stderr_orig
3830 if log_file is not None:
3833 self.returncode = rval
3836 class BinpkgPrefetcher(CompositeTask):
3838 __slots__ = ("pkg",) + \
3839 ("pkg_path", "_bintree",)
3842 self._bintree = self.pkg.root_config.trees["bintree"]
3843 fetcher = BinpkgFetcher(background=self.background,
3844 logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3845 scheduler=self.scheduler)
3846 self.pkg_path = fetcher.pkg_path
3847 self._start_task(fetcher, self._fetcher_exit)
3849 def _fetcher_exit(self, fetcher):
3851 if self._default_exit(fetcher) != os.EX_OK:
3855 verifier = BinpkgVerifier(background=self.background,
3856 logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3857 self._start_task(verifier, self._verifier_exit)
3859 def _verifier_exit(self, verifier):
3860 if self._default_exit(verifier) != os.EX_OK:
3864 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3866 self._current_task = None
3867 self.returncode = os.EX_OK
3870 class BinpkgExtractorAsync(SpawnProcess):
3872 __slots__ = ("image_dir", "pkg", "pkg_path")
3874 _shell_binary = portage.const.BASH_BINARY
3877 self.args = [self._shell_binary, "-c",
3878 "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3879 (portage._shell_quote(self.pkg_path),
3880 portage._shell_quote(self.image_dir))]
3882 self.env = self.pkg.root_config.settings.environ()
3883 SpawnProcess._start(self)
3885 class MergeListItem(CompositeTask):
3888 TODO: For parallel scheduling, everything here needs asynchronous
3889 execution support (start, poll, and wait methods).
3892 __slots__ = ("args_set",
3893 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3894 "find_blockers", "logger", "mtimedb", "pkg",
3895 "pkg_count", "pkg_to_replace", "prefetcher",
3896 "settings", "statusMessage", "world_atom") + \
3902 build_opts = self.build_opts
3905 # uninstall, executed by self.merge()
3906 self.returncode = os.EX_OK
3910 args_set = self.args_set
3911 find_blockers = self.find_blockers
3912 logger = self.logger
3913 mtimedb = self.mtimedb
3914 pkg_count = self.pkg_count
3915 scheduler = self.scheduler
3916 settings = self.settings
3917 world_atom = self.world_atom
3918 ldpath_mtimes = mtimedb["ldpath"]
3920 action_desc = "Emerging"
3922 if pkg.type_name == "binary":
3923 action_desc += " binary"
3925 if build_opts.fetchonly:
3926 action_desc = "Fetching"
3928 msg = "%s (%s of %s) %s" % \
3930 colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3931 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3932 colorize("GOOD", pkg.cpv))
3934 portdb = pkg.root_config.trees["porttree"].dbapi
3935 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3936 if portdir_repo_name:
3937 pkg_repo_name = pkg.metadata.get("repository")
3938 if pkg_repo_name != portdir_repo_name:
3939 if not pkg_repo_name:
3940 pkg_repo_name = "unknown repo"
3941 msg += " from %s" % pkg_repo_name
3944 msg += " %s %s" % (preposition, pkg.root)
3946 if not build_opts.pretend:
3947 self.statusMessage(msg)
3948 logger.log(" >>> emerge (%s of %s) %s to %s" % \
3949 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3951 if pkg.type_name == "ebuild":
3953 build = EbuildBuild(args_set=args_set,
3954 background=self.background,
3955 config_pool=self.config_pool,
3956 find_blockers=find_blockers,
3957 ldpath_mtimes=ldpath_mtimes, logger=logger,
3958 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3959 prefetcher=self.prefetcher, scheduler=scheduler,
3960 settings=settings, world_atom=world_atom)
3962 self._install_task = build
3963 self._start_task(build, self._default_final_exit)
3966 elif pkg.type_name == "binary":
3968 binpkg = Binpkg(background=self.background,
3969 find_blockers=find_blockers,
3970 ldpath_mtimes=ldpath_mtimes, logger=logger,
3971 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3972 prefetcher=self.prefetcher, settings=settings,
3973 scheduler=scheduler, world_atom=world_atom)
3975 self._install_task = binpkg
3976 self._start_task(binpkg, self._default_final_exit)
3980 self._install_task.poll()
3981 return self.returncode
3984 self._install_task.wait()
3985 return self.returncode
3990 build_opts = self.build_opts
3991 find_blockers = self.find_blockers
3992 logger = self.logger
3993 mtimedb = self.mtimedb
3994 pkg_count = self.pkg_count
3995 prefetcher = self.prefetcher
3996 scheduler = self.scheduler
3997 settings = self.settings
3998 world_atom = self.world_atom
3999 ldpath_mtimes = mtimedb["ldpath"]
4002 if not (build_opts.buildpkgonly or \
4003 build_opts.fetchonly or build_opts.pretend):
4005 uninstall = PackageUninstall(background=self.background,
4006 ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
4007 pkg=pkg, scheduler=scheduler, settings=settings)
4010 retval = uninstall.wait()
4011 if retval != os.EX_OK:
4015 if build_opts.fetchonly or \
4016 build_opts.buildpkgonly:
4017 return self.returncode
4019 retval = self._install_task.install()
4022 class PackageMerge(AsynchronousTask):
4024 TODO: Implement asynchronous merge so that the scheduler can
4025 run while a merge is executing.
4028 __slots__ = ("merge",)
4032 pkg = self.merge.pkg
4033 pkg_count = self.merge.pkg_count
4036 action_desc = "Uninstalling"
4037 preposition = "from"
4040 action_desc = "Installing"
4042 counter_str = "(%s of %s) " % \
4043 (colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
4044 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)))
4049 colorize("GOOD", pkg.cpv))
4052 msg += " %s %s" % (preposition, pkg.root)
4054 if not self.merge.build_opts.fetchonly and \
4055 not self.merge.build_opts.pretend and \
4056 not self.merge.build_opts.buildpkgonly:
4057 self.merge.statusMessage(msg)
4059 self.returncode = self.merge.merge()
4062 class DependencyArg(object):
4063 def __init__(self, arg=None, root_config=None):
4065 self.root_config = root_config
4068 return str(self.arg)
4070 class AtomArg(DependencyArg):
4071 def __init__(self, atom=None, **kwargs):
4072 DependencyArg.__init__(self, **kwargs)
4074 if not isinstance(self.atom, portage.dep.Atom):
4075 self.atom = portage.dep.Atom(self.atom)
4076 self.set = (self.atom, )
class PackageArg(DependencyArg):
	"""A dependency argument that names one specific Package instance.

	The argument is pinned with an exact-version atom ("=" + cpv), so the
	resolver can satisfy it only with that precise package version.
	"""
	def __init__(self, package=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		self.package = package
		# Build the "=" atom once and share it between atom and set.
		exact_atom = portage.dep.Atom("=" + package.cpv)
		self.atom = exact_atom
		self.set = (exact_atom, )
4085 class SetArg(DependencyArg):
4086 def __init__(self, set=None, **kwargs):
4087 DependencyArg.__init__(self, **kwargs)
4089 self.name = self.arg[len(SETPREFIX):]
4091 class Dependency(SlotObject):
4092 __slots__ = ("atom", "blocker", "depth",
4093 "parent", "onlydeps", "priority", "root")
4094 def __init__(self, **kwargs):
4095 SlotObject.__init__(self, **kwargs)
4096 if self.priority is None:
4097 self.priority = DepPriority()
4098 if self.depth is None:
4101 class BlockerCache(portage.cache.mappings.MutableMapping):
4102 """This caches blockers of installed packages so that dep_check does not
4103 have to be done for every single installed package on every invocation of
4104 emerge. The cache is invalidated whenever it is detected that something
4105 has changed that might alter the results of dep_check() calls:
4106 1) the set of installed packages (including COUNTER) has changed
4107 2) the old-style virtuals have changed
4110 # Number of uncached packages to trigger cache update, since
4111 # it's wasteful to update it for every vdb change.
4112 _cache_threshold = 5
4114 class BlockerData(object):
4116 __slots__ = ("__weakref__", "atoms", "counter")
4118 def __init__(self, counter, atoms):
4119 self.counter = counter
4122 def __init__(self, myroot, vardb):
4124 self._virtuals = vardb.settings.getvirtuals()
4125 self._cache_filename = os.path.join(myroot,
4126 portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4127 self._cache_version = "1"
4128 self._cache_data = None
4129 self._modified = set()
4134 f = open(self._cache_filename, mode='rb')
4135 mypickle = pickle.Unpickler(f)
4137 mypickle.find_global = None
4138 except AttributeError:
4139 # TODO: If py3k, override Unpickler.find_class().
4141 self._cache_data = mypickle.load()
4144 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4145 if isinstance(e, pickle.UnpicklingError):
4146 writemsg("!!! Error loading '%s': %s\n" % \
4147 (self._cache_filename, str(e)), noiselevel=-1)
4150 cache_valid = self._cache_data and \
4151 isinstance(self._cache_data, dict) and \
4152 self._cache_data.get("version") == self._cache_version and \
4153 isinstance(self._cache_data.get("blockers"), dict)
4155 # Validate all the atoms and counters so that
4156 # corruption is detected as soon as possible.
4157 invalid_items = set()
4158 for k, v in self._cache_data["blockers"].iteritems():
4159 if not isinstance(k, basestring):
4160 invalid_items.add(k)
4163 if portage.catpkgsplit(k) is None:
4164 invalid_items.add(k)
4166 except portage.exception.InvalidData:
4167 invalid_items.add(k)
4169 if not isinstance(v, tuple) or \
4171 invalid_items.add(k)
4174 if not isinstance(counter, (int, long)):
4175 invalid_items.add(k)
4177 if not isinstance(atoms, (list, tuple)):
4178 invalid_items.add(k)
4180 invalid_atom = False
4182 if not isinstance(atom, basestring):
4185 if atom[:1] != "!" or \
4186 not portage.isvalidatom(
4187 atom, allow_blockers=True):
4191 invalid_items.add(k)
4194 for k in invalid_items:
4195 del self._cache_data["blockers"][k]
4196 if not self._cache_data["blockers"]:
4200 self._cache_data = {"version":self._cache_version}
4201 self._cache_data["blockers"] = {}
4202 self._cache_data["virtuals"] = self._virtuals
4203 self._modified.clear()
4206 """If the current user has permission and the internal blocker cache
4207 been updated, save it to disk and mark it unmodified. This is called
4208 by emerge after it has proccessed blockers for all installed packages.
4209 Currently, the cache is only written if the user has superuser
4210 privileges (since that's required to obtain a lock), but all users
4211 have read access and benefit from faster blocker lookups (as long as
4212 the entire cache is still valid). The cache is stored as a pickled
4213 dict object with the following format:
4217 "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4218 "virtuals" : vardb.settings.getvirtuals()
4221 if len(self._modified) >= self._cache_threshold and \
4224 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4225 pickle.dump(self._cache_data, f, protocol=2)
4227 portage.util.apply_secpass_permissions(
4228 self._cache_filename, gid=portage.portage_gid, mode=0644)
4229 except (IOError, OSError), e:
4231 self._modified.clear()
4233 def __setitem__(self, cpv, blocker_data):
4235 Update the cache and mark it as modified for a future call to
4238 @param cpv: Package for which to cache blockers.
4240 @param blocker_data: An object with counter and atoms attributes.
4241 @type blocker_data: BlockerData
4243 self._cache_data["blockers"][cpv] = \
4244 (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4245 self._modified.add(cpv)
4248 if self._cache_data is None:
4249 # triggered by python-trace
4251 return iter(self._cache_data["blockers"])
	def __delitem__(self, cpv):
		"""Drop the cached blocker data for *cpv* from the cache."""
		del self._cache_data["blockers"][cpv]
4256 def __getitem__(self, cpv):
4259 @returns: An object with counter and atoms attributes.
4261 return self.BlockerData(*self._cache_data["blockers"][cpv])
4263 class BlockerDB(object):
4265 def __init__(self, root_config):
4266 self._root_config = root_config
4267 self._vartree = root_config.trees["vartree"]
4268 self._portdb = root_config.trees["porttree"].dbapi
4270 self._dep_check_trees = None
4271 self._fake_vartree = None
4273 def _get_fake_vartree(self, acquire_lock=0):
4274 fake_vartree = self._fake_vartree
4275 if fake_vartree is None:
4276 fake_vartree = FakeVartree(self._root_config,
4277 acquire_lock=acquire_lock)
4278 self._fake_vartree = fake_vartree
4279 self._dep_check_trees = { self._vartree.root : {
4280 "porttree" : fake_vartree,
4281 "vartree" : fake_vartree,
4284 fake_vartree.sync(acquire_lock=acquire_lock)
4287 def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4288 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4289 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4290 settings = self._vartree.settings
4291 stale_cache = set(blocker_cache)
4292 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4293 dep_check_trees = self._dep_check_trees
4294 vardb = fake_vartree.dbapi
4295 installed_pkgs = list(vardb)
4297 for inst_pkg in installed_pkgs:
4298 stale_cache.discard(inst_pkg.cpv)
4299 cached_blockers = blocker_cache.get(inst_pkg.cpv)
4300 if cached_blockers is not None and \
4301 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4302 cached_blockers = None
4303 if cached_blockers is not None:
4304 blocker_atoms = cached_blockers.atoms
4306 # Use aux_get() to trigger FakeVartree global
4307 # updates on *DEPEND when appropriate.
4308 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4310 portage.dep._dep_check_strict = False
4311 success, atoms = portage.dep_check(depstr,
4312 vardb, settings, myuse=inst_pkg.use.enabled,
4313 trees=dep_check_trees, myroot=inst_pkg.root)
4315 portage.dep._dep_check_strict = True
4317 pkg_location = os.path.join(inst_pkg.root,
4318 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4319 portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4320 (pkg_location, atoms), noiselevel=-1)
4323 blocker_atoms = [atom for atom in atoms \
4324 if atom.startswith("!")]
4325 blocker_atoms.sort()
4326 counter = long(inst_pkg.metadata["COUNTER"])
4327 blocker_cache[inst_pkg.cpv] = \
4328 blocker_cache.BlockerData(counter, blocker_atoms)
4329 for cpv in stale_cache:
4330 del blocker_cache[cpv]
4331 blocker_cache.flush()
4333 blocker_parents = digraph()
4335 for pkg in installed_pkgs:
4336 for blocker_atom in blocker_cache[pkg.cpv].atoms:
4337 blocker_atom = blocker_atom.lstrip("!")
4338 blocker_atoms.append(blocker_atom)
4339 blocker_parents.add(blocker_atom, pkg)
4341 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4342 blocking_pkgs = set()
4343 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4344 blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4346 # Check for blockers in the other direction.
4347 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4349 portage.dep._dep_check_strict = False
4350 success, atoms = portage.dep_check(depstr,
4351 vardb, settings, myuse=new_pkg.use.enabled,
4352 trees=dep_check_trees, myroot=new_pkg.root)
4354 portage.dep._dep_check_strict = True
4356 # We should never get this far with invalid deps.
4357 show_invalid_depstring_notice(new_pkg, depstr, atoms)
4360 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4363 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4364 for inst_pkg in installed_pkgs:
4366 blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4367 except (portage.exception.InvalidDependString, StopIteration):
4369 blocking_pkgs.add(inst_pkg)
4371 return blocking_pkgs
4373 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4375 msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4376 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4377 p_type, p_root, p_key, p_status = parent_node
4379 if p_status == "nomerge":
4380 category, pf = portage.catsplit(p_key)
4381 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4382 msg.append("Portage is unable to process the dependencies of the ")
4383 msg.append("'%s' package. " % p_key)
4384 msg.append("In order to correct this problem, the package ")
4385 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4386 msg.append("As a temporary workaround, the --nodeps option can ")
4387 msg.append("be used to ignore all dependencies. For reference, ")
4388 msg.append("the problematic dependencies can be found in the ")
4389 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4391 msg.append("This package can not be installed. ")
4392 msg.append("Please notify the '%s' package maintainer " % p_key)
4393 msg.append("about this problem.")
4395 msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4396 writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4398 class PackageVirtualDbapi(portage.dbapi):
4400 A dbapi-like interface class that represents the state of the installed
4401 package database as new packages are installed, replacing any packages
4402 that previously existed in the same slot. The main difference between
4403 this class and fakedbapi is that this one uses Package instances
4404 internally (passed in via cpv_inject() and cpv_remove() calls).
4406 def __init__(self, settings):
4407 portage.dbapi.__init__(self)
4408 self.settings = settings
4409 self._match_cache = {}
4415 Remove all packages.
4419 self._cp_map.clear()
4420 self._cpv_map.clear()
4423 obj = PackageVirtualDbapi(self.settings)
4424 obj._match_cache = self._match_cache.copy()
4425 obj._cp_map = self._cp_map.copy()
4426 for k, v in obj._cp_map.iteritems():
4427 obj._cp_map[k] = v[:]
4428 obj._cpv_map = self._cpv_map.copy()
4432 return self._cpv_map.itervalues()
4434 def __contains__(self, item):
4435 existing = self._cpv_map.get(item.cpv)
4436 if existing is not None and \
4441 def get(self, item, default=None):
4442 cpv = getattr(item, "cpv", None)
4446 type_name, root, cpv, operation = item
4448 existing = self._cpv_map.get(cpv)
4449 if existing is not None and \
4454 def match_pkgs(self, atom):
4455 return [self._cpv_map[cpv] for cpv in self.match(atom)]
	def _clear_cache(self):
		# Invalidate memoized lookup state after the package set changes:
		# both the cached category list and any cached match() results
		# may now be stale.
		if self._categories is not None:
			self._categories = None
		if self._match_cache:
			self._match_cache = {}
4463 def match(self, origdep, use_cache=1):
4464 result = self._match_cache.get(origdep)
4465 if result is not None:
4467 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4468 self._match_cache[origdep] = result
	def cpv_exists(self, cpv):
		"""Return True if *cpv* is present in this virtual database."""
		return cpv in self._cpv_map
4474 def cp_list(self, mycp, use_cache=1):
4475 cachelist = self._match_cache.get(mycp)
4476 # cp_list() doesn't expand old-style virtuals
4477 if cachelist and cachelist[0].startswith(mycp):
4479 cpv_list = self._cp_map.get(mycp)
4480 if cpv_list is None:
4483 cpv_list = [pkg.cpv for pkg in cpv_list]
4484 self._cpv_sort_ascending(cpv_list)
4485 if not (not cpv_list and mycp.startswith("virtual/")):
4486 self._match_cache[mycp] = cpv_list
4490 return list(self._cp_map)
4493 return list(self._cpv_map)
4495 def cpv_inject(self, pkg):
4496 cp_list = self._cp_map.get(pkg.cp)
4499 self._cp_map[pkg.cp] = cp_list
4500 e_pkg = self._cpv_map.get(pkg.cpv)
4501 if e_pkg is not None:
4504 self.cpv_remove(e_pkg)
4505 for e_pkg in cp_list:
4506 if e_pkg.slot_atom == pkg.slot_atom:
4509 self.cpv_remove(e_pkg)
4512 self._cpv_map[pkg.cpv] = pkg
4515 def cpv_remove(self, pkg):
4516 old_pkg = self._cpv_map.get(pkg.cpv)
4519 self._cp_map[pkg.cp].remove(pkg)
4520 del self._cpv_map[pkg.cpv]
4523 def aux_get(self, cpv, wants):
4524 metadata = self._cpv_map[cpv].metadata
4525 return [metadata.get(x, "") for x in wants]
	def aux_update(self, cpv, values):
		"""Merge *values* into the metadata of the Package keyed by *cpv*."""
		self._cpv_map[cpv].metadata.update(values)
4531 class depgraph(object):
4533 pkg_tree_map = RootConfig.pkg_tree_map
4535 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4537 def __init__(self, settings, trees, myopts, myparams, spinner):
4538 self.settings = settings
4539 self.target_root = settings["ROOT"]
4540 self.myopts = myopts
4541 self.myparams = myparams
4543 if settings.get("PORTAGE_DEBUG", "") == "1":
4545 self.spinner = spinner
4546 self._running_root = trees["/"]["root_config"]
4547 self._opts_no_restart = Scheduler._opts_no_restart
4548 self.pkgsettings = {}
4549 # Maps slot atom to package for each Package added to the graph.
4550 self._slot_pkg_map = {}
4551 # Maps nodes to the reasons they were selected for reinstallation.
4552 self._reinstall_nodes = {}
4555 self._trees_orig = trees
4557 # Contains a filtered view of preferred packages that are selected
4558 # from available repositories.
4559 self._filtered_trees = {}
4560 # Contains installed packages and new packages that have been added
4562 self._graph_trees = {}
4563 # All Package instances
4564 self._pkg_cache = {}
4565 for myroot in trees:
4566 self.trees[myroot] = {}
4567 # Create a RootConfig instance that references
4568 # the FakeVartree instead of the real one.
4569 self.roots[myroot] = RootConfig(
4570 trees[myroot]["vartree"].settings,
4572 trees[myroot]["root_config"].setconfig)
4573 for tree in ("porttree", "bintree"):
4574 self.trees[myroot][tree] = trees[myroot][tree]
4575 self.trees[myroot]["vartree"] = \
4576 FakeVartree(trees[myroot]["root_config"],
4577 pkg_cache=self._pkg_cache)
4578 self.pkgsettings[myroot] = portage.config(
4579 clone=self.trees[myroot]["vartree"].settings)
4580 self._slot_pkg_map[myroot] = {}
4581 vardb = self.trees[myroot]["vartree"].dbapi
4582 preload_installed_pkgs = "--nodeps" not in self.myopts and \
4583 "--buildpkgonly" not in self.myopts
4584 # This fakedbapi instance will model the state that the vdb will
4585 # have after new packages have been installed.
4586 fakedb = PackageVirtualDbapi(vardb.settings)
4587 if preload_installed_pkgs:
4589 self.spinner.update()
4590 # This triggers metadata updates via FakeVartree.
4591 vardb.aux_get(pkg.cpv, [])
4592 fakedb.cpv_inject(pkg)
4594 # Now that the vardb state is cached in our FakeVartree,
4595 # we won't be needing the real vartree cache for awhile.
4596 # To make some room on the heap, clear the vardbapi
4598 trees[myroot]["vartree"].dbapi._clear_cache()
4601 self.mydbapi[myroot] = fakedb
4604 graph_tree.dbapi = fakedb
4605 self._graph_trees[myroot] = {}
4606 self._filtered_trees[myroot] = {}
4607 # Substitute the graph tree for the vartree in dep_check() since we
4608 # want atom selections to be consistent with package selections
4609 # have already been made.
4610 self._graph_trees[myroot]["porttree"] = graph_tree
4611 self._graph_trees[myroot]["vartree"] = graph_tree
4612 def filtered_tree():
4614 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4615 self._filtered_trees[myroot]["porttree"] = filtered_tree
4617 # Passing in graph_tree as the vartree here could lead to better
4618 # atom selections in some cases by causing atoms for packages that
4619 # have been added to the graph to be preferred over other choices.
4620 # However, it can trigger atom selections that result in
4621 # unresolvable direct circular dependencies. For example, this
4622 # happens with gwydion-dylan which depends on either itself or
4623 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4624 # gwydion-dylan-bin needs to be selected in order to avoid a
4625 # an unresolvable direct circular dependency.
4627 # To solve the problem described above, pass in "graph_db" so that
4628 # packages that have been added to the graph are distinguishable
4629 # from other available packages and installed packages. Also, pass
4630 # the parent package into self._select_atoms() calls so that
4631 # unresolvable direct circular dependencies can be detected and
4632 # avoided when possible.
4633 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4634 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4637 portdb = self.trees[myroot]["porttree"].dbapi
4638 bindb = self.trees[myroot]["bintree"].dbapi
4639 vardb = self.trees[myroot]["vartree"].dbapi
4640 # (db, pkg_type, built, installed, db_keys)
4641 if "--usepkgonly" not in self.myopts:
4642 db_keys = list(portdb._aux_cache_keys)
4643 dbs.append((portdb, "ebuild", False, False, db_keys))
4644 if "--usepkg" in self.myopts:
4645 db_keys = list(bindb._aux_cache_keys)
4646 dbs.append((bindb, "binary", True, False, db_keys))
4647 db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4648 dbs.append((vardb, "installed", True, True, db_keys))
4649 self._filtered_trees[myroot]["dbs"] = dbs
4650 if "--usepkg" in self.myopts:
4651 self.trees[myroot]["bintree"].populate(
4652 "--getbinpkg" in self.myopts,
4653 "--getbinpkgonly" in self.myopts)
4656 self.digraph=portage.digraph()
4657 # contains all sets added to the graph
4659 # contains atoms given as arguments
4660 self._sets["args"] = InternalPackageSet()
4661 # contains all atoms from all sets added to the graph, including
4662 # atoms given as arguments
4663 self._set_atoms = InternalPackageSet()
4664 self._atom_arg_map = {}
4665 # contains all nodes pulled in by self._set_atoms
4666 self._set_nodes = set()
4667 # Contains only Blocker -> Uninstall edges
4668 self._blocker_uninstalls = digraph()
4669 # Contains only Package -> Blocker edges
4670 self._blocker_parents = digraph()
4671 # Contains only irrelevant Package -> Blocker edges
4672 self._irrelevant_blockers = digraph()
4673 # Contains only unsolvable Package -> Blocker edges
4674 self._unsolvable_blockers = digraph()
4675 # Contains all Blocker -> Blocked Package edges
4676 self._blocked_pkgs = digraph()
4677 # Contains world packages that have been protected from
4678 # uninstallation but may not have been added to the graph
4679 # if the graph is not complete yet.
4680 self._blocked_world_pkgs = {}
4681 self._slot_collision_info = {}
4682 # Slot collision nodes are not allowed to block other packages since
4683 # blocker validation is only able to account for one package per slot.
4684 self._slot_collision_nodes = set()
4685 self._parent_atoms = {}
4686 self._slot_conflict_parent_atoms = set()
4687 self._serialized_tasks_cache = None
4688 self._scheduler_graph = None
4689 self._displayed_list = None
4690 self._pprovided_args = []
4691 self._missing_args = []
4692 self._masked_installed = set()
4693 self._unsatisfied_deps_for_display = []
4694 self._unsatisfied_blockers_for_display = None
4695 self._circular_deps_for_display = None
4696 self._dep_stack = []
4697 self._unsatisfied_deps = []
4698 self._initially_unsatisfied_deps = []
4699 self._ignored_deps = []
4700 self._required_set_names = set(["system", "world"])
4701 self._select_atoms = self._select_atoms_highest_available
4702 self._select_package = self._select_pkg_highest_available
4703 self._highest_pkg_cache = {}
4705 def _show_slot_collision_notice(self):
4706 """Show an informational message advising the user to mask one of the
4707 the packages. In some cases it may be possible to resolve this
4708 automatically, but support for backtracking (removal nodes that have
4709 already been selected) will be required in order to handle all possible
4713 if not self._slot_collision_info:
4716 self._show_merge_list()
4719 msg.append("\n!!! Multiple package instances within a single " + \
4720 "package slot have been pulled\n")
4721 msg.append("!!! into the dependency graph, resulting" + \
4722 " in a slot conflict:\n\n")
4724 # Max number of parents shown, to avoid flooding the display.
4726 explanation_columns = 70
4728 for (slot_atom, root), slot_nodes \
4729 in self._slot_collision_info.iteritems():
4730 msg.append(str(slot_atom))
4733 for node in slot_nodes:
4735 msg.append(str(node))
4736 parent_atoms = self._parent_atoms.get(node)
4739 # Prefer conflict atoms over others.
4740 for parent_atom in parent_atoms:
4741 if len(pruned_list) >= max_parents:
4743 if parent_atom in self._slot_conflict_parent_atoms:
4744 pruned_list.add(parent_atom)
4746 # If this package was pulled in by conflict atoms then
4747 # show those alone since those are the most interesting.
4749 # When generating the pruned list, prefer instances
4750 # of DependencyArg over instances of Package.
4751 for parent_atom in parent_atoms:
4752 if len(pruned_list) >= max_parents:
4754 parent, atom = parent_atom
4755 if isinstance(parent, DependencyArg):
4756 pruned_list.add(parent_atom)
4757 # Prefer Packages instances that themselves have been
4758 # pulled into collision slots.
4759 for parent_atom in parent_atoms:
4760 if len(pruned_list) >= max_parents:
4762 parent, atom = parent_atom
4763 if isinstance(parent, Package) and \
4764 (parent.slot_atom, parent.root) \
4765 in self._slot_collision_info:
4766 pruned_list.add(parent_atom)
4767 for parent_atom in parent_atoms:
4768 if len(pruned_list) >= max_parents:
4770 pruned_list.add(parent_atom)
4771 omitted_parents = len(parent_atoms) - len(pruned_list)
4772 parent_atoms = pruned_list
4773 msg.append(" pulled in by\n")
4774 for parent_atom in parent_atoms:
4775 parent, atom = parent_atom
4776 msg.append(2*indent)
4777 if isinstance(parent,
4778 (PackageArg, AtomArg)):
4779 # For PackageArg and AtomArg types, it's
4780 # redundant to display the atom attribute.
4781 msg.append(str(parent))
4783 # Display the specific atom from SetArg or
4785 msg.append("%s required by %s" % (atom, parent))
4788 msg.append(2*indent)
4789 msg.append("(and %d more)\n" % omitted_parents)
4791 msg.append(" (no parents)\n")
4793 explanation = self._slot_conflict_explanation(slot_nodes)
4796 msg.append(indent + "Explanation:\n\n")
4797 for line in textwrap.wrap(explanation, explanation_columns):
4798 msg.append(2*indent + line + "\n")
4801 sys.stderr.write("".join(msg))
4804 explanations_for_all = explanations == len(self._slot_collision_info)
4806 if explanations_for_all or "--quiet" in self.myopts:
4810 msg.append("It may be possible to solve this problem ")
4811 msg.append("by using package.mask to prevent one of ")
4812 msg.append("those packages from being selected. ")
4813 msg.append("However, it is also possible that conflicting ")
4814 msg.append("dependencies exist such that they are impossible to ")
4815 msg.append("satisfy simultaneously. If such a conflict exists in ")
4816 msg.append("the dependencies of two different packages, then those ")
4817 msg.append("packages can not be installed simultaneously.")
4819 from formatter import AbstractFormatter, DumbWriter
4820 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4822 f.add_flowing_data(x)
4826 msg.append("For more information, see MASKED PACKAGES ")
4827 msg.append("section in the emerge man page or refer ")
4828 msg.append("to the Gentoo Handbook.")
4830 f.add_flowing_data(x)
4834 def _slot_conflict_explanation(self, slot_nodes):
# Return a human-readable explanation string for a two-package slot
# conflict, or (in the missing code paths) no suggestion at all.
# NOTE(review): this is a numbered dump with gaps -- the original
# docstring quote lines and several early "return" guards (e.g. after
# the len() check and the "This shouldn't happen" comment) are missing
# from the visible lines.
4836 When a slot conflict occurs due to USE deps, there are a few
4837 different cases to consider:
4839 1) New USE are correctly set but --newuse wasn't requested so an
4840 installed package with incorrect USE happened to get pulled
4841 into graph before the new one.
4843 2) New USE are incorrectly set but an installed package has correct
4844 USE so it got pulled into the graph, and a new instance also got
4845 pulled in due to --newuse or an upgrade.
4847 3) Multiple USE deps exist that can't be satisfied simultaneously,
4848 and multiple package instances got pulled into the same slot to
4849 satisfy the conflicting deps.
4851 Currently, explanations and suggested courses of action are generated
4852 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4855 if len(slot_nodes) != 2:
4856 # Suggestions are only implemented for
4857 # conflicts between two packages.
4860 all_conflict_atoms = self._slot_conflict_parent_atoms
4862 matched_atoms = None
4863 unmatched_node = None
# Classify each node in the slot: "matched" if some conflict atom
# selects it, otherwise "unmatched".
4864 for node in slot_nodes:
4865 parent_atoms = self._parent_atoms.get(node)
4866 if not parent_atoms:
4867 # Normally, there are always parent atoms. If there are
4868 # none then something unexpected is happening and there's
4869 # currently no suggestion for this case.
4871 conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4872 for parent_atom in conflict_atoms:
4873 parent, atom = parent_atom
4875 # Suggestions are currently only implemented for cases
4876 # in which all conflict atoms have USE deps.
4879 if matched_node is not None:
4880 # If conflict atoms match multiple nodes
4881 # then there's no suggestion.
4884 matched_atoms = conflict_atoms
4886 if unmatched_node is not None:
4887 # Neither node is matched by conflict atoms, and
4888 # there is no suggestion for this case.
4890 unmatched_node = node
4892 if matched_node is None or unmatched_node is None:
4893 # This shouldn't happen.
# Case 1: same version installed vs. new -> suggest --newuse or an
# explicit reinstall.
4896 if unmatched_node.installed and not matched_node.installed and \
4897 unmatched_node.cpv == matched_node.cpv:
4898 # If the conflicting packages are the same version then
4899 # --newuse should be all that's needed. If they are different
4900 # versions then there's some other problem.
4901 return "New USE are correctly set, but --newuse wasn't" + \
4902 " requested, so an installed package with incorrect USE " + \
4903 "happened to get pulled into the dependency graph. " + \
4904 "In order to solve " + \
4905 "this, either specify the --newuse option or explicitly " + \
4906 " reinstall '%s'." % matched_node.slot_atom
# Case 2: installed copy satisfies the atoms but the new one does not
# -> suggest adjusting USE; the atom list is joined English-style
# ("'a', 'b' and 'c'").
4908 if matched_node.installed and not unmatched_node.installed:
4909 atoms = sorted(set(atom for parent, atom in matched_atoms))
4910 explanation = ("New USE for '%s' are incorrectly set. " + \
4911 "In order to solve this, adjust USE to satisfy '%s'") % \
4912 (matched_node.slot_atom, atoms[0])
4914 for atom in atoms[1:-1]:
4915 explanation += ", '%s'" % (atom,)
4918 explanation += " and '%s'" % (atoms[-1],)
4924 def _process_slot_conflicts(self):
4926 Process slot conflict data to identify specific atoms which
4927 lead to conflict. These atoms only match a subset of the
4928 packages that have been pulled into a given slot.
# NOTE(review): the docstring quote lines and a few loop-control lines
# (e.g. "continue" statements) are missing from this dump.
4930 for (slot_atom, root), slot_nodes \
4931 in self._slot_collision_info.iteritems():
# Union of every parent atom of every package in this slot.
4933 all_parent_atoms = set()
4934 for pkg in slot_nodes:
4935 parent_atoms = self._parent_atoms.get(pkg)
4936 if not parent_atoms:
4938 all_parent_atoms.update(parent_atoms)
4940 for pkg in slot_nodes:
4941 parent_atoms = self._parent_atoms.get(pkg)
4942 if parent_atoms is None:
4943 parent_atoms = set()
4944 self._parent_atoms[pkg] = parent_atoms
# A parent atom that fails to match some package in the slot is
# recorded as a conflict atom.
4945 for parent_atom in all_parent_atoms:
4946 if parent_atom in parent_atoms:
4948 # Use package set for matching since it will match via
4949 # PROVIDE when necessary, while match_from_list does not.
4950 parent, atom = parent_atom
4951 atom_set = InternalPackageSet(
4952 initial_atoms=(atom,))
4953 if atom_set.findAtomForPackage(pkg):
4954 parent_atoms.add(parent_atom)
4956 self._slot_conflict_parent_atoms.add(parent_atom)
4958 def _reinstall_for_flags(self, forced_flags,
4959 orig_use, orig_iuse, cur_use, cur_iuse):
4960 """Return a set of flags that trigger reinstallation, or None if there
4961 are no such flags."""
# --newuse: reinstall on any change in IUSE membership (ignoring
# forced flags) or any change in the set of enabled USE flags.
4962 if "--newuse" in self.myopts:
4963 flags = set(orig_iuse.symmetric_difference(
4964 cur_iuse).difference(forced_flags))
4965 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4966 cur_iuse.intersection(cur_use)))
# --reinstall=changed-use: only changes in enabled USE flags matter.
4969 elif "changed-use" == self.myopts.get("--reinstall"):
4970 flags = orig_iuse.intersection(orig_use).symmetric_difference(
4971 cur_iuse.intersection(cur_use))
# NOTE(review): the trailing "if flags: return flags" / "return None"
# lines appear to be missing from this dump.
4976 def _create_graph(self, allow_unsatisfied=False):
# Drain the dependency stack, routing Package entries through
# _add_pkg_deps and everything else through _add_dep.
# NOTE(review): the enclosing "while dep_stack:" header and the
# success/failure return statements are missing from this dump.
4977 dep_stack = self._dep_stack
4979 self.spinner.update()
4980 dep = dep_stack.pop()
4981 if isinstance(dep, Package):
4982 if not self._add_pkg_deps(dep,
4983 allow_unsatisfied=allow_unsatisfied):
4986 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
4990 def _add_dep(self, dep, allow_unsatisfied=False):
# Resolve a single Dependency: create a Blocker for blocker atoms,
# otherwise select a matching package and hand it to _add_pkg.
# NOTE(review): several lines (blocker condition on 4999, returns,
# try/else glue) are missing from this dump.
4991 debug = "--debug" in self.myopts
4992 buildpkgonly = "--buildpkgonly" in self.myopts
4993 nodeps = "--nodeps" in self.myopts
4994 empty = "empty" in self.myparams
4995 deep = "deep" in self.myparams
4996 update = "--update" in self.myopts and dep.depth <= 1
4998 if not buildpkgonly and \
5000 dep.parent not in self._slot_collision_nodes:
5001 if dep.parent.onlydeps:
5002 # It's safe to ignore blockers if the
5003 # parent is an --onlydeps node.
5005 # The blocker applies to the root where
5006 # the parent is or will be installed.
5007 blocker = Blocker(atom=dep.atom,
5008 eapi=dep.parent.metadata["EAPI"],
5009 root=dep.parent.root)
5010 self._blocker_parents.add(blocker, dep.parent)
5012 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
5013 onlydeps=dep.onlydeps)
5015 if dep.priority.optional:
5016 # This could be an unecessary build-time dep
5017 # pulled in by --with-bdeps=y.
5019 if allow_unsatisfied:
5020 self._unsatisfied_deps.append(dep)
5022 self._unsatisfied_deps_for_display.append(
5023 ((dep.root, dep.atom), {"myparent":dep.parent}))
5025 # In some cases, dep_check will return deps that shouldn't
5026 # be proccessed any further, so they are identified and
5027 # discarded here. Try to discard as few as possible since
5028 # discarded dependencies reduce the amount of information
5029 # available for optimization of merge order.
5030 if dep.priority.satisfied and \
5031 not dep_pkg.installed and \
5032 not (existing_node or empty or deep or update):
5034 if dep.root == self.target_root:
5036 myarg = self._iter_atoms_for_pkg(dep_pkg).next()
5037 except StopIteration:
5039 except portage.exception.InvalidDependString:
5040 if not dep_pkg.installed:
5041 # This shouldn't happen since the package
5042 # should have been masked.
5045 self._ignored_deps.append(dep)
5048 if not self._add_pkg(dep_pkg, dep):
5052 def _add_pkg(self, pkg, dep):
# Add a package node (and its argument-atom parents) to the digraph,
# detecting slot collisions along the way, then queue the package so
# its own dependencies get processed.
# NOTE(review): this dump is missing many lines (docstring quotes,
# priority kwargs on digraph.add calls, returns, try/except glue).
5059 myparent = dep.parent
5060 priority = dep.priority
5062 if priority is None:
5063 priority = DepPriority()
5065 Fills the digraph with nodes comprised of packages to merge.
5066 mybigkey is the package spec of the package to merge.
5067 myparent is the package depending on mybigkey ( or None )
5068 addme = Should we add this package to the digraph or are we just looking at it's deps?
5069 Think --onlydeps, we need to ignore packages in that case.
5072 #IUSE-aware emerge -> USE DEP aware depgraph
5073 #"no downgrade" emerge
5075 # Ensure that the dependencies of the same package
5076 # are never processed more than once.
5077 previously_added = pkg in self.digraph
5079 # select the correct /var database that we'll be checking against
5080 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5081 pkgsettings = self.pkgsettings[pkg.root]
5086 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5087 except portage.exception.InvalidDependString, e:
5088 if not pkg.installed:
5089 show_invalid_depstring_notice(
5090 pkg, pkg.metadata["PROVIDE"], str(e))
5094 if not pkg.onlydeps:
5095 if not pkg.installed and \
5096 "empty" not in self.myparams and \
5097 vardbapi.match(pkg.slot_atom):
5098 # Increase the priority of dependencies on packages that
5099 # are being rebuilt. This optimizes merge order so that
5100 # dependencies are rebuilt/updated as soon as possible,
5101 # which is needed especially when emerge is called by
5102 # revdep-rebuild since dependencies may be affected by ABI
5103 # breakage that has rendered them useless. Don't adjust
5104 # priority here when in "empty" mode since all packages
5105 # are being merged in that case.
5106 priority.rebuild = True
5108 existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5109 slot_collision = False
# A node already occupies this slot: reuse it when it is the same
# cpv and still matches dep.atom; otherwise it's a slot collision.
5111 existing_node_matches = pkg.cpv == existing_node.cpv
5112 if existing_node_matches and \
5113 pkg != existing_node and \
5114 dep.atom is not None:
5115 # Use package set for matching since it will match via
5116 # PROVIDE when necessary, while match_from_list does not.
5117 atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5118 if not atom_set.findAtomForPackage(existing_node):
5119 existing_node_matches = False
5120 if existing_node_matches:
5121 # The existing node can be reused.
5123 for parent_atom in arg_atoms:
5124 parent, atom = parent_atom
5125 self.digraph.add(existing_node, parent,
5127 self._add_parent_atom(existing_node, parent_atom)
5128 # If a direct circular dependency is not an unsatisfied
5129 # buildtime dependency then drop it here since otherwise
5130 # it can skew the merge order calculation in an unwanted
5132 if existing_node != myparent or \
5133 (priority.buildtime and not priority.satisfied):
5134 self.digraph.addnode(existing_node, myparent,
5136 if dep.atom is not None and dep.parent is not None:
5137 self._add_parent_atom(existing_node,
5138 (dep.parent, dep.atom))
5142 # A slot collision has occurred. Sometimes this coincides
5143 # with unresolvable blockers, so the slot collision will be
5144 # shown later if there are no unresolvable blockers.
5145 self._add_slot_conflict(pkg)
5146 slot_collision = True
5149 # Now add this node to the graph so that self.display()
5150 # can show use flags and --tree portage.output. This node is
5151 # only being partially added to the graph. It must not be
5152 # allowed to interfere with the other nodes that have been
5153 # added. Do not overwrite data for existing nodes in
5154 # self.mydbapi since that data will be used for blocker
5156 # Even though the graph is now invalid, continue to process
5157 # dependencies so that things like --fetchonly can still
5158 # function despite collisions.
5160 elif not previously_added:
5161 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5162 self.mydbapi[pkg.root].cpv_inject(pkg)
5163 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5165 if not pkg.installed:
5166 # Allow this package to satisfy old-style virtuals in case it
5167 # doesn't already. Any pre-existing providers will be preferred
5170 pkgsettings.setinst(pkg.cpv, pkg.metadata)
5171 # For consistency, also update the global virtuals.
5172 settings = self.roots[pkg.root].settings
5174 settings.setinst(pkg.cpv, pkg.metadata)
5176 except portage.exception.InvalidDependString, e:
5177 show_invalid_depstring_notice(
5178 pkg, pkg.metadata["PROVIDE"], str(e))
5183 self._set_nodes.add(pkg)
5185 # Do this even when addme is False (--onlydeps) so that the
5186 # parent/child relationship is always known in case
5187 # self._show_slot_collision_notice() needs to be called later.
5188 self.digraph.add(pkg, myparent, priority=priority)
5189 if dep.atom is not None and dep.parent is not None:
5190 self._add_parent_atom(pkg, (dep.parent, dep.atom))
5193 for parent_atom in arg_atoms:
5194 parent, atom = parent_atom
5195 self.digraph.add(pkg, parent, priority=priority)
5196 self._add_parent_atom(pkg, parent_atom)
5198 """ This section determines whether we go deeper into dependencies or not.
5199 We want to go deeper on a few occasions:
5200 Installing package A, we need to make sure package A's deps are met.
5201 emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
5202 If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
5204 dep_stack = self._dep_stack
5205 if "recurse" not in self.myparams:
5207 elif pkg.installed and \
5208 "deep" not in self.myparams:
5209 dep_stack = self._ignored_deps
5211 self.spinner.update()
5216 if not previously_added:
5217 dep_stack.append(pkg)
def _add_parent_atom(self, pkg, parent_atom):
	"""Record that parent_atom pulled pkg into the graph.

	The atoms are accumulated in self._parent_atoms, a mapping from
	package to the set of (parent, atom) pairs that selected it.
	"""
	# setdefault creates the per-package set on first use.
	self._parent_atoms.setdefault(pkg, set()).add(parent_atom)
5227 def _add_slot_conflict(self, pkg):
# Register pkg as a slot-collision node and group it, together with
# the package already occupying the slot, under (slot_atom, root) in
# self._slot_collision_info.
5228 self._slot_collision_nodes.add(pkg)
5229 slot_key = (pkg.slot_atom, pkg.root)
5230 slot_nodes = self._slot_collision_info.get(slot_key)
5231 if slot_nodes is None:
# NOTE(review): the "slot_nodes = set()" initializer and the
# unconditional "slot_nodes.add(pkg)" appear to be missing from
# this dump.
5233 slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5234 self._slot_collision_info[slot_key] = slot_nodes
5237 def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
# Expand pkg's DEPEND/RDEPEND/PDEPEND strings into Dependency objects
# and feed each one to _add_dep.
# NOTE(review): this dump is missing many lines (variable setup such
# as jbigkey/myroot, loop headers, returns, and exception glue).
5239 mytype = pkg.type_name
5242 metadata = pkg.metadata
5243 myuse = pkg.use.enabled
5245 depth = pkg.depth + 1
5246 removal_action = "remove" in self.myparams
5249 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5251 edepend[k] = metadata[k]
# --buildpkgonly without deep/empty: runtime deps are irrelevant
# because nothing gets merged to the live filesystem.
5253 if not pkg.built and \
5254 "--buildpkgonly" in self.myopts and \
5255 "deep" not in self.myparams and \
5256 "empty" not in self.myparams:
5257 edepend["RDEPEND"] = ""
5258 edepend["PDEPEND"] = ""
5259 bdeps_optional = False
5261 if pkg.built and not removal_action:
5262 if self.myopts.get("--with-bdeps", "n") == "y":
5263 # Pull in build time deps as requested, but marked them as
5264 # "optional" since they are not strictly required. This allows
5265 # more freedom in the merge order calculation for solving
5266 # circular dependencies. Don't convert to PDEPEND since that
5267 # could make --with-bdeps=y less effective if it is used to
5268 # adjust merge order to prevent built_with_use() calls from
5270 bdeps_optional = True
5272 # built packages do not have build time dependencies.
5273 edepend["DEPEND"] = ""
5275 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5276 edepend["DEPEND"] = ""
# Cross-root (ROOT != /) handling of build-time deps.
5279 if self.target_root != "/":
5280 if "--root-deps" in self.myopts:
5282 if "--rdeps-only" in self.myopts:
5284 edepend["DEPEND"] = ""
5287 (bdeps_root, edepend["DEPEND"],
5288 self._priority(buildtime=(not bdeps_optional),
5289 optional=bdeps_optional)),
5290 (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5291 (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5294 debug = "--debug" in self.myopts
5295 strict = mytype != "installed"
5297 for dep_root, dep_string, dep_priority in deps:
5302 print "Parent: ", jbigkey
5303 print "Depstring:", dep_string
5304 print "Priority:", dep_priority
5305 vardb = self.roots[dep_root].trees["vartree"].dbapi
5307 selected_atoms = self._select_atoms(dep_root,
5308 dep_string, myuse=myuse, parent=pkg, strict=strict,
5309 priority=dep_priority)
5310 except portage.exception.InvalidDependString, e:
5311 show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5314 print "Candidates:", selected_atoms
5316 for atom in selected_atoms:
5319 atom = portage.dep.Atom(atom)
5321 mypriority = dep_priority.copy()
# A non-blocker atom already satisfied by an installed package
# gets its priority downgraded to "satisfied".
5322 if not atom.blocker and vardb.match(atom):
5323 mypriority.satisfied = True
5325 if not self._add_dep(Dependency(atom=atom,
5326 blocker=atom.blocker, depth=depth, parent=pkg,
5327 priority=mypriority, root=dep_root),
5328 allow_unsatisfied=allow_unsatisfied):
5331 except portage.exception.InvalidAtom, e:
5332 show_invalid_depstring_notice(
5333 pkg, dep_string, str(e))
5335 if not pkg.installed:
5339 print "Exiting...", jbigkey
5340 except portage.exception.AmbiguousPackageName, e:
5342 portage.writemsg("\n\n!!! An atom in the dependencies " + \
5343 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5345 portage.writemsg(" %s\n" % cpv, noiselevel=-1)
5346 portage.writemsg("\n", noiselevel=-1)
5347 if mytype == "binary":
5349 "!!! This binary package cannot be installed: '%s'\n" % \
5350 mykey, noiselevel=-1)
5351 elif mytype == "ebuild":
5352 portdb = self.roots[myroot].trees["porttree"].dbapi
5353 myebuild, mylocation = portdb.findname2(mykey)
5354 portage.writemsg("!!! This ebuild cannot be installed: " + \
5355 "'%s'\n" % myebuild, noiselevel=-1)
5356 portage.writemsg("!!! Please notify the package maintainer " + \
5357 "that atoms must be fully-qualified.\n", noiselevel=-1)
def _priority(self, **kwargs):
	"""Construct a dependency priority object of the right class.

	Uses UnmergeDepPriority while a "remove" operation is in
	progress, and plain DepPriority otherwise.  The keyword
	arguments are forwarded to the chosen constructor.
	"""
	if "remove" in self.myparams:
		priority_constructor = UnmergeDepPriority
	else:
		# Fix: without this else branch the assignment above was
		# unconditionally overwritten, making the "remove" case dead.
		priority_constructor = DepPriority
	return priority_constructor(**kwargs)
5368 def _dep_expand(self, root_config, atom_without_category):
# NOTE(review): docstring quote lines, the "categories = set()"
# initializer and the return statement are missing from this dump.
5370 @param root_config: a root config instance
5371 @type root_config: RootConfig
5372 @param atom_without_category: an atom without a category component
5373 @type atom_without_category: String
5375 @returns: a list of atoms containing categories (possibly empty)
# Probe every configured database for a category that contains this
# package name, then build one candidate atom per matching category.
5377 null_cp = portage.dep_getkey(insert_category_into_atom(
5378 atom_without_category, "null"))
5379 cat, atom_pn = portage.catsplit(null_cp)
5381 dbs = self._filtered_trees[root_config.root]["dbs"]
5383 for db, pkg_type, built, installed, db_keys in dbs:
5384 for cat in db.categories:
5385 if db.cp_list("%s/%s" % (cat, atom_pn)):
5389 for cat in categories:
5390 deps.append(insert_category_into_atom(
5391 atom_without_category, cat))
5394 def _have_new_virt(self, root, atom_cp):
# Scan the filtered trees for root and report whether any database
# provides atom_cp as a real (new-style virtual) package.
# NOTE(review): the "ret = ..." bookkeeping and return statement are
# missing from this dump.
5396 for db, pkg_type, built, installed, db_keys in \
5397 self._filtered_trees[root]["dbs"]:
5398 if db.cp_list(atom_cp):
5403 def _iter_atoms_for_pkg(self, pkg):
# Generator over (arg, atom) argument pairs whose atoms select pkg.
# NOTE(review): several lines (the generator's "yield"/"continue"
# statements and some loop glue) are missing from this dump.
5404 # TODO: add multiple $ROOT support
5405 if pkg.root != self.target_root:
5407 atom_arg_map = self._atom_arg_map
5408 root_config = self.roots[pkg.root]
5409 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5410 atom_cp = portage.dep_getkey(atom)
# Skip old-style virtual matches that are shadowed by a new-style
# virtual package of the same cp.
5411 if atom_cp != pkg.cp and \
5412 self._have_new_virt(pkg.root, atom_cp):
5414 visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5415 visible_pkgs.reverse() # descending order
5417 for visible_pkg in visible_pkgs:
5418 if visible_pkg.cp != atom_cp:
5420 if pkg >= visible_pkg:
5421 # This is descending order, and we're not
5422 # interested in any versions <= pkg given.
5424 if pkg.slot_atom != visible_pkg.slot_atom:
5425 higher_slot = visible_pkg
5427 if higher_slot is not None:
5429 for arg in atom_arg_map[(atom, pkg.root)]:
5430 if isinstance(arg, PackageArg) and \
5435 def select_files(self, myfiles):
5436 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
5437 appropriate depgraph and return a favorite list."""
# NOTE(review): this dump is missing many lines throughout this
# method (loop headers such as "for x in myfiles:", list/set
# initializers, "return"/"continue" glue, and try/except framing).
# The added comments below describe only what the visible lines show.
5438 debug = "--debug" in self.myopts
5439 root_config = self.roots[self.target_root]
5440 sets = root_config.sets
5441 getSetAtoms = root_config.setconfig.getSetAtoms
5443 myroot = self.target_root
5444 dbs = self._filtered_trees[myroot]["dbs"]
5445 vardb = self.trees[myroot]["vartree"].dbapi
5446 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5447 portdb = self.trees[myroot]["porttree"].dbapi
5448 bindb = self.trees[myroot]["bintree"].dbapi
5449 pkgsettings = self.pkgsettings[myroot]
5451 onlydeps = "--onlydeps" in self.myopts
# Each argument is classified by file extension / prefix:
# .tbz2 binary package, .ebuild file, absolute path (owner lookup),
# set name, or plain atom.
5454 ext = os.path.splitext(x)[1]
5456 if not os.path.exists(x):
5458 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5459 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5460 elif os.path.exists(
5461 os.path.join(pkgsettings["PKGDIR"], x)):
5462 x = os.path.join(pkgsettings["PKGDIR"], x)
5464 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5465 print "!!! Please ensure the tbz2 exists as specified.\n"
5466 return 0, myfavorites
5467 mytbz2=portage.xpak.tbz2(x)
5468 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5469 if os.path.realpath(x) != \
5470 os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5471 print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5472 return 0, myfavorites
5473 db_keys = list(bindb._aux_cache_keys)
5474 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5475 pkg = Package(type_name="binary", root_config=root_config,
5476 cpv=mykey, built=True, metadata=metadata,
5478 self._pkg_cache[pkg] = pkg
5479 args.append(PackageArg(arg=x, package=pkg,
5480 root_config=root_config))
5481 elif ext==".ebuild":
# An explicit ebuild path: validate that it lives in a proper
# category/package directory of a configured tree.
5482 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5483 pkgdir = os.path.dirname(ebuild_path)
5484 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5485 cp = pkgdir[len(tree_root)+1:]
5486 e = portage.exception.PackageNotFound(
5487 ("%s is not in a valid portage tree " + \
5488 "hierarchy or does not exist") % x)
5489 if not portage.isvalidatom(cp):
5491 cat = portage.catsplit(cp)[0]
5492 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5493 if not portage.isvalidatom("="+mykey):
5495 ebuild_path = portdb.findname(mykey)
5497 if ebuild_path != os.path.join(os.path.realpath(tree_root),
5498 cp, os.path.basename(ebuild_path)):
5499 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5500 return 0, myfavorites
5501 if mykey not in portdb.xmatch(
5502 "match-visible", portage.dep_getkey(mykey)):
5503 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5504 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5505 print colorize("BAD", "*** page for details.")
5506 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5509 raise portage.exception.PackageNotFound(
5510 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5511 db_keys = list(portdb._aux_cache_keys)
5512 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5513 pkg = Package(type_name="ebuild", root_config=root_config,
5514 cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5515 pkgsettings.setcpv(pkg)
5516 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5517 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
5518 self._pkg_cache[pkg] = pkg
5519 args.append(PackageArg(arg=x, package=pkg,
5520 root_config=root_config))
5521 elif x.startswith(os.path.sep):
5522 if not x.startswith(myroot):
5523 portage.writemsg(("\n\n!!! '%s' does not start with" + \
5524 " $ROOT.\n") % x, noiselevel=-1)
5526 # Queue these up since it's most efficient to handle
5527 # multiple files in a single iter_owners() call.
5528 lookup_owners.append(x)
5530 if x in ("system", "world"):
5532 if x.startswith(SETPREFIX):
5533 s = x[len(SETPREFIX):]
5535 raise portage.exception.PackageSetNotFound(s)
5538 # Recursively expand sets so that containment tests in
5539 # self._get_parent_sets() properly match atoms in nested
5540 # sets (like if world contains system).
5541 expanded_set = InternalPackageSet(
5542 initial_atoms=getSetAtoms(s))
5543 self._sets[s] = expanded_set
5544 args.append(SetArg(arg=x, set=expanded_set,
5545 root_config=root_config))
5547 if not is_valid_package_atom(x):
5548 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5550 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5551 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5553 # Don't expand categories or old-style virtuals here unless
5554 # necessary. Expansion of old-style virtuals here causes at
5555 # least the following problems:
5556 # 1) It's more difficult to determine which set(s) an atom
5557 # came from, if any.
5558 # 2) It takes away freedom from the resolver to choose other
5559 # possible expansions when necessary.
5561 args.append(AtomArg(arg=x, atom=x,
5562 root_config=root_config))
# Category-less atom: expand and, when the expansion is ambiguous,
# prefer the single installed cp if exactly one exists.
5564 expanded_atoms = self._dep_expand(root_config, x)
5565 installed_cp_set = set()
5566 for atom in expanded_atoms:
5567 atom_cp = portage.dep_getkey(atom)
5568 if vardb.cp_list(atom_cp):
5569 installed_cp_set.add(atom_cp)
5570 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5571 installed_cp = iter(installed_cp_set).next()
5572 expanded_atoms = [atom for atom in expanded_atoms \
5573 if portage.dep_getkey(atom) == installed_cp]
5575 if len(expanded_atoms) > 1:
5578 ambiguous_package_name(x, expanded_atoms, root_config,
5579 self.spinner, self.myopts)
5580 return False, myfavorites
5582 atom = expanded_atoms[0]
5584 null_atom = insert_category_into_atom(x, "null")
5585 null_cp = portage.dep_getkey(null_atom)
5586 cat, atom_pn = portage.catsplit(null_cp)
5587 virts_p = root_config.settings.get_virts_p().get(atom_pn)
5589 # Allow the depgraph to choose which virtual.
5590 atom = insert_category_into_atom(x, "virtual")
5592 atom = insert_category_into_atom(x, "null")
5594 args.append(AtomArg(arg=x, atom=atom,
5595 root_config=root_config))
# Resolve queued absolute paths to owning packages in one pass.
5599 search_for_multiple = False
5600 if len(lookup_owners) > 1:
5601 search_for_multiple = True
5603 for x in lookup_owners:
5604 if not search_for_multiple and os.path.isdir(x):
5605 search_for_multiple = True
5606 relative_paths.append(x[len(myroot):])
5609 for pkg, relative_path in \
5610 real_vardb._owners.iter_owners(relative_paths):
5611 owners.add(pkg.mycpv)
5612 if not search_for_multiple:
5616 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5617 "by any package.\n") % lookup_owners[0], noiselevel=-1)
5621 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5623 # portage now masks packages with missing slot, but it's
5624 # possible that one was installed by an older version
5625 atom = portage.cpv_getkey(cpv)
5627 atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5628 args.append(AtomArg(arg=atom, atom=atom,
5629 root_config=root_config))
5631 if "--update" in self.myopts:
5632 # In some cases, the greedy slots behavior can pull in a slot that
5633 # the user would want to uninstall due to it being blocked by a
5634 # newer version in a different slot. Therefore, it's necessary to
5635 # detect and discard any that should be uninstalled. Each time
5636 # that arguments are updated, package selections are repeated in
5637 # order to ensure consistency with the current arguments:
5639 # 1) Initialize args
5640 # 2) Select packages and generate initial greedy atoms
5641 # 3) Update args with greedy atoms
5642 # 4) Select packages and generate greedy atoms again, while
5643 # accounting for any blockers between selected packages
5644 # 5) Update args with revised greedy atoms
5646 self._set_args(args)
5649 greedy_args.append(arg)
5650 if not isinstance(arg, AtomArg):
5652 for atom in self._greedy_slots(arg.root_config, arg.atom):
5654 AtomArg(arg=arg.arg, atom=atom,
5655 root_config=arg.root_config))
5657 self._set_args(greedy_args)
5660 # Revise greedy atoms, accounting for any blockers
5661 # between selected packages.
5662 revised_greedy_args = []
5664 revised_greedy_args.append(arg)
5665 if not isinstance(arg, AtomArg):
5667 for atom in self._greedy_slots(arg.root_config, arg.atom,
5668 blocker_lookahead=True):
5669 revised_greedy_args.append(
5670 AtomArg(arg=arg.arg, atom=atom,
5671 root_config=arg.root_config))
5672 args = revised_greedy_args
5673 del revised_greedy_args
5675 self._set_args(args)
# Favorites collect atoms/set names for the world file; dedupe via a
# set, then restore list form.
5677 myfavorites = set(myfavorites)
5679 if isinstance(arg, (AtomArg, PackageArg)):
5680 myfavorites.add(arg.atom)
5681 elif isinstance(arg, SetArg):
5682 myfavorites.add(arg.arg)
5683 myfavorites = list(myfavorites)
5685 pprovideddict = pkgsettings.pprovideddict
5687 portage.writemsg("\n", noiselevel=-1)
5688 # Order needs to be preserved since a feature of --nodeps
5689 # is to allow the user to force a specific merge order.
5693 for atom in arg.set:
5694 self.spinner.update()
5695 dep = Dependency(atom=atom, onlydeps=onlydeps,
5696 root=myroot, parent=arg)
5697 atom_cp = portage.dep_getkey(atom)
5699 pprovided = pprovideddict.get(portage.dep_getkey(atom))
5700 if pprovided and portage.match_from_list(atom, pprovided):
5701 # A provided package has been specified on the command line.
5702 self._pprovided_args.append((arg, atom))
5704 if isinstance(arg, PackageArg):
5705 if not self._add_pkg(arg.package, dep) or \
5706 not self._create_graph():
5707 sys.stderr.write(("\n\n!!! Problem resolving " + \
5708 "dependencies for %s\n") % arg.arg)
5709 return 0, myfavorites
5712 portage.writemsg(" Arg: %s\n Atom: %s\n" % \
5713 (arg, atom), noiselevel=-1)
5714 pkg, existing_node = self._select_package(
5715 myroot, atom, onlydeps=onlydeps)
5717 if not (isinstance(arg, SetArg) and \
5718 arg.name in ("system", "world")):
5719 self._unsatisfied_deps_for_display.append(
5720 ((myroot, atom), {}))
5721 return 0, myfavorites
5722 self._missing_args.append((arg, atom))
5724 if atom_cp != pkg.cp:
5725 # For old-style virtuals, we need to repeat the
5726 # package.provided check against the selected package.
5727 expanded_atom = atom.replace(atom_cp, pkg.cp)
5728 pprovided = pprovideddict.get(pkg.cp)
5730 portage.match_from_list(expanded_atom, pprovided):
5731 # A provided package has been
5732 # specified on the command line.
5733 self._pprovided_args.append((arg, atom))
5735 if pkg.installed and "selective" not in self.myparams:
5736 self._unsatisfied_deps_for_display.append(
5737 ((myroot, atom), {}))
5738 # Previous behavior was to bail out in this case, but
5739 # since the dep is satisfied by the installed package,
5740 # it's more friendly to continue building the graph
5741 # and just show a warning message. Therefore, only bail
5742 # out here if the atom is not from either the system or
5744 if not (isinstance(arg, SetArg) and \
5745 arg.name in ("system", "world")):
5746 return 0, myfavorites
5748 # Add the selected package to the graph as soon as possible
5749 # so that later dep_check() calls can use it as feedback
5750 # for making more consistent atom selections.
5751 if not self._add_pkg(pkg, dep):
5752 if isinstance(arg, SetArg):
5753 sys.stderr.write(("\n\n!!! Problem resolving " + \
5754 "dependencies for %s from %s\n") % \
5757 sys.stderr.write(("\n\n!!! Problem resolving " + \
5758 "dependencies for %s\n") % atom)
5759 return 0, myfavorites
5761 except portage.exception.MissingSignature, e:
5762 portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5763 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5764 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5765 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5766 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5767 return 0, myfavorites
5768 except portage.exception.InvalidSignature, e:
5769 portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5770 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5771 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5772 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5773 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5774 return 0, myfavorites
5775 except SystemExit, e:
5776 raise # Needed else can't exit
5777 except Exception, e:
5778 print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5779 print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5782 # Now that the root packages have been added to the graph,
5783 # process the dependencies.
5784 if not self._create_graph():
5785 return 0, myfavorites
# --usepkgonly sanity pass: every merge node must be a binary package.
5788 if "--usepkgonly" in self.myopts:
5789 for xs in self.digraph.all_nodes():
5790 if not isinstance(xs, Package):
5792 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5796 print "Missing binary for:",xs[2]
5800 except self._unknown_internal_error:
5801 return False, myfavorites
5803 # We're true here unless we are missing binaries.
5804 return (not missing,myfavorites)
5806 def _set_args(self, args):
# NOTE(review): docstring quote lines and several loop/conditional
# lines (e.g. the "for arg in args:" headers, SetArg filtering, and
# refs list setup) are missing from this dump.
5808 Create the "args" package set from atoms and packages given as
5809 arguments. This method can be called multiple times if necessary.
5810 The package selection cache is automatically invalidated, since
5811 arguments influence package selections.
5813 args_set = self._sets["args"]
5816 if not isinstance(arg, (AtomArg, PackageArg)):
5819 if atom in args_set:
# Rebuild the flattened atom set from all configured sets.
5823 self._set_atoms.clear()
5824 self._set_atoms.update(chain(*self._sets.itervalues()))
5825 atom_arg_map = self._atom_arg_map
5826 atom_arg_map.clear()
# Map (atom, root) -> list of argument objects that reference it.
5828 for atom in arg.set:
5829 atom_key = (atom, arg.root_config.root)
5830 refs = atom_arg_map.get(atom_key)
5833 atom_arg_map[atom_key] = refs
5837 # Invalidate the package selection cache, since
5838 # arguments influence package selections.
5839 self._highest_pkg_cache.clear()
5840 for trees in self._filtered_trees.itervalues():
5841 trees["porttree"].dbapi._clear_cache()
# NOTE(review): elided listing -- several lines (try/except bodies, the
# per-slot loop header, early returns) are missing between visible lines.
def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
# Docstring (delimiters lost in this listing):
Return a list of slot atoms corresponding to installed slots that
differ from the slot of the highest visible match. When
blocker_lookahead is True, slot atoms that would trigger a blocker
conflict are automatically discarded, potentially allowing automatic
uninstallation of older slots when appropriate.
highest_pkg, in_graph = self._select_package(root_config.root, atom)
if highest_pkg is None:
# Collect the SLOT values of all installed packages with the same cp
# as the highest visible match.
vardb = root_config.trees["vartree"].dbapi
for cpv in vardb.match(atom):
# don't mix new virtuals with old virtuals
if portage.cpv_getkey(cpv) == highest_pkg.cp:
slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
slots.add(highest_pkg.metadata["SLOT"])
# The highest match's own slot is not "greedy" -- drop it before
# resolving each remaining slot to a concrete package.
slots.remove(highest_pkg.metadata["SLOT"])
# NOTE(review): the loop header iterating over the remaining slots is
# elided before this line.
slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
pkg, in_graph = self._select_package(root_config.root, slot_atom)
if pkg is not None and \
pkg.cp == highest_pkg.cp and pkg < highest_pkg:
greedy_pkgs.append(pkg)
if not blocker_lookahead:
return [pkg.slot_atom for pkg in greedy_pkgs]
# With blocker_lookahead, collect each candidate's blocker atoms from
# its *DEPEND metadata and discard slot atoms that would conflict.
blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
for pkg in greedy_pkgs + [highest_pkg]:
dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
atoms = self._select_atoms(
pkg.root, dep_str, pkg.use.enabled,
parent=pkg, strict=True)
except portage.exception.InvalidDependString:
blocker_atoms = (x for x in atoms if x.blocker)
blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
# If the highest match itself has invalid deps, nothing can be checked.
if highest_pkg not in blockers:
# filter packages with invalid deps
greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
# filter packages that conflict with highest_pkg
greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
(blockers[highest_pkg].findAtomForPackage(pkg) or \
blockers[pkg].findAtomForPackage(highest_pkg))]
# If two packages conflict, discard the lower version.
discard_pkgs = set()
greedy_pkgs.sort(reverse=True)
for i in xrange(len(greedy_pkgs) - 1):
pkg1 = greedy_pkgs[i]
if pkg1 in discard_pkgs:
for j in xrange(i + 1, len(greedy_pkgs)):
pkg2 = greedy_pkgs[j]
if pkg2 in discard_pkgs:
# pkg1 sorts higher than pkg2, so pkg2 is the one discarded.
if blockers[pkg1].findAtomForPackage(pkg2) or \
blockers[pkg2].findAtomForPackage(pkg1):
discard_pkgs.add(pkg2)
return [pkg.slot_atom for pkg in greedy_pkgs \
if pkg not in discard_pkgs]
5924 def _select_atoms_from_graph(self, *pargs, **kwargs):
5926 Prefer atoms matching packages that have already been
5927 added to the graph or those that are installed and have
5928 not been scheduled for replacement.
5930 kwargs["trees"] = self._graph_trees
5931 return self._select_atoms_highest_available(*pargs, **kwargs)
# NOTE(review): elided listing -- the "if trees is None:" guard, try/finally
# wrapping, and the "if not mycheck[0]:" failure test appear to be missing
# between visible lines. Statement order is critical here because a module
# global (portage.dep._dep_check_strict) is toggled around the dep_check call.
def _select_atoms_highest_available(self, root, depstring,
myuse=None, parent=None, strict=True, trees=None, priority=None):
"""This will raise InvalidDependString if necessary. If trees is
None then self._filtered_trees is used."""
pkgsettings = self.pkgsettings[root]
# NOTE(review): presumably guarded by an elided "if trees is None:".
trees = self._filtered_trees
if not getattr(priority, "buildtime", False):
# The parent should only be passed to dep_check() for buildtime
# dependencies since that's the only case when it's appropriate
# to trigger the circular dependency avoidance code which uses it.
# It's important not to trigger the same circular dependency
# avoidance code for runtime dependencies since it's not needed
# and it can promote an incorrect package choice.
if parent is not None:
trees[root]["parent"] = parent
# Relax strictness for the duration of the dep_check call; restored below.
portage.dep._dep_check_strict = False
mycheck = portage.dep_check(depstring, None,
pkgsettings, myuse=myuse,
myroot=root, trees=trees)
# Cleanup: remove the temporary "parent" entry and restore strict mode.
if parent is not None:
trees[root].pop("parent")
portage.dep._dep_check_strict = True
# On failure, mycheck[1] carries the error message.
raise portage.exception.InvalidDependString(mycheck[1])
# On success, mycheck[1] is the list of selected atoms.
selected_atoms = mycheck[1]
return selected_atoms
# Display a human-readable explanation of why no package satisfies the given
# atom: USE-flag mismatches, missing IUSE, masking (keywords/license/EAPI),
# or simply no matching ebuilds -- then print the dependency chain that
# pulled the atom in.
# NOTE(review): elided listing -- guards such as "if atom.use:", "if atom.slot:",
# several else branches and print statements are missing between visible lines.
def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
atom = portage.dep.Atom(atom)
atom_set = InternalPackageSet(initial_atoms=(atom,))
# Build a variant of the atom without USE deps (but keeping the slot)
# so that USE-masked candidates can still be located and reported.
atom_without_use = atom
atom_without_use = portage.dep.remove_slot(atom)
atom_without_use += ":" + atom.slot
atom_without_use = portage.dep.Atom(atom_without_use)
xinfo = '"%s"' % atom
# Discard null/ from failed cpv_expand category expansion.
xinfo = xinfo.replace("null/", "")
masked_packages = []
masked_pkg_instances = set()
missing_licenses = []
have_eapi_mask = False
pkgsettings = self.pkgsettings[root]
implicit_iuse = pkgsettings._get_implicit_iuse()
root_config = self.roots[root]
portdb = self.roots[root].trees["porttree"].dbapi
dbs = self._filtered_trees[root]["dbs"]
# Pass 1: collect every candidate from every db and classify it as
# USE-mismatched (missing_use) or masked (masked_packages).
for db, pkg_type, built, installed, db_keys in dbs:
if hasattr(db, "xmatch"):
cpv_list = db.xmatch("match-all", atom_without_use)
cpv_list = db.match(atom_without_use)
for cpv in cpv_list:
metadata, mreasons = get_mask_info(root_config, cpv,
pkgsettings, db, pkg_type, built, installed, db_keys)
if metadata is not None:
pkg = Package(built=built, cpv=cpv,
installed=installed, metadata=metadata,
root_config=root_config)
if pkg.cp != atom.cp:
# A cpv can be returned from dbapi.match() as an
# old-style virtual match even in cases when the
# package does not actually PROVIDE the virtual.
# Filter out any such false matches here.
if not atom_set.findAtomForPackage(pkg):
masked_pkg_instances.add(pkg)
missing_use.append(pkg)
masked_packages.append(
(root_config, pkgsettings, cpv, metadata, mreasons))
# Pass 2: for each USE-mismatched candidate, work out whether the atom
# requires flags missing from IUSE, or flags that merely need toggling.
missing_use_reasons = []
missing_iuse_reasons = []
for pkg in missing_use:
use = pkg.use.enabled
iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
iuse_re = re.compile("^(%s)$" % "|".join(iuse))
for x in atom.use.required:
if iuse_re.match(x) is None:
missing_iuse.append(x)
mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
missing_iuse_reasons.append((pkg, mreasons))
need_enable = sorted(atom.use.enabled.difference(use))
need_disable = sorted(atom.use.disabled.intersection(use))
if need_enable or need_disable:
changes.extend(colorize("red", "+" + x) \
for x in need_enable)
changes.extend(colorize("blue", "-" + x) \
for x in need_disable)
mreasons.append("Change USE: %s" % " ".join(changes))
missing_use_reasons.append((pkg, mreasons))
# Only unmasked candidates are worth suggesting USE changes for.
unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
in missing_use_reasons if pkg not in masked_pkg_instances]
unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
in missing_iuse_reasons if pkg not in masked_pkg_instances]
show_missing_use = False
if unmasked_use_reasons:
# Only show the latest version.
show_missing_use = unmasked_use_reasons[:1]
elif unmasked_iuse_reasons:
if missing_use_reasons:
# All packages with required IUSE are masked,
# so display a normal masking message.
show_missing_use = unmasked_iuse_reasons
# Pass 3: pick one of three messages -- USE-flag help, masking report,
# or a plain "no ebuilds" notice.
if show_missing_use:
print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
print "!!! One of the following packages is required to complete your request:"
for pkg, mreasons in show_missing_use:
print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
elif masked_packages:
colorize("BAD", "All ebuilds that could satisfy ") + \
colorize("INFORM", xinfo) + \
colorize("BAD", " have been masked.")
print "!!! One of the following masked packages is required to complete your request:"
have_eapi_mask = show_masked_packages(masked_packages)
# NOTE(review): presumably guarded by an elided "if have_eapi_mask:".
msg = ("The current version of portage supports " + \
"EAPI '%s'. You must upgrade to a newer version" + \
" of portage before EAPI masked packages can" + \
" be installed.") % portage.const.EAPI
from textwrap import wrap
for line in wrap(msg, 75):
print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
# Show parent nodes and the argument that pulled them in.
traversed_nodes = set()
while node is not None:
traversed_nodes.add(node)
msg.append('(dependency required by "%s" [%s])' % \
(colorize('INFORM', str(node.cpv)), node.type_name))
# When traversing to parents, prefer arguments over packages
# since arguments are root nodes. Never traverse the same
# package twice, in order to prevent an infinite loop.
selected_parent = None
for parent in self.digraph.parent_nodes(node):
if isinstance(parent, DependencyArg):
msg.append('(dependency required by "%s" [argument])' % \
(colorize('INFORM', str(parent))))
selected_parent = None
if parent not in traversed_nodes:
selected_parent = parent
node = selected_parent
# Memoizing front-end for _select_pkg_highest_available_imp(): results are
# cached per (root, atom, onlydeps) and refreshed when a cached package has
# since been added to the graph.
# NOTE(review): elided listing -- the cache-hit unpacking ("pkg, existing = ret")
# and the early return appear to be missing between visible lines.
def _select_pkg_highest_available(self, root, atom, onlydeps=False):
cache_key = (root, atom, onlydeps)
ret = self._highest_pkg_cache.get(cache_key)
if pkg and not existing:
existing = self._slot_pkg_map[root].get(pkg.slot_atom)
if existing and existing == pkg:
# Update the cache to reflect that the
# package has been added to the graph.
self._highest_pkg_cache[cache_key] = ret
# Cache miss: compute and store.
ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
self._highest_pkg_cache[cache_key] = ret
6137 settings = pkg.root_config.settings
6138 if visible(settings, pkg) and not (pkg.installed and \
6139 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6140 pkg.root_config.visible_pkgs.cpv_inject(pkg)
# Core package selection: scan all configured dbs (binary, installed, ebuild)
# for the best visible match of `atom`, honoring --usepkgonly/--noreplace/
# --newuse/--reinstall and "selective"/"empty" params, and reusing packages
# already in the graph when possible.
# Returns (pkg, existing_node) where existing_node is a graph package that
# already occupies pkg's slot, or None.
# NOTE(review): heavily elided listing -- many guards, continues, try/except
# headers and else branches are missing between visible lines; code is kept
# as presented.
def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
root_config = self.roots[root]
pkgsettings = self.pkgsettings[root]
dbs = self._filtered_trees[root]["dbs"]
vardb = self.roots[root].trees["vartree"].dbapi
portdb = self.roots[root].trees["porttree"].dbapi
# List of acceptable packages, ordered by type preference.
matched_packages = []
highest_version = None
if not isinstance(atom, portage.dep.Atom):
atom = portage.dep.Atom(atom)
atom_set = InternalPackageSet(initial_atoms=(atom,))
existing_node = None
usepkgonly = "--usepkgonly" in self.myopts
empty = "empty" in self.myparams
selective = "selective" in self.myparams
noreplace = "--noreplace" in self.myopts
# Behavior of the "selective" parameter depends on
# whether or not a package matches an argument atom.
# If an installed package provides an old-style
# virtual that is no longer provided by an available
# package, the installed package may match an argument
# atom even though none of the available packages do.
# Therefore, "selective" logic does not consider
# whether or not an installed package matches an
# argument atom. It only considers whether or not
# available packages match argument atoms, which is
# represented by the found_available_arg flag.
found_available_arg = False
# Two passes: first prefer nodes already in the graph, then fall back.
for find_existing_node in True, False:
for db, pkg_type, built, installed, db_keys in dbs:
if installed and not find_existing_node:
want_reinstall = reinstall or empty or \
(found_available_arg and not selective)
if want_reinstall and matched_packages:
if hasattr(db, "xmatch"):
cpv_list = db.xmatch("match-all", atom)
cpv_list = db.match(atom)
# USE=multislot can make an installed package appear as if
# it doesn't satisfy a slot dependency. Rebuilding the ebuild
# won't do any good as long as USE=multislot is enabled since
# the newly built package still won't have the expected slot.
# Therefore, assume that such SLOT dependencies are already
# satisfied rather than forcing a rebuild.
if installed and not cpv_list and atom.slot:
for cpv in db.match(atom.cp):
slot_available = False
for other_db, other_type, other_built, \
other_installed, other_keys in dbs:
other_db.aux_get(cpv, ["SLOT"])[0]:
slot_available = True
if not slot_available:
inst_pkg = self._pkg(cpv, "installed",
root_config, installed=installed)
# Remove the slot from the atom and verify that
# the package matches the resulting atom.
atom_without_slot = portage.dep.remove_slot(atom)
atom_without_slot += str(atom.use)
atom_without_slot = portage.dep.Atom(atom_without_slot)
if portage.match_from_list(
atom_without_slot, [inst_pkg]):
cpv_list = [inst_pkg.cpv]
pkg_status = "merge"
if installed or onlydeps:
pkg_status = "nomerge"
for cpv in cpv_list:
# Make --noreplace take precedence over --newuse.
if not installed and noreplace and \
cpv in vardb.match(atom):
# If the installed version is masked, it may
# be necessary to look at lower versions,
# in case there is a visible downgrade.
reinstall_for_flags = None
cache_key = (pkg_type, root, cpv, pkg_status)
calculated_use = True
pkg = self._pkg_cache.get(cache_key)
# Cache miss: build the Package from db metadata.
calculated_use = False
metadata = izip(db_keys, db.aux_get(cpv, db_keys))
pkg = Package(built=built, cpv=cpv,
installed=installed, metadata=metadata,
onlydeps=onlydeps, root_config=root_config,
metadata = pkg.metadata
metadata['CHOST'] = pkgsettings.get('CHOST', '')
if not built and ("?" in metadata["LICENSE"] or \
"?" in metadata["PROVIDE"]):
# This is avoided whenever possible because
# it's expensive. It only needs to be done here
# if it has an effect on visibility.
pkgsettings.setcpv(pkg)
metadata["USE"] = pkgsettings["PORTAGE_USE"]
calculated_use = True
self._pkg_cache[pkg] = pkg
if not installed or (built and matched_packages):
# Only enforce visibility on installed packages
# if there is at least one other visible package
# available. By filtering installed masked packages
# here, packages that have been masked since they
# were installed can be automatically downgraded
# to an unmasked version.
if not visible(pkgsettings, pkg):
except portage.exception.InvalidDependString:
# Enable upgrade or downgrade to a version
# with visible KEYWORDS when the installed
# version is masked by KEYWORDS, but never
# reinstall the same exact version only due
# to a KEYWORDS mask.
if built and matched_packages:
different_version = None
for avail_pkg in matched_packages:
if not portage.dep.cpvequal(
pkg.cpv, avail_pkg.cpv):
different_version = avail_pkg
if different_version is not None:
pkgsettings._getMissingKeywords(
pkg.cpv, pkg.metadata):
# If the ebuild no longer exists or it's
# keywords have been dropped, reject built
# instances (installed or binary).
# If --usepkgonly is enabled, assume that
# the ebuild status should be ignored.
pkg.cpv, "ebuild", root_config)
except portage.exception.PackageNotFound:
if not visible(pkgsettings, pkg_eb):
if not pkg.built and not calculated_use:
# This is avoided whenever possible because
pkgsettings.setcpv(pkg)
pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
if pkg.cp != atom.cp:
# A cpv can be returned from dbapi.match() as an
# old-style virtual match even in cases when the
# package does not actually PROVIDE the virtual.
# Filter out any such false matches here.
if not atom_set.findAtomForPackage(pkg):
if root == self.target_root:
# Ebuild USE must have been calculated prior
# to this point, in case atoms have USE deps.
myarg = self._iter_atoms_for_pkg(pkg).next()
except StopIteration:
except portage.exception.InvalidDependString:
# masked by corruption
if not installed and myarg:
found_available_arg = True
# Reject candidates whose current USE state violates the
# atom's USE dependencies (unbuilt packages only).
if atom.use and not pkg.built:
use = pkg.use.enabled
if atom.use.enabled.difference(use):
if atom.use.disabled.intersection(use):
if pkg.cp == atom_cp:
if highest_version is None:
highest_version = pkg
elif pkg > highest_version:
highest_version = pkg
# At this point, we've found the highest visible
# match from the current repo. Any lower versions
# from this repo are ignored, so this so the loop
# will always end with a break statement below
if find_existing_node:
e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
if portage.dep.match_from_list(atom, [e_pkg]):
if highest_version and \
e_pkg.cp == atom_cp and \
e_pkg < highest_version and \
e_pkg.slot_atom != highest_version.slot_atom:
# There is a higher version available in a
# different slot, so this existing node is
matched_packages.append(e_pkg)
existing_node = e_pkg
# Compare built package to current config and
# reject the built package if necessary.
if built and not installed and \
("--newuse" in self.myopts or \
"--reinstall" in self.myopts):
iuses = pkg.iuse.all
old_use = pkg.use.enabled
pkgsettings.setcpv(myeb)
pkgsettings.setcpv(pkg)
now_use = pkgsettings["PORTAGE_USE"].split()
forced_flags = set()
forced_flags.update(pkgsettings.useforce)
forced_flags.update(pkgsettings.usemask)
if myeb and not usepkgonly:
cur_iuse = myeb.iuse.all
if self._reinstall_for_flags(forced_flags,
# Compare current config to installed package
# and do not reinstall if possible.
if not installed and \
("--newuse" in self.myopts or \
"--reinstall" in self.myopts) and \
cpv in vardb.match(atom):
pkgsettings.setcpv(pkg)
forced_flags = set()
forced_flags.update(pkgsettings.useforce)
forced_flags.update(pkgsettings.usemask)
old_use = vardb.aux_get(cpv, ["USE"])[0].split()
old_iuse = set(filter_iuse_defaults(
vardb.aux_get(cpv, ["IUSE"])[0].split()))
cur_use = pkg.use.enabled
cur_iuse = pkg.iuse.all
reinstall_for_flags = \
self._reinstall_for_flags(
forced_flags, old_use, old_iuse,
if reinstall_for_flags:
matched_packages.append(pkg)
if reinstall_for_flags:
self._reinstall_nodes[pkg] = \
if not matched_packages:
if "--debug" in self.myopts:
for pkg in matched_packages:
portage.writemsg("%s %s\n" % \
((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
# Filter out any old-style virtual matches if they are
# mixed with new-style virtual matches.
cp = portage.dep_getkey(atom)
if len(matched_packages) > 1 and \
"virtual" == portage.catsplit(cp)[0]:
for pkg in matched_packages:
# Got a new-style virtual, so filter
# out any old-style virtuals.
matched_packages = [pkg for pkg in matched_packages \
if len(matched_packages) > 1:
bestmatch = portage.best(
[pkg.cpv for pkg in matched_packages])
matched_packages = [pkg for pkg in matched_packages \
if portage.dep.cpvequal(pkg.cpv, bestmatch)]
# ordered by type preference ("ebuild" type is the last resort)
return matched_packages[-1], existing_node
6459 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6461 Select packages that have already been added to the graph or
6462 those that are installed and have not been scheduled for
6465 graph_db = self._graph_trees[root]["porttree"].dbapi
6466 matches = graph_db.match_pkgs(atom)
6469 pkg = matches[-1] # highest match
6470 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6471 return pkg, in_graph
# NOTE(review): elided listing -- early returns, "args = []" initialization,
# several continue statements and the final "return True" appear to be
# missing between visible lines.
def _complete_graph(self):
# Docstring (delimiters lost in this listing):
Add any deep dependencies of required sets (args, system, world) that
have not been pulled into the graph yet. This ensures that the graph
is consistent such that initially satisfied deep dependencies are not
broken in the new graph. Initially unsatisfied dependencies are
irrelevant since we only want to avoid breaking dependencies that are
# (remainder of docstring)
Since this method can consume enough time to disturb users, it is
currently only enabled by the --complete-graph option.
if "--buildpkgonly" in self.myopts or \
"recurse" not in self.myparams:
if "complete" not in self.myparams:
# Skip this to avoid consuming enough time to disturb users.
# Put the depgraph into a mode that causes it to only
# select packages that have already been added to the
# graph or those that are installed and have not been
# scheduled for replacement. Also, toggle the "deep"
# parameter so that all dependencies are traversed and
self._select_atoms = self._select_atoms_from_graph
self._select_package = self._select_pkg_from_graph
already_deep = "deep" in self.myparams
if not already_deep:
self.myparams.add("deep")
for root in self.roots:
required_set_names = self._required_set_names.copy()
if root == self.target_root and \
(already_deep or "empty" in self.myparams):
required_set_names.difference_update(self._sets)
if not required_set_names and not self._ignored_deps:
root_config = self.roots[root]
setconfig = root_config.setconfig
# Reuse existing SetArg instances when available.
for arg in self.digraph.root_nodes():
if not isinstance(arg, SetArg):
if arg.root_config != root_config:
if arg.name in required_set_names:
required_set_names.remove(arg.name)
# Create new SetArg instances only when necessary.
for s in required_set_names:
expanded_set = InternalPackageSet(
initial_atoms=setconfig.getSetAtoms(s))
atom = SETPREFIX + s
args.append(SetArg(arg=atom, set=expanded_set,
root_config=root_config))
vardb = root_config.trees["vartree"].dbapi
# Push each set's atoms onto the dep stack for traversal.
for atom in arg.set:
self._dep_stack.append(
Dependency(atom=atom, root=root, parent=arg))
if self._ignored_deps:
self._dep_stack.extend(self._ignored_deps)
self._ignored_deps = []
if not self._create_graph(allow_unsatisfied=True):
# Check the unsatisfied deps to see if any initially satisfied deps
# will become unsatisfied due to an upgrade. Initially unsatisfied
# deps are irrelevant since we only want to avoid breaking deps
# that are initially satisfied.
while self._unsatisfied_deps:
dep = self._unsatisfied_deps.pop()
matches = vardb.match_pkgs(dep.atom)
self._initially_unsatisfied_deps.append(dep)
# A scheduled installation broke a deep dependency.
# Add the installed package to the graph so that it
# will be appropriately reported as a slot collision
# (possibly solvable via backtracking).
pkg = matches[-1] # highest match
if not self._add_pkg(pkg, dep):
if not self._create_graph(allow_unsatisfied=True):
# NOTE(review): elided listing -- the "operation = 'merge'" / "if installed:"
# prologue, the "if pkg is None:" cache-miss guard, the try/except KeyError
# around aux_get, and the final "return pkg" appear to be missing between
# visible lines.
def _pkg(self, cpv, type_name, root_config, installed=False):
# Docstring (delimiters lost in this listing):
Get a package instance from the cache, or create a new
one if necessary. Raises KeyError from aux_get if it
failures for some reason (package does not exist or is
# Installed packages are looked up under the "nomerge" operation key.
operation = "nomerge"
pkg = self._pkg_cache.get(
(type_name, root_config.root, cpv, operation))
# Cache miss: build a new Package from the appropriate tree's dbapi.
tree_type = self.pkg_tree_map[type_name]
db = root_config.trees[tree_type].dbapi
db_keys = list(self._trees_orig[root_config.root][
tree_type].dbapi._aux_cache_keys)
metadata = izip(db_keys, db.aux_get(cpv, db_keys))
# aux_get KeyError is translated to PackageNotFound for callers.
raise portage.exception.PackageNotFound(cpv)
pkg = Package(cpv=cpv, metadata=metadata,
root_config=root_config, installed=installed)
# Ebuild USE/CHOST must be computed from current config settings.
if type_name == "ebuild":
settings = self.pkgsettings[root_config.root]
settings.setcpv(pkg)
pkg.metadata["USE"] = settings["PORTAGE_USE"]
pkg.metadata['CHOST'] = settings.get('CHOST', '')
self._pkg_cache[pkg] = pkg
# NOTE(review): heavily elided listing -- loop headers (e.g. the per-package
# loop over installed packages), try/finally wrapping, several guards and
# early returns are missing between visible lines; code kept as presented.
def validate_blockers(self):
"""Remove any blockers from the digraph that do not match any of the
packages within the graph. If necessary, create hard deps to ensure
correct merge order such that mutually blocking packages are never
installed simultaneously."""
if "--buildpkgonly" in self.myopts or \
"--nodeps" in self.myopts:
#if "deep" in self.myparams:
# Pull in blockers from all installed packages that haven't already
# been pulled into the depgraph. This is not enabled by default
# due to the performance penalty that is incurred by all the
# additional dep_check calls that are required.
# Phase 1: compute blocker atoms for every installed package, using a
# persistent per-root BlockerCache keyed by COUNTER to skip dep_check.
dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
for myroot in self.trees:
vardb = self.trees[myroot]["vartree"].dbapi
portdb = self.trees[myroot]["porttree"].dbapi
pkgsettings = self.pkgsettings[myroot]
final_db = self.mydbapi[myroot]
blocker_cache = BlockerCache(myroot, vardb)
# Entries still in stale_cache at the end are pruned from the cache.
stale_cache = set(blocker_cache)
stale_cache.discard(cpv)
pkg_in_graph = self.digraph.contains(pkg)
# Check for masked installed packages. Only warn about
# packages that are in the graph in order to avoid warning
# about those that will be automatically uninstalled during
# the merge process or by --depclean.
if pkg_in_graph and not visible(pkgsettings, pkg):
self._masked_installed.add(pkg)
blocker_atoms = None
self._blocker_parents.child_nodes(pkg))
self._irrelevant_blockers.child_nodes(pkg))
if blockers is not None:
blockers = set(str(blocker.atom) \
for blocker in blockers)
# If this node has any blockers, create a "nomerge"
# node for it so that they can be enforced.
self.spinner.update()
blocker_data = blocker_cache.get(cpv)
# A stale COUNTER means the cached entry belongs to a different
# build of this cpv; discard it.
if blocker_data is not None and \
blocker_data.counter != long(pkg.metadata["COUNTER"]):
# If blocker data from the graph is available, use
# it to validate the cache and update the cache if
if blocker_data is not None and \
blockers is not None:
if not blockers.symmetric_difference(
blocker_data.atoms):
if blocker_data is None and \
blockers is not None:
# Re-use the blockers from the graph.
blocker_atoms = sorted(blockers)
counter = long(pkg.metadata["COUNTER"])
blocker_cache.BlockerData(counter, blocker_atoms)
blocker_cache[pkg.cpv] = blocker_data
blocker_atoms = blocker_data.atoms
# Use aux_get() to trigger FakeVartree global
# updates on *DEPEND when appropriate.
depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
# It is crucial to pass in final_db here in order to
# optimize dep_check calls by eliminating atoms via
# dep_wordreduce and dep_eval calls.
portage.dep._dep_check_strict = False
success, atoms = portage.dep_check(depstr,
final_db, pkgsettings, myuse=pkg.use.enabled,
trees=self._graph_trees, myroot=myroot)
except Exception, e:
if isinstance(e, SystemExit):
# This is helpful, for example, if a ValueError
# is thrown from cpv_expand due to multiple
# matches (this can happen if an atom lacks a
show_invalid_depstring_notice(
pkg, depstr, str(e))
# Restore strict mode (presumably in an elided finally block).
portage.dep._dep_check_strict = True
replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
if replacement_pkg and \
replacement_pkg[0].operation == "merge":
# This package is being replaced anyway, so
# ignore invalid dependencies so as not to
# annoy the user too much (otherwise they'd be
# forced to manually unmerge it first).
show_invalid_depstring_notice(pkg, depstr, atoms)
blocker_atoms = [myatom for myatom in atoms \
if myatom.startswith("!")]
blocker_atoms.sort()
counter = long(pkg.metadata["COUNTER"])
blocker_cache[cpv] = \
blocker_cache.BlockerData(counter, blocker_atoms)
for atom in blocker_atoms:
blocker = Blocker(atom=portage.dep.Atom(atom),
eapi=pkg.metadata["EAPI"], root=myroot)
self._blocker_parents.add(blocker, pkg)
except portage.exception.InvalidAtom, e:
depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
show_invalid_depstring_notice(
pkg, depstr, "Invalid Atom: %s" % (e,))
# Drop cache entries for packages that no longer exist.
for cpv in stale_cache:
del blocker_cache[cpv]
blocker_cache.flush()
# Discard any "uninstall" tasks scheduled by previous calls
# to this method, since those tasks may not make sense given
# the current graph state.
previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
if previous_uninstall_tasks:
self._blocker_uninstalls = digraph()
self.digraph.difference_update(previous_uninstall_tasks)
# Phase 2: resolve each blocker against the initial (installed) and
# final (post-merge) package sets.
for blocker in self._blocker_parents.leaf_nodes():
self.spinner.update()
root_config = self.roots[blocker.root]
virtuals = root_config.settings.getvirtuals()
myroot = blocker.root
initial_db = self.trees[myroot]["vartree"].dbapi
final_db = self.mydbapi[myroot]
provider_virtual = False
if blocker.cp in virtuals and \
not self._have_new_virt(blocker.root, blocker.cp):
provider_virtual = True
# Use this to check PROVIDE for each matched package
atom_set = InternalPackageSet(
initial_atoms=[blocker.atom])
# Old-style virtual blockers are expanded to one atom per provider.
if provider_virtual:
for provider_entry in virtuals[blocker.cp]:
portage.dep_getkey(provider_entry)
atoms.append(blocker.atom.replace(
blocker.cp, provider_cp))
atoms = [blocker.atom]
blocked_initial = set()
for pkg in initial_db.match_pkgs(atom):
if atom_set.findAtomForPackage(pkg):
blocked_initial.add(pkg)
blocked_final = set()
for pkg in final_db.match_pkgs(atom):
if atom_set.findAtomForPackage(pkg):
blocked_final.add(pkg)
# A blocker matching nothing is irrelevant; detach it.
if not blocked_initial and not blocked_final:
parent_pkgs = self._blocker_parents.parent_nodes(blocker)
self._blocker_parents.remove(blocker)
# Discard any parents that don't have any more blockers.
for pkg in parent_pkgs:
self._irrelevant_blockers.add(blocker, pkg)
if not self._blocker_parents.child_nodes(pkg):
self._blocker_parents.remove(pkg)
for parent in self._blocker_parents.parent_nodes(blocker):
unresolved_blocks = False
depends_on_order = set()
for pkg in blocked_initial:
if pkg.slot_atom == parent.slot_atom:
# TODO: Support blocks within slots in cases where it
# might make sense. For example, a new version might
# require that the old version be uninstalled at build
if parent.installed:
# Two currently installed packages conflict with
# eachother. Ignore this case since the damage
# is already done and this would be likely to
# confuse users if displayed like a normal blocker.
self._blocked_pkgs.add(pkg, blocker)
if parent.operation == "merge":
# Maybe the blocked package can be replaced or simply
# unmerged to resolve this block.
depends_on_order.add((pkg, parent))
# None of the above blocker resolutions techniques apply,
# so apparently this one is unresolvable.
unresolved_blocks = True
for pkg in blocked_final:
if pkg.slot_atom == parent.slot_atom:
# TODO: Support blocks within slots.
if parent.operation == "nomerge" and \
pkg.operation == "nomerge":
# This blocker will be handled the next time that a
# merge of either package is triggered.
self._blocked_pkgs.add(pkg, blocker)
# Maybe the blocking package can be
# unmerged to resolve this block.
if parent.operation == "merge" and pkg.installed:
depends_on_order.add((pkg, parent))
elif parent.operation == "nomerge":
depends_on_order.add((parent, pkg))
# None of the above blocker resolutions techniques apply,
# so apparently this one is unresolvable.
unresolved_blocks = True
# Make sure we don't unmerge any packages that have been pulled
if not unresolved_blocks and depends_on_order:
for inst_pkg, inst_task in depends_on_order:
if self.digraph.contains(inst_pkg) and \
self.digraph.parent_nodes(inst_pkg):
unresolved_blocks = True
# Schedule explicit uninstall tasks, ordered before the merges
# that depend on them.
if not unresolved_blocks and depends_on_order:
for inst_pkg, inst_task in depends_on_order:
uninst_task = Package(built=inst_pkg.built,
cpv=inst_pkg.cpv, installed=inst_pkg.installed,
metadata=inst_pkg.metadata,
operation="uninstall",
root_config=inst_pkg.root_config,
type_name=inst_pkg.type_name)
self._pkg_cache[uninst_task] = uninst_task
# Enforce correct merge order with a hard dep.
self.digraph.addnode(uninst_task, inst_task,
priority=BlockerDepPriority.instance)
# Count references to this blocker so that it can be
# invalidated after nodes referencing it have been
self._blocker_uninstalls.addnode(uninst_task, blocker)
if not unresolved_blocks and not depends_on_order:
self._irrelevant_blockers.add(blocker, parent)
self._blocker_parents.remove_edge(blocker, parent)
if not self._blocker_parents.parent_nodes(blocker):
self._blocker_parents.remove(blocker)
if not self._blocker_parents.child_nodes(parent):
self._blocker_parents.remove(parent)
if unresolved_blocks:
self._unsolvable_blockers.add(blocker, parent)
# Decide whether blocker conflicts may be tolerated for this run: with
# --buildpkgonly/--fetchonly/--fetch-all-uri/--nodeps no live filesystem
# merge happens, so conflicting packages are acceptable.
# NOTE(review): this extract is missing lines (embedded numbering jumps
# 6884->6886 and 6888->6893); the result initialization and the return
# statement are not visible here.
6884 def _accept_blocker_conflicts(self):
6886 for x in ("--buildpkgonly", "--fetchonly",
6887 "--fetch-all-uri", "--nodeps"):
6888 if x in self.myopts:
# Sort mygraph.order in place so that deep system runtime deps and
# highly-referenced nodes are preferred during leaf-node selection;
# uninstall operations are special-cased by the comparator. The
# cmp-style comparator is adapted via cmp_sort_key (portage.util).
# NOTE(review): several lines are missing from this extract (embedded
# numbering jumps e.g. 6896->6900, 6907->6911, 6918->6923), including
# the node_info initialization, docstring delimiters, and the early
# return branches of the comparator.
6893 def _merge_order_bias(self, mygraph):
6895 For optimal leaf node selection, promote deep system runtime deps and
6896 order nodes from highest to lowest overall reference count.
# node_info maps each node to its parent (reference) count.
6900 for node in mygraph.order:
6901 node_info[node] = len(mygraph.parent_nodes(node))
6902 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
6904 def cmp_merge_preference(node1, node2):
# Uninstall operations and deep-system membership are compared first;
# the visible branch bodies (returns) are missing from this extract.
6906 if node1.operation == 'uninstall':
6907 if node2.operation == 'uninstall':
6911 if node2.operation == 'uninstall':
6912 if node1.operation == 'uninstall':
6916 node1_sys = node1 in deep_system_deps
6917 node2_sys = node2 in deep_system_deps
6918 if node1_sys != node2_sys:
# Fallback: higher reference count sorts earlier (descending).
6923 return node_info[node2] - node_info[node1]
6925 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
# Return a copy of the serialized task list, computing it on first use:
# conflicts are resolved and tasks serialized until the cache is
# populated, with self._serialize_tasks_retry triggering another pass.
# NOTE(review): truncated extract — the `try:` header, the retry
# handler body, and the tail that honors the `reversed` parameter and
# returns retlist are not visible here (numbering jumps 6930->6932,
# 6934->6937).
6927 def altlist(self, reversed=False):
6929 while self._serialized_tasks_cache is None:
6930 self._resolve_conflicts()
6932 self._serialized_tasks_cache, self._scheduler_graph = \
6933 self._serialize_tasks()
6934 except self._serialize_tasks_retry:
6937 retlist = self._serialized_tasks_cache[:]
# Return the scheduler graph, computing it lazily on first access and
# breaking depgraph back-references on its nodes before returning.
6942 def schedulerGraph(self):
6944 The scheduler graph is identical to the normal one except that
6945 uninstall edges are reversed in specific cases that require
6946 conflicting packages to be temporarily installed simultaneously.
6947 This is intended for use by the Scheduler in it's parallelization
6948 logic. It ensures that temporary simultaneous installation of
6949 conflicting packages is avoided when appropriate (especially for
6950 !!atom blockers), but allowed in specific cases that require it.
6952 Note that this method calls break_refs() which alters the state of
6953 internal Package instances such that this depgraph instance should
6954 not be used to perform any more calculations.
# NOTE(review): the docstring delimiters and the statement that
# populates self._scheduler_graph inside the `is None` branch are
# missing from this extract (numbering jumps 6942->6944, 6956->6958).
6956 if self._scheduler_graph is None:
6958 self.break_refs(self._scheduler_graph.order)
6959 return self._scheduler_graph
# Sever references from merge-list nodes back to this depgraph so that
# callers can keep Package instances without keeping the depgraph (and
# its FakeVartree) alive on the heap.
6961 def break_refs(self, nodes):
6963 Take a mergelist like that returned from self.altlist() and
6964 break any references that lead back to the depgraph. This is
6965 useful if you want to hold references to packages without
6966 also holding the depgraph on the heap.
# NOTE(review): the docstring delimiters and the `for node in nodes:`
# loop header are missing from this extract (numbering jumps
# 6966->6969); the body below clearly operates per node.
6969 if hasattr(node, "root_config"):
6970 # The FakeVartree references the _package_cache which
6971 # references the depgraph. So that Package instances don't
6972 # hold the depgraph and FakeVartree on the heap, replace
6973 # the RootConfig that references the FakeVartree with the
6974 # original RootConfig instance which references the actual
6976 node.root_config = \
6977 self._trees_orig[node.root_config.root]["root_config"]
def _resolve_conflicts(self):
    """Bring the dependency graph into a conflict-free state.

    First completes the graph, then validates blockers — raising
    self._unknown_internal_error() as soon as either step fails —
    and finally processes any recorded slot collisions.
    """
    # Both checks must pass, in this exact order.
    for passes in (self._complete_graph, self.validate_blockers):
        if not passes():
            raise self._unknown_internal_error()

    if self._slot_collision_info:
        self._process_slot_conflicts()
# Serialize the dependency graph into an ordered list of merge and
# uninstall tasks; the visible tail returns (retlist, scheduler_graph).
# Raises self._serialize_tasks_retry to request another pass after
# adding "complete" to myparams, and self._unknown_internal_error() on
# unresolvable circular deps, blocker conflicts, or slot collisions.
# NOTE(review): many lines are missing from this extract (the embedded
# numbering has frequent gaps, e.g. 6994->6996, 7007->7009, 7025->7030);
# initializations (asap_nodes, retlist, prefer_asap, uninst_task, ...),
# else/break/continue branches and try: headers are not visible, so the
# comments below describe only what the visible lines show.
6989 def _serialize_tasks(self):
6991 if "--debug" in self.myopts:
6992 writemsg("\ndigraph:\n\n", noiselevel=-1)
6993 self.digraph.debug_print()
6994 writemsg("\n", noiselevel=-1)
# Work on copies: mygraph is consumed during serialization while
# scheduler_graph records the edge structure handed to the Scheduler.
6996 scheduler_graph = self.digraph.copy()
6997 mygraph=self.digraph.copy()
6998 # Prune "nomerge" root nodes if nothing depends on them, since
6999 # otherwise they slow down merge order calculation. Don't remove
7000 # non-root nodes since they help optimize merge order in some cases
7001 # such as revdep-rebuild.
7002 removed_nodes = set()
7004 for node in mygraph.root_nodes():
7005 if not isinstance(node, Package) or \
7006 node.installed or node.onlydeps:
7007 removed_nodes.add(node)
7009 self.spinner.update()
7010 mygraph.difference_update(removed_nodes)
7011 if not removed_nodes:
7013 removed_nodes.clear()
7014 self._merge_order_bias(mygraph)
# Comparator used when a multi-node (circular) group must be ordered.
7015 def cmp_circular_bias(n1, n2):
7017 RDEPEND is stronger than PDEPEND and this function
7018 measures such a strength bias within a circular
7019 dependency relationship.
7021 n1_n2_medium = n2 in mygraph.child_nodes(n1,
7022 ignore_priority=priority_range.ignore_medium_soft)
7023 n2_n1_medium = n1 in mygraph.child_nodes(n2,
7024 ignore_priority=priority_range.ignore_medium_soft)
7025 if n1_n2_medium == n2_n1_medium:
7030 myblocker_uninstalls = self._blocker_uninstalls.copy()
7032 # Contains uninstall tasks that have been scheduled to
7033 # occur after overlapping blockers have been installed.
7034 scheduled_uninstalls = set()
7035 # Contains any Uninstall tasks that have been ignored
7036 # in order to avoid the circular deps code path. These
7037 # correspond to blocker conflicts that could not be
7039 ignored_uninstall_tasks = set()
7040 have_uninstall_task = False
7041 complete = "complete" in self.myparams
7044 def get_nodes(**kwargs):
7046 Returns leaf nodes excluding Uninstall instances
7047 since those should be executed as late as possible.
7049 return [node for node in mygraph.leaf_nodes(**kwargs) \
7050 if isinstance(node, Package) and \
7051 (node.operation != "uninstall" or \
7052 node in scheduled_uninstalls)]
7054 # sys-apps/portage needs special treatment if ROOT="/"
7055 running_root = self._running_root.root
7056 from portage.const import PORTAGE_PACKAGE_ATOM
7057 runtime_deps = InternalPackageSet(
7058 initial_atoms=[PORTAGE_PACKAGE_ATOM])
7059 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
7060 PORTAGE_PACKAGE_ATOM)
7061 replacement_portage = self.mydbapi[running_root].match_pkgs(
7062 PORTAGE_PACKAGE_ATOM)
7065 running_portage = running_portage[0]
7067 running_portage = None
7069 if replacement_portage:
7070 replacement_portage = replacement_portage[0]
7072 replacement_portage = None
7074 if replacement_portage == running_portage:
7075 replacement_portage = None
7077 if replacement_portage is not None:
7078 # update from running_portage to replacement_portage asap
7079 asap_nodes.append(replacement_portage)
7081 if running_portage is not None:
7083 portage_rdepend = self._select_atoms_highest_available(
7084 running_root, running_portage.metadata["RDEPEND"],
7085 myuse=running_portage.use.enabled,
7086 parent=running_portage, strict=False)
7087 except portage.exception.InvalidDependString, e:
7088 portage.writemsg("!!! Invalid RDEPEND in " + \
7089 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7090 (running_root, running_portage.cpv, e), noiselevel=-1)
7092 portage_rdepend = []
7093 runtime_deps.update(atom for atom in portage_rdepend \
7094 if not atom.startswith("!"))
# Recursive grouping of mutually-RDEPENDing mergeable nodes.
7096 def gather_deps(ignore_priority, mergeable_nodes,
7097 selected_nodes, node):
7099 Recursively gather a group of nodes that RDEPEND on
7100 eachother. This ensures that they are merged as a group
7101 and get their RDEPENDs satisfied as soon as possible.
7103 if node in selected_nodes:
7105 if node not in mergeable_nodes:
7107 if node == replacement_portage and \
7108 mygraph.child_nodes(node,
7109 ignore_priority=priority_range.ignore_medium_soft):
7110 # Make sure that portage always has all of it's
7111 # RDEPENDs installed first.
7113 selected_nodes.add(node)
7114 for child in mygraph.child_nodes(node,
7115 ignore_priority=ignore_priority):
7116 if not gather_deps(ignore_priority,
7117 mergeable_nodes, selected_nodes, child):
# Priority filters that additionally treat blocker-dep edges specially.
7121 def ignore_uninst_or_med(priority):
7122 if priority is BlockerDepPriority.instance:
7124 return priority_range.ignore_medium(priority)
7126 def ignore_uninst_or_med_soft(priority):
7127 if priority is BlockerDepPriority.instance:
7129 return priority_range.ignore_medium_soft(priority)
7131 tree_mode = "--tree" in self.myopts
7132 # Tracks whether or not the current iteration should prefer asap_nodes
7133 # if available. This is set to False when the previous iteration
7134 # failed to select any nodes. It is reset whenever nodes are
7135 # successfully selected.
7138 # Controls whether or not the current iteration should drop edges that
7139 # are "satisfied" by installed packages, in order to solve circular
7140 # dependencies. The deep runtime dependencies of installed packages are
7141 # not checked in this case (bug #199856), so it must be avoided
7142 # whenever possible.
7143 drop_satisfied = False
7145 # State of variables for successive iterations that loosen the
7146 # criteria for node selection.
7148 # iteration prefer_asap drop_satisfied
7153 # If no nodes are selected on the last iteration, it is due to
7154 # unresolved blockers or circular dependencies.
# Main selection loop: drain mygraph by repeatedly choosing nodes.
7156 while not mygraph.empty():
7157 self.spinner.update()
7158 selected_nodes = None
7159 ignore_priority = None
7160 if drop_satisfied or (prefer_asap and asap_nodes):
7161 priority_range = DepPrioritySatisfiedRange
7163 priority_range = DepPriorityNormalRange
7164 if prefer_asap and asap_nodes:
7165 # ASAP nodes are merged before their soft deps. Go ahead and
7166 # select root nodes here if necessary, since it's typical for
7167 # the parent to have been removed from the graph already.
7168 asap_nodes = [node for node in asap_nodes \
7169 if mygraph.contains(node)]
7170 for node in asap_nodes:
7171 if not mygraph.child_nodes(node,
7172 ignore_priority=priority_range.ignore_soft):
7173 selected_nodes = [node]
7174 asap_nodes.remove(node)
7176 if not selected_nodes and \
7177 not (prefer_asap and asap_nodes):
7178 for i in xrange(priority_range.NONE,
7179 priority_range.MEDIUM_SOFT + 1):
7180 ignore_priority = priority_range.ignore_priority[i]
7181 nodes = get_nodes(ignore_priority=ignore_priority)
7183 # If there is a mix of uninstall nodes with other
7184 # types, save the uninstall nodes for later since
7185 # sometimes a merge node will render an uninstall
7186 # node unnecessary (due to occupying the same slot),
7187 # and we want to avoid executing a separate uninstall
7188 # task in that case.
7190 good_uninstalls = []
7191 with_some_uninstalls_excluded = []
7193 if node.operation == "uninstall":
7194 slot_node = self.mydbapi[node.root
7195 ].match_pkgs(node.slot_atom)
7197 slot_node[0].operation == "merge":
7199 good_uninstalls.append(node)
7200 with_some_uninstalls_excluded.append(node)
7202 nodes = good_uninstalls
7203 elif with_some_uninstalls_excluded:
7204 nodes = with_some_uninstalls_excluded
7208 if ignore_priority is None and not tree_mode:
7209 # Greedily pop all of these nodes since no
7210 # relationship has been ignored. This optimization
7211 # destroys --tree output, so it's disabled in tree
7213 selected_nodes = nodes
7215 # For optimal merge order:
7216 # * Only pop one node.
7217 # * Removing a root node (node without a parent)
7218 # will not produce a leaf node, so avoid it.
7219 # * It's normal for a selected uninstall to be a
7220 # root node, so don't check them for parents.
7222 if node.operation == "uninstall" or \
7223 mygraph.parent_nodes(node):
7224 selected_nodes = [node]
# Fall back to group selection of mutually-dependent nodes.
7230 if not selected_nodes:
7231 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7233 mergeable_nodes = set(nodes)
7234 if prefer_asap and asap_nodes:
7236 for i in xrange(priority_range.SOFT,
7237 priority_range.MEDIUM_SOFT + 1):
7238 ignore_priority = priority_range.ignore_priority[i]
7240 if not mygraph.parent_nodes(node):
7242 selected_nodes = set()
7243 if gather_deps(ignore_priority,
7244 mergeable_nodes, selected_nodes, node):
7247 selected_nodes = None
7251 if prefer_asap and asap_nodes and not selected_nodes:
7252 # We failed to find any asap nodes to merge, so ignore
7253 # them for the next iteration.
7257 if selected_nodes and ignore_priority is not None:
7258 # Try to merge ignored medium_soft deps as soon as possible
7259 # if they're not satisfied by installed packages.
7260 for node in selected_nodes:
7261 children = set(mygraph.child_nodes(node))
7262 soft = children.difference(
7263 mygraph.child_nodes(node,
7264 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7265 medium_soft = children.difference(
7266 mygraph.child_nodes(node,
7268 DepPrioritySatisfiedRange.ignore_medium_soft))
7269 medium_soft.difference_update(soft)
7270 for child in medium_soft:
7271 if child in selected_nodes:
7273 if child in asap_nodes:
7275 asap_nodes.append(child)
7277 if selected_nodes and len(selected_nodes) > 1:
7278 if not isinstance(selected_nodes, list):
7279 selected_nodes = list(selected_nodes)
7280 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
# Blocker resolution path: schedule an Uninstall task when nothing
# else can be selected.
7282 if not selected_nodes and not myblocker_uninstalls.is_empty():
7283 # An Uninstall task needs to be executed in order to
7284 # avoid conflict if possible.
7287 priority_range = DepPrioritySatisfiedRange
7289 priority_range = DepPriorityNormalRange
7291 mergeable_nodes = get_nodes(
7292 ignore_priority=ignore_uninst_or_med)
7294 min_parent_deps = None
7296 for task in myblocker_uninstalls.leaf_nodes():
7297 # Do some sanity checks so that system or world packages
7298 # don't get uninstalled inappropriately here (only really
7299 # necessary when --complete-graph has not been enabled).
7301 if task in ignored_uninstall_tasks:
7304 if task in scheduled_uninstalls:
7305 # It's been scheduled but it hasn't
7306 # been executed yet due to dependence
7307 # on installation of blocking packages.
7310 root_config = self.roots[task.root]
7311 inst_pkg = self._pkg_cache[
7312 ("installed", task.root, task.cpv, "nomerge")]
7314 if self.digraph.contains(inst_pkg):
7317 forbid_overlap = False
7318 heuristic_overlap = False
7319 for blocker in myblocker_uninstalls.parent_nodes(task):
7320 if blocker.eapi in ("0", "1"):
7321 heuristic_overlap = True
7322 elif blocker.atom.blocker.overlap.forbid:
7323 forbid_overlap = True
7325 if forbid_overlap and running_root == task.root:
7328 if heuristic_overlap and running_root == task.root:
7329 # Never uninstall sys-apps/portage or it's essential
7330 # dependencies, except through replacement.
7332 runtime_dep_atoms = \
7333 list(runtime_deps.iterAtomsForPackage(task))
7334 except portage.exception.InvalidDependString, e:
7335 portage.writemsg("!!! Invalid PROVIDE in " + \
7336 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7337 (task.root, task.cpv, e), noiselevel=-1)
7341 # Don't uninstall a runtime dep if it appears
7342 # to be the only suitable one installed.
7344 vardb = root_config.trees["vartree"].dbapi
7345 for atom in runtime_dep_atoms:
7346 other_version = None
7347 for pkg in vardb.match_pkgs(atom):
7348 if pkg.cpv == task.cpv and \
7349 pkg.metadata["COUNTER"] == \
7350 task.metadata["COUNTER"]:
7354 if other_version is None:
7360 # For packages in the system set, don't take
7361 # any chances. If the conflict can't be resolved
7362 # by a normal replacement operation then abort.
7365 for atom in root_config.sets[
7366 "system"].iterAtomsForPackage(task):
7369 except portage.exception.InvalidDependString, e:
7370 portage.writemsg("!!! Invalid PROVIDE in " + \
7371 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7372 (task.root, task.cpv, e), noiselevel=-1)
7378 # Note that the world check isn't always
7379 # necessary since self._complete_graph() will
7380 # add all packages from the system and world sets to the
7381 # graph. This just allows unresolved conflicts to be
7382 # detected as early as possible, which makes it possible
7383 # to avoid calling self._complete_graph() when it is
7384 # unnecessary due to blockers triggering an abortion.
7386 # For packages in the world set, go ahead an uninstall
7387 # when necessary, as long as the atom will be satisfied
7388 # in the final state.
7389 graph_db = self.mydbapi[task.root]
7392 for atom in root_config.sets[
7393 "world"].iterAtomsForPackage(task):
7395 for pkg in graph_db.match_pkgs(atom):
7402 self._blocked_world_pkgs[inst_pkg] = atom
7404 except portage.exception.InvalidDependString, e:
7405 portage.writemsg("!!! Invalid PROVIDE in " + \
7406 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7407 (task.root, task.cpv, e), noiselevel=-1)
7413 # Check the deps of parent nodes to ensure that
7414 # the chosen task produces a leaf node. Maybe
7415 # this can be optimized some more to make the
7416 # best possible choice, but the current algorithm
7417 # is simple and should be near optimal for most
7419 mergeable_parent = False
7421 for parent in mygraph.parent_nodes(task):
7422 parent_deps.update(mygraph.child_nodes(parent,
7423 ignore_priority=priority_range.ignore_medium_soft))
7424 if parent in mergeable_nodes and \
7425 gather_deps(ignore_uninst_or_med_soft,
7426 mergeable_nodes, set(), parent):
7427 mergeable_parent = True
7429 if not mergeable_parent:
7432 parent_deps.remove(task)
7433 if min_parent_deps is None or \
7434 len(parent_deps) < min_parent_deps:
7435 min_parent_deps = len(parent_deps)
7438 if uninst_task is not None:
7439 # The uninstall is performed only after blocking
7440 # packages have been merged on top of it. File
7441 # collisions between blocking packages are detected
7442 # and removed from the list of files to be uninstalled.
7443 scheduled_uninstalls.add(uninst_task)
7444 parent_nodes = mygraph.parent_nodes(uninst_task)
7446 # Reverse the parent -> uninstall edges since we want
7447 # to do the uninstall after blocking packages have
7448 # been merged on top of it.
7449 mygraph.remove(uninst_task)
7450 for blocked_pkg in parent_nodes:
7451 mygraph.add(blocked_pkg, uninst_task,
7452 priority=BlockerDepPriority.instance)
7453 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7454 scheduler_graph.add(blocked_pkg, uninst_task,
7455 priority=BlockerDepPriority.instance)
7457 # Reset the state variables for leaf node selection and
7458 # continue trying to select leaf nodes.
7460 drop_satisfied = False
7463 if not selected_nodes:
7464 # Only select root nodes as a last resort. This case should
7465 # only trigger when the graph is nearly empty and the only
7466 # remaining nodes are isolated (no parents or children). Since
7467 # the nodes must be isolated, ignore_priority is not needed.
7468 selected_nodes = get_nodes()
7470 if not selected_nodes and not drop_satisfied:
7471 drop_satisfied = True
7474 if not selected_nodes and not myblocker_uninstalls.is_empty():
7475 # If possible, drop an uninstall task here in order to avoid
7476 # the circular deps code path. The corresponding blocker will
7477 # still be counted as an unresolved conflict.
7479 for node in myblocker_uninstalls.leaf_nodes():
7481 mygraph.remove(node)
7486 ignored_uninstall_tasks.add(node)
7489 if uninst_task is not None:
7490 # Reset the state variables for leaf node selection and
7491 # continue trying to select leaf nodes.
7493 drop_satisfied = False
7496 if not selected_nodes:
7497 self._circular_deps_for_display = mygraph
7498 raise self._unknown_internal_error()
7500 # At this point, we've succeeded in selecting one or more nodes, so
7501 # reset state variables for leaf node selection.
7503 drop_satisfied = False
7505 mygraph.difference_update(selected_nodes)
# Append the selected nodes to retlist and reconcile them with any
# pending blocker-driven Uninstall tasks.
7507 for node in selected_nodes:
7508 if isinstance(node, Package) and \
7509 node.operation == "nomerge":
7512 # Handle interactions between blockers
7513 # and uninstallation tasks.
7514 solved_blockers = set()
7516 if isinstance(node, Package) and \
7517 "uninstall" == node.operation:
7518 have_uninstall_task = True
7521 vardb = self.trees[node.root]["vartree"].dbapi
7522 previous_cpv = vardb.match(node.slot_atom)
7524 # The package will be replaced by this one, so remove
7525 # the corresponding Uninstall task if necessary.
7526 previous_cpv = previous_cpv[0]
7528 ("installed", node.root, previous_cpv, "uninstall")
7530 mygraph.remove(uninst_task)
7534 if uninst_task is not None and \
7535 uninst_task not in ignored_uninstall_tasks and \
7536 myblocker_uninstalls.contains(uninst_task):
7537 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7538 myblocker_uninstalls.remove(uninst_task)
7539 # Discard any blockers that this Uninstall solves.
7540 for blocker in blocker_nodes:
7541 if not myblocker_uninstalls.child_nodes(blocker):
7542 myblocker_uninstalls.remove(blocker)
7543 solved_blockers.add(blocker)
7545 retlist.append(node)
7547 if (isinstance(node, Package) and \
7548 "uninstall" == node.operation) or \
7549 (uninst_task is not None and \
7550 uninst_task in scheduled_uninstalls):
7551 # Include satisfied blockers in the merge list
7552 # since the user might be interested and also
7553 # it serves as an indicator that blocking packages
7554 # will be temporarily installed simultaneously.
7555 for blocker in solved_blockers:
7556 retlist.append(Blocker(atom=blocker.atom,
7557 root=blocker.root, eapi=blocker.eapi,
# Post-loop: collect unsolvable blockers and decide final outcome.
7560 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7561 for node in myblocker_uninstalls.root_nodes():
7562 unsolvable_blockers.add(node)
7564 for blocker in unsolvable_blockers:
7565 retlist.append(blocker)
7567 # If any Uninstall tasks need to be executed in order
7568 # to avoid a conflict, complete the graph with any
7569 # dependencies that may have been initially
7570 # neglected (to ensure that unsafe Uninstall tasks
7571 # are properly identified and blocked from execution).
7572 if have_uninstall_task and \
7574 not unsolvable_blockers:
7575 self.myparams.add("complete")
7576 raise self._serialize_tasks_retry("")
7578 if unsolvable_blockers and \
7579 not self._accept_blocker_conflicts():
7580 self._unsatisfied_blockers_for_display = unsolvable_blockers
7581 self._serialized_tasks_cache = retlist[:]
7582 self._scheduler_graph = scheduler_graph
7583 raise self._unknown_internal_error()
7585 if self._slot_collision_info and \
7586 not self._accept_blocker_conflicts():
7587 self._serialized_tasks_cache = retlist[:]
7588 self._scheduler_graph = scheduler_graph
7589 raise self._unknown_internal_error()
7591 return retlist, scheduler_graph
# Display a circular-dependency error: prune acyclic root nodes to
# reduce noise, show the cycle members in --tree mode (with USE flags
# visible, by clearing --quiet/--verbose), then print the debug graph
# and advice about disabling USE flags.
# NOTE(review): lines are missing from this extract (embedded numbering
# jumps 7597->7599, 7600->7603, 7606->7608, 7610->7612, 7612->7615,
# 7625->7627) — the root-pruning `while` header, the display_order
# initialization, a branch inside the leaf-node loop, and a noiselevel
# argument line are not visible here.
7593 def _show_circular_deps(self, mygraph):
7594 # No leaf nodes are available, so we have a circular
7595 # dependency panic situation. Reduce the noise level to a
7596 # minimum via repeated elimination of root nodes since they
7597 # have no parents and thus can not be part of a cycle.
7599 root_nodes = mygraph.root_nodes(
7600 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7603 mygraph.difference_update(root_nodes)
7604 # Display the USE flags that are enabled on nodes that are part
7605 # of dependency cycles in case that helps the user decide to
7606 # disable some of them.
7608 tempgraph = mygraph.copy()
7609 while not tempgraph.empty():
7610 nodes = tempgraph.leaf_nodes()
7612 node = tempgraph.order[0]
7615 display_order.append(node)
7616 tempgraph.remove(node)
7617 display_order.reverse()
# Force verbose tree output so the cycle is visible to the user.
7618 self.myopts.pop("--quiet", None)
7619 self.myopts.pop("--verbose", None)
7620 self.myopts["--tree"] = True
7621 portage.writemsg("\n\n", noiselevel=-1)
7622 self.display(display_order)
7623 prefix = colorize("BAD", " * ")
7624 portage.writemsg("\n", noiselevel=-1)
7625 portage.writemsg(prefix + "Error: circular dependencies:\n",
7627 portage.writemsg("\n", noiselevel=-1)
7628 mygraph.debug_print()
7629 portage.writemsg("\n", noiselevel=-1)
7630 portage.writemsg(prefix + "Note that circular dependencies " + \
7631 "can often be avoided by temporarily\n", noiselevel=-1)
7632 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7633 "optional dependencies.\n", noiselevel=-1)
def _show_merge_list(self):
    """Display the cached merge list, unless that exact list (or its
    reverse) has already been shown via display()."""
    cached = self._serialized_tasks_cache
    if cached is None:
        return
    shown = self._displayed_list
    already_shown = bool(shown) and (
        shown == cached or shown == list(reversed(cached)))
    if already_shown:
        return
    display_list = cached[:]
    # --tree output is rendered bottom-up, so flip the ordering.
    if "--tree" in self.myopts:
        display_list.reverse()
    self.display(display_list)
# Report unsatisfiable blocker conflicts: show the merge list, print a
# wrapped error banner, then list each conflicting package together
# with a bounded set of the parent atoms that pulled it in (preferring
# parents not themselves involved in the conflict), and finally point
# the user at the blocker documentation unless --quiet.
# NOTE(review): lines are missing from this extract (embedded numbering
# jumps e.g. 7660->7664, 7688->7694, 7696->7700, 7714->7716) — the
# conflict_pkgs/pruned_pkgs/pruned_list initializations, the `indent`
# and `max_parents` definitions, and several else/continue branches are
# not visible here.
7646 def _show_unsatisfied_blockers(self, blockers):
7647 self._show_merge_list()
7648 msg = "Error: The above package list contains " + \
7649 "packages which cannot be installed " + \
7650 "at the same time on the same system."
7651 prefix = colorize("BAD", " * ")
7652 from textwrap import wrap
7653 portage.writemsg("\n", noiselevel=-1)
7654 for line in wrap(msg, 70):
7655 portage.writemsg(prefix + line + "\n", noiselevel=-1)
7657 # Display the conflicting packages along with the packages
7658 # that pulled them in. This is helpful for troubleshooting
7659 # cases in which blockers don't solve automatically and
7660 # the reasons are not apparent from the normal merge list
7664 for blocker in blockers:
7665 for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7666 self._blocker_parents.parent_nodes(blocker)):
7667 parent_atoms = self._parent_atoms.get(pkg)
7668 if not parent_atoms:
# A package with no recorded parents may still be pinned by the
# world set; synthesize an "@world" parent entry in that case.
7669 atom = self._blocked_world_pkgs.get(pkg)
7670 if atom is not None:
7671 parent_atoms = set([("@world", atom)])
7673 conflict_pkgs[pkg] = parent_atoms
7676 # Reduce noise by pruning packages that are only
7677 # pulled in by other conflict packages.
7679 for pkg, parent_atoms in conflict_pkgs.iteritems():
7680 relevant_parent = False
7681 for parent, atom in parent_atoms:
7682 if parent not in conflict_pkgs:
7683 relevant_parent = True
7685 if not relevant_parent:
7686 pruned_pkgs.add(pkg)
7687 for pkg in pruned_pkgs:
7688 del conflict_pkgs[pkg]
7694 # Max number of parents shown, to avoid flooding the display.
7696 for pkg, parent_atoms in conflict_pkgs.iteritems():
7700 # Prefer packages that are not directly involved in a conflict.
7701 for parent_atom in parent_atoms:
7702 if len(pruned_list) >= max_parents:
7704 parent, atom = parent_atom
7705 if parent not in conflict_pkgs:
7706 pruned_list.add(parent_atom)
7708 for parent_atom in parent_atoms:
7709 if len(pruned_list) >= max_parents:
7711 pruned_list.add(parent_atom)
7713 omitted_parents = len(parent_atoms) - len(pruned_list)
7714 msg.append(indent + "%s pulled in by\n" % pkg)
7716 for parent_atom in pruned_list:
7717 parent, atom = parent_atom
7718 msg.append(2*indent)
7719 if isinstance(parent,
7720 (PackageArg, AtomArg)):
7721 # For PackageArg and AtomArg types, it's
7722 # redundant to display the atom attribute.
7723 msg.append(str(parent))
7725 # Display the specific atom from SetArg or
7727 msg.append("%s required by %s" % (atom, parent))
7731 msg.append(2*indent)
7732 msg.append("(and %d more)\n" % omitted_parents)
7736 sys.stderr.write("".join(msg))
7739 if "--quiet" not in self.myopts:
7740 show_blocker_docs_link()
7742 def display(self, mylist, favorites=[], verbosity=None):
7744 # This is used to prevent display_problems() from
7745 # redundantly displaying this exact same merge list
7746 # again via _show_merge_list().
7747 self._displayed_list = mylist
7749 if verbosity is None:
7750 verbosity = ("--quiet" in self.myopts and 1 or \
7751 "--verbose" in self.myopts and 3 or 2)
7752 favorites_set = InternalPackageSet(favorites)
7753 oneshot = "--oneshot" in self.myopts or \
7754 "--onlydeps" in self.myopts
7755 columns = "--columns" in self.myopts
7760 counters = PackageCounters()
7762 if verbosity == 1 and "--verbose" not in self.myopts:
7763 def create_use_string(*args):
7766 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7768 is_new, reinst_flags,
7769 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7770 alphabetical=("--alphabetical" in self.myopts)):
7778 cur_iuse = set(cur_iuse)
7779 enabled_flags = cur_iuse.intersection(cur_use)
7780 removed_iuse = set(old_iuse).difference(cur_iuse)
7781 any_iuse = cur_iuse.union(old_iuse)
7782 any_iuse = list(any_iuse)
7784 for flag in any_iuse:
7787 reinst_flag = reinst_flags and flag in reinst_flags
7788 if flag in enabled_flags:
7790 if is_new or flag in old_use and \
7791 (all_flags or reinst_flag):
7792 flag_str = red(flag)
7793 elif flag not in old_iuse:
7794 flag_str = yellow(flag) + "%*"
7795 elif flag not in old_use:
7796 flag_str = green(flag) + "*"
7797 elif flag in removed_iuse:
7798 if all_flags or reinst_flag:
7799 flag_str = yellow("-" + flag) + "%"
7802 flag_str = "(" + flag_str + ")"
7803 removed.append(flag_str)
7806 if is_new or flag in old_iuse and \
7807 flag not in old_use and \
7808 (all_flags or reinst_flag):
7809 flag_str = blue("-" + flag)
7810 elif flag not in old_iuse:
7811 flag_str = yellow("-" + flag)
7812 if flag not in iuse_forced:
7814 elif flag in old_use:
7815 flag_str = green("-" + flag) + "*"
7817 if flag in iuse_forced:
7818 flag_str = "(" + flag_str + ")"
7820 enabled.append(flag_str)
7822 disabled.append(flag_str)
7825 ret = " ".join(enabled)
7827 ret = " ".join(enabled + disabled + removed)
7829 ret = '%s="%s" ' % (name, ret)
7832 repo_display = RepoDisplay(self.roots)
7836 mygraph = self.digraph.copy()
7838 # If there are any Uninstall instances, add the corresponding
7839 # blockers to the digraph (useful for --tree display).
7841 executed_uninstalls = set(node for node in mylist \
7842 if isinstance(node, Package) and node.operation == "unmerge")
7844 for uninstall in self._blocker_uninstalls.leaf_nodes():
7845 uninstall_parents = \
7846 self._blocker_uninstalls.parent_nodes(uninstall)
7847 if not uninstall_parents:
7850 # Remove the corresponding "nomerge" node and substitute
7851 # the Uninstall node.
7852 inst_pkg = self._pkg_cache[
7853 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7855 mygraph.remove(inst_pkg)
7860 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7862 inst_pkg_blockers = []
7864 # Break the Package -> Uninstall edges.
7865 mygraph.remove(uninstall)
7867 # Resolution of a package's blockers
7868 # depend on it's own uninstallation.
7869 for blocker in inst_pkg_blockers:
7870 mygraph.add(uninstall, blocker)
7872 # Expand Package -> Uninstall edges into
7873 # Package -> Blocker -> Uninstall edges.
7874 for blocker in uninstall_parents:
7875 mygraph.add(uninstall, blocker)
7876 for parent in self._blocker_parents.parent_nodes(blocker):
7877 if parent != inst_pkg:
7878 mygraph.add(blocker, parent)
7880 # If the uninstall task did not need to be executed because
7881 # of an upgrade, display Blocker -> Upgrade edges since the
7882 # corresponding Blocker -> Uninstall edges will not be shown.
7884 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7885 if upgrade_node is not None and \
7886 uninstall not in executed_uninstalls:
7887 for blocker in uninstall_parents:
7888 mygraph.add(upgrade_node, blocker)
7890 unsatisfied_blockers = []
7895 if isinstance(x, Blocker) and not x.satisfied:
7896 unsatisfied_blockers.append(x)
7899 if "--tree" in self.myopts:
7900 depth = len(tree_nodes)
7901 while depth and graph_key not in \
7902 mygraph.child_nodes(tree_nodes[depth-1]):
7905 tree_nodes = tree_nodes[:depth]
7906 tree_nodes.append(graph_key)
7907 display_list.append((x, depth, True))
7908 shown_edges.add((graph_key, tree_nodes[depth-1]))
7910 traversed_nodes = set() # prevent endless circles
7911 traversed_nodes.add(graph_key)
7912 def add_parents(current_node, ordered):
7914 # Do not traverse to parents if this node is an
7915 # an argument or a direct member of a set that has
7916 # been specified as an argument (system or world).
7917 if current_node not in self._set_nodes:
7918 parent_nodes = mygraph.parent_nodes(current_node)
7920 child_nodes = set(mygraph.child_nodes(current_node))
7921 selected_parent = None
7922 # First, try to avoid a direct cycle.
7923 for node in parent_nodes:
7924 if not isinstance(node, (Blocker, Package)):
7926 if node not in traversed_nodes and \
7927 node not in child_nodes:
7928 edge = (current_node, node)
7929 if edge in shown_edges:
7931 selected_parent = node
7933 if not selected_parent:
7934 # A direct cycle is unavoidable.
7935 for node in parent_nodes:
7936 if not isinstance(node, (Blocker, Package)):
7938 if node not in traversed_nodes:
7939 edge = (current_node, node)
7940 if edge in shown_edges:
7942 selected_parent = node
7945 shown_edges.add((current_node, selected_parent))
7946 traversed_nodes.add(selected_parent)
7947 add_parents(selected_parent, False)
7948 display_list.append((current_node,
7949 len(tree_nodes), ordered))
7950 tree_nodes.append(current_node)
7952 add_parents(graph_key, True)
7954 display_list.append((x, depth, True))
7955 mylist = display_list
7956 for x in unsatisfied_blockers:
7957 mylist.append((x, 0, True))
7959 last_merge_depth = 0
7960 for i in xrange(len(mylist)-1,-1,-1):
7961 graph_key, depth, ordered = mylist[i]
7962 if not ordered and depth == 0 and i > 0 \
7963 and graph_key == mylist[i-1][0] and \
7964 mylist[i-1][1] == 0:
7965 # An ordered node got a consecutive duplicate when the tree was
7969 if ordered and graph_key[-1] != "nomerge":
7970 last_merge_depth = depth
7972 if depth >= last_merge_depth or \
7973 i < len(mylist) - 1 and \
7974 depth >= mylist[i+1][1]:
7977 from portage import flatten
7978 from portage.dep import use_reduce, paren_reduce
7979 # files to fetch list - avoids counting a same file twice
7980 # in size display (verbose mode)
7983 # Use this set to detect when all the "repoadd" strings are "[0]"
7984 # and disable the entire repo display in this case.
7987 for mylist_index in xrange(len(mylist)):
7988 x, depth, ordered = mylist[mylist_index]
7992 portdb = self.trees[myroot]["porttree"].dbapi
7993 bindb = self.trees[myroot]["bintree"].dbapi
7994 vardb = self.trees[myroot]["vartree"].dbapi
7995 vartree = self.trees[myroot]["vartree"]
7996 pkgsettings = self.pkgsettings[myroot]
7999 indent = " " * depth
8001 if isinstance(x, Blocker):
8003 blocker_style = "PKG_BLOCKER_SATISFIED"
8004 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
8006 blocker_style = "PKG_BLOCKER"
8007 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
8009 counters.blocks += 1
8011 counters.blocks_satisfied += 1
8012 resolved = portage.key_expand(
8013 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
8014 if "--columns" in self.myopts and "--quiet" in self.myopts:
8015 addl += " " + colorize(blocker_style, resolved)
8017 addl = "[%s %s] %s%s" % \
8018 (colorize(blocker_style, "blocks"),
8019 addl, indent, colorize(blocker_style, resolved))
8020 block_parents = self._blocker_parents.parent_nodes(x)
8021 block_parents = set([pnode[2] for pnode in block_parents])
8022 block_parents = ", ".join(block_parents)
8024 addl += colorize(blocker_style,
8025 " (\"%s\" is blocking %s)") % \
8026 (str(x.atom).lstrip("!"), block_parents)
8028 addl += colorize(blocker_style,
8029 " (is blocking %s)") % block_parents
8030 if isinstance(x, Blocker) and x.satisfied:
8035 blockers.append(addl)
8038 pkg_merge = ordered and pkg_status == "merge"
8039 if not pkg_merge and pkg_status == "merge":
8040 pkg_status = "nomerge"
8041 built = pkg_type != "ebuild"
8042 installed = pkg_type == "installed"
8044 metadata = pkg.metadata
8046 repo_name = metadata["repository"]
8047 if pkg_type == "ebuild":
8048 ebuild_path = portdb.findname(pkg_key)
8049 if not ebuild_path: # shouldn't happen
8050 raise portage.exception.PackageNotFound(pkg_key)
8051 repo_path_real = os.path.dirname(os.path.dirname(
8052 os.path.dirname(ebuild_path)))
8054 repo_path_real = portdb.getRepositoryPath(repo_name)
8055 pkg_use = list(pkg.use.enabled)
8057 restrict = flatten(use_reduce(paren_reduce(
8058 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8059 except portage.exception.InvalidDependString, e:
8060 if not pkg.installed:
8061 show_invalid_depstring_notice(x,
8062 pkg.metadata["RESTRICT"], str(e))
8066 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8067 "fetch" in restrict:
8070 counters.restrict_fetch += 1
8071 if portdb.fetch_check(pkg_key, pkg_use):
8074 counters.restrict_fetch_satisfied += 1
8076 #we need to use "--emptytree" testing here rather than "empty" param testing because "empty"
8077 #param is used for -u, where you still *do* want to see when something is being upgraded.
8080 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8081 if vardb.cpv_exists(pkg_key):
8082 addl=" "+yellow("R")+fetch+" "
8085 counters.reinst += 1
8086 elif pkg_status == "uninstall":
8087 counters.uninst += 1
8088 # filter out old-style virtual matches
8089 elif installed_versions and \
8090 portage.cpv_getkey(installed_versions[0]) == \
8091 portage.cpv_getkey(pkg_key):
8092 myinslotlist = vardb.match(pkg.slot_atom)
8093 # If this is the first install of a new-style virtual, we
8094 # need to filter out old-style virtual matches.
8095 if myinslotlist and \
8096 portage.cpv_getkey(myinslotlist[0]) != \
8097 portage.cpv_getkey(pkg_key):
8100 myoldbest = myinslotlist[:]
8102 if not portage.dep.cpvequal(pkg_key,
8103 portage.best([pkg_key] + myoldbest)):
8105 addl += turquoise("U")+blue("D")
8107 counters.downgrades += 1
8110 addl += turquoise("U") + " "
8112 counters.upgrades += 1
8114 # New slot, mark it new.
8115 addl = " " + green("NS") + fetch + " "
8116 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8118 counters.newslot += 1
8120 if "--changelog" in self.myopts:
8121 inst_matches = vardb.match(pkg.slot_atom)
8123 changelogs.extend(self.calc_changelog(
8124 portdb.findname(pkg_key),
8125 inst_matches[0], pkg_key))
8127 addl = " " + green("N") + " " + fetch + " "
8136 forced_flags = set()
8137 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8138 forced_flags.update(pkgsettings.useforce)
8139 forced_flags.update(pkgsettings.usemask)
8141 cur_use = [flag for flag in pkg.use.enabled \
8142 if flag in pkg.iuse.all]
8143 cur_iuse = sorted(pkg.iuse.all)
8145 if myoldbest and myinslotlist:
8146 previous_cpv = myoldbest[0]
8148 previous_cpv = pkg.cpv
8149 if vardb.cpv_exists(previous_cpv):
8150 old_iuse, old_use = vardb.aux_get(
8151 previous_cpv, ["IUSE", "USE"])
8152 old_iuse = list(set(
8153 filter_iuse_defaults(old_iuse.split())))
8155 old_use = old_use.split()
8162 old_use = [flag for flag in old_use if flag in old_iuse]
8164 use_expand = pkgsettings["USE_EXPAND"].lower().split()
8166 use_expand.reverse()
8167 use_expand_hidden = \
8168 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8170 def map_to_use_expand(myvals, forcedFlags=False,
8174 for exp in use_expand:
8177 for val in myvals[:]:
8178 if val.startswith(exp.lower()+"_"):
8179 if val in forced_flags:
8180 forced[exp].add(val[len(exp)+1:])
8181 ret[exp].append(val[len(exp)+1:])
8184 forced["USE"] = [val for val in myvals \
8185 if val in forced_flags]
8187 for exp in use_expand_hidden:
8193 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8194 # are the only thing that triggered reinstallation.
8195 reinst_flags_map = {}
8196 reinstall_for_flags = self._reinstall_nodes.get(pkg)
8197 reinst_expand_map = None
8198 if reinstall_for_flags:
8199 reinst_flags_map = map_to_use_expand(
8200 list(reinstall_for_flags), removeHidden=False)
8201 for k in list(reinst_flags_map):
8202 if not reinst_flags_map[k]:
8203 del reinst_flags_map[k]
8204 if not reinst_flags_map.get("USE"):
8205 reinst_expand_map = reinst_flags_map.copy()
8206 reinst_expand_map.pop("USE", None)
8207 if reinst_expand_map and \
8208 not set(reinst_expand_map).difference(
8210 use_expand_hidden = \
8211 set(use_expand_hidden).difference(
8214 cur_iuse_map, iuse_forced = \
8215 map_to_use_expand(cur_iuse, forcedFlags=True)
8216 cur_use_map = map_to_use_expand(cur_use)
8217 old_iuse_map = map_to_use_expand(old_iuse)
8218 old_use_map = map_to_use_expand(old_use)
8221 use_expand.insert(0, "USE")
8223 for key in use_expand:
8224 if key in use_expand_hidden:
8226 verboseadd += create_use_string(key.upper(),
8227 cur_iuse_map[key], iuse_forced[key],
8228 cur_use_map[key], old_iuse_map[key],
8229 old_use_map[key], is_new,
8230 reinst_flags_map.get(key))
8235 if pkg_type == "ebuild" and pkg_merge:
8237 myfilesdict = portdb.getfetchsizes(pkg_key,
8238 useflags=pkg_use, debug=self.edebug)
8239 except portage.exception.InvalidDependString, e:
8240 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8241 show_invalid_depstring_notice(x, src_uri, str(e))
8244 if myfilesdict is None:
8245 myfilesdict="[empty/missing/bad digest]"
8247 for myfetchfile in myfilesdict:
8248 if myfetchfile not in myfetchlist:
8249 mysize+=myfilesdict[myfetchfile]
8250 myfetchlist.append(myfetchfile)
8252 counters.totalsize += mysize
8253 verboseadd += format_size(mysize)
8256 # assign index for a previous version in the same slot
8257 has_previous = False
8258 repo_name_prev = None
8259 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8261 slot_matches = vardb.match(slot_atom)
8264 repo_name_prev = vardb.aux_get(slot_matches[0],
8267 # now use the data to generate output
8268 if pkg.installed or not has_previous:
8269 repoadd = repo_display.repoStr(repo_path_real)
8271 repo_path_prev = None
8273 repo_path_prev = portdb.getRepositoryPath(
8275 if repo_path_prev == repo_path_real:
8276 repoadd = repo_display.repoStr(repo_path_real)
8278 repoadd = "%s=>%s" % (
8279 repo_display.repoStr(repo_path_prev),
8280 repo_display.repoStr(repo_path_real))
8282 repoadd_set.add(repoadd)
8284 xs = [portage.cpv_getkey(pkg_key)] + \
8285 list(portage.catpkgsplit(pkg_key)[2:])
8292 if "COLUMNWIDTH" in self.settings:
8294 mywidth = int(self.settings["COLUMNWIDTH"])
8295 except ValueError, e:
8296 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8298 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8299 self.settings["COLUMNWIDTH"], noiselevel=-1)
8301 oldlp = mywidth - 30
8304 # Convert myoldbest from a list to a string.
8308 for pos, key in enumerate(myoldbest):
8309 key = portage.catpkgsplit(key)[2] + \
8310 "-" + portage.catpkgsplit(key)[3]
8311 if key[-3:] == "-r0":
8313 myoldbest[pos] = key
8314 myoldbest = blue("["+", ".join(myoldbest)+"]")
8317 root_config = self.roots[myroot]
8318 system_set = root_config.sets["system"]
8319 world_set = root_config.sets["world"]
8324 pkg_system = system_set.findAtomForPackage(pkg)
8325 pkg_world = world_set.findAtomForPackage(pkg)
8326 if not (oneshot or pkg_world) and \
8327 myroot == self.target_root and \
8328 favorites_set.findAtomForPackage(pkg):
8329 # Maybe it will be added to world now.
8330 if create_world_atom(pkg, favorites_set, root_config):
8332 except portage.exception.InvalidDependString:
8333 # This is reported elsewhere if relevant.
8336 def pkgprint(pkg_str):
8339 return colorize("PKG_MERGE_SYSTEM", pkg_str)
8341 return colorize("PKG_MERGE_WORLD", pkg_str)
8343 return colorize("PKG_MERGE", pkg_str)
8344 elif pkg_status == "uninstall":
8345 return colorize("PKG_UNINSTALL", pkg_str)
8348 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8350 return colorize("PKG_NOMERGE_WORLD", pkg_str)
8352 return colorize("PKG_NOMERGE", pkg_str)
8355 properties = flatten(use_reduce(paren_reduce(
8356 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8357 except portage.exception.InvalidDependString, e:
8358 if not pkg.installed:
8359 show_invalid_depstring_notice(pkg,
8360 pkg.metadata["PROPERTIES"], str(e))
8364 interactive = "interactive" in properties
8365 if interactive and pkg.operation == "merge":
8366 addl = colorize("WARN", "I") + addl[1:]
8368 counters.interactive += 1
8373 if "--columns" in self.myopts:
8374 if "--quiet" in self.myopts:
8375 myprint=addl+" "+indent+pkgprint(pkg_cp)
8376 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8377 myprint=myprint+myoldbest
8378 myprint=myprint+darkgreen("to "+x[1])
8382 myprint = "[%s] %s%s" % \
8383 (pkgprint(pkg_status.ljust(13)),
8384 indent, pkgprint(pkg.cp))
8386 myprint = "[%s %s] %s%s" % \
8387 (pkgprint(pkg.type_name), addl,
8388 indent, pkgprint(pkg.cp))
8389 if (newlp-nc_len(myprint)) > 0:
8390 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8391 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8392 if (oldlp-nc_len(myprint)) > 0:
8393 myprint=myprint+" "*(oldlp-nc_len(myprint))
8394 myprint=myprint+myoldbest
8395 myprint += darkgreen("to " + pkg.root)
8398 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8400 myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8401 myprint += indent + pkgprint(pkg_key) + " " + \
8402 myoldbest + darkgreen("to " + myroot)
8404 if "--columns" in self.myopts:
8405 if "--quiet" in self.myopts:
8406 myprint=addl+" "+indent+pkgprint(pkg_cp)
8407 myprint=myprint+" "+green(xs[1]+xs[2])+" "
8408 myprint=myprint+myoldbest
8412 myprint = "[%s] %s%s" % \
8413 (pkgprint(pkg_status.ljust(13)),
8414 indent, pkgprint(pkg.cp))
8416 myprint = "[%s %s] %s%s" % \
8417 (pkgprint(pkg.type_name), addl,
8418 indent, pkgprint(pkg.cp))
8419 if (newlp-nc_len(myprint)) > 0:
8420 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8421 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8422 if (oldlp-nc_len(myprint)) > 0:
8423 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8424 myprint += myoldbest
8427 myprint = "[%s] %s%s %s" % \
8428 (pkgprint(pkg_status.ljust(13)),
8429 indent, pkgprint(pkg.cpv),
8432 myprint = "[%s %s] %s%s %s" % \
8433 (pkgprint(pkg_type), addl, indent,
8434 pkgprint(pkg.cpv), myoldbest)
8436 if columns and pkg.operation == "uninstall":
8438 p.append((myprint, verboseadd, repoadd))
8440 if "--tree" not in self.myopts and \
8441 "--quiet" not in self.myopts and \
8442 not self._opts_no_restart.intersection(self.myopts) and \
8443 pkg.root == self._running_root.root and \
8444 portage.match_from_list(
8445 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8446 not vardb.cpv_exists(pkg.cpv) and \
8447 "--quiet" not in self.myopts:
8448 if mylist_index < len(mylist) - 1:
8449 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8450 p.append(colorize("WARN", " then resume the merge."))
8453 show_repos = repoadd_set and repoadd_set != set(["0"])
8456 if isinstance(x, basestring):
8457 out.write("%s\n" % (x,))
8460 myprint, verboseadd, repoadd = x
8463 myprint += " " + verboseadd
8465 if show_repos and repoadd:
8466 myprint += " " + teal("[%s]" % repoadd)
8468 out.write("%s\n" % (myprint,))
8477 sys.stdout.write(str(repo_display))
8479 if "--changelog" in self.myopts:
8481 for revision,text in changelogs:
8482 print bold('*'+revision)
8483 sys.stdout.write(text)
8488 def display_problems(self):
8490 Display problems with the dependency graph such as slot collisions.
8491 This is called internally by display() to show the problems _after_
8492 the merge list where it is most likely to be seen, but if display()
8493 is not going to be called then this method should be called explicitly
8494 to ensure that the user is notified of problems with the graph.
8496 All output goes to stderr, except for unsatisfied dependencies which
8497 go to stdout for parsing by programs such as autounmask.
8500 # Note that show_masked_packages() sends its output to
8501 # stdout, and some programs such as autounmask parse the
8502 # output in cases when emerge bails out. However, when
8503 # show_masked_packages() is called for installed packages
8504 # here, the message is a warning that is more appropriate
8505 # to send to stderr, so temporarily redirect stdout to
8506 # stderr. TODO: Fix output code so there's a cleaner way
8507 # to redirect everything to stderr.
8512 sys.stdout = sys.stderr
8513 self._display_problems()
8519 # This goes to stdout for parsing by programs like autounmask.
8520 for pargs, kwargs in self._unsatisfied_deps_for_display:
8521 self._show_unsatisfied_dep(*pargs, **kwargs)
8523 def _display_problems(self):
8524 if self._circular_deps_for_display is not None:
8525 self._show_circular_deps(
8526 self._circular_deps_for_display)
8528 # The user is only notified of a slot conflict if
8529 # there are no unresolvable blocker conflicts.
8530 if self._unsatisfied_blockers_for_display is not None:
8531 self._show_unsatisfied_blockers(
8532 self._unsatisfied_blockers_for_display)
8534 self._show_slot_collision_notice()
8536 # TODO: Add generic support for "set problem" handlers so that
8537 # the below warnings aren't special cases for world only.
8539 if self._missing_args:
8540 world_problems = False
8541 if "world" in self._sets:
8542 # Filter out indirect members of world (from nested sets)
8543 # since only direct members of world are desired here.
8544 world_set = self.roots[self.target_root].sets["world"]
8545 for arg, atom in self._missing_args:
8546 if arg.name == "world" and atom in world_set:
8547 world_problems = True
8551 sys.stderr.write("\n!!! Problems have been " + \
8552 "detected with your world file\n")
8553 sys.stderr.write("!!! Please run " + \
8554 green("emaint --check world")+"\n\n")
8556 if self._missing_args:
8557 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8558 " Ebuilds for the following packages are either all\n")
8559 sys.stderr.write(colorize("BAD", "!!!") + \
8560 " masked or don't exist:\n")
8561 sys.stderr.write(" ".join(str(atom) for arg, atom in \
8562 self._missing_args) + "\n")
8564 if self._pprovided_args:
8566 for arg, atom in self._pprovided_args:
8567 if isinstance(arg, SetArg):
8569 arg_atom = (atom, atom)
8572 arg_atom = (arg.arg, atom)
8573 refs = arg_refs.setdefault(arg_atom, [])
8574 if parent not in refs:
8577 msg.append(bad("\nWARNING: "))
8578 if len(self._pprovided_args) > 1:
8579 msg.append("Requested packages will not be " + \
8580 "merged because they are listed in\n")
8582 msg.append("A requested package will not be " + \
8583 "merged because it is listed in\n")
8584 msg.append("package.provided:\n\n")
8585 problems_sets = set()
8586 for (arg, atom), refs in arg_refs.iteritems():
8589 problems_sets.update(refs)
8591 ref_string = ", ".join(["'%s'" % name for name in refs])
8592 ref_string = " pulled in by " + ref_string
8593 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8595 if "world" in problems_sets:
8596 msg.append("This problem can be solved in one of the following ways:\n\n")
8597 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
8598 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
8599 msg.append(" C) Remove offending entries from package.provided.\n\n")
8600 msg.append("The best course of action depends on the reason that an offending\n")
8601 msg.append("package.provided entry exists.\n\n")
8602 sys.stderr.write("".join(msg))
8604 masked_packages = []
8605 for pkg in self._masked_installed:
8606 root_config = pkg.root_config
8607 pkgsettings = self.pkgsettings[pkg.root]
8608 mreasons = get_masking_status(pkg, pkgsettings, root_config)
8609 masked_packages.append((root_config, pkgsettings,
8610 pkg.cpv, pkg.metadata, mreasons))
8612 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8613 " The following installed packages are masked:\n")
8614 show_masked_packages(masked_packages)
8618 def calc_changelog(self,ebuildpath,current,next):
8619 if ebuildpath == None or not os.path.exists(ebuildpath):
8621 current = '-'.join(portage.catpkgsplit(current)[1:])
8622 if current.endswith('-r0'):
8623 current = current[:-3]
8624 next = '-'.join(portage.catpkgsplit(next)[1:])
8625 if next.endswith('-r0'):
8627 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8629 changelog = open(changelogpath).read()
8630 except SystemExit, e:
8631 raise # Needed else can't exit
8634 divisions = self.find_changelog_tags(changelog)
8635 #print 'XX from',current,'to',next
8636 #for div,text in divisions: print 'XX',div
8637 # skip entries for all revisions above the one we are about to emerge
8638 for i in range(len(divisions)):
8639 if divisions[i][0]==next:
8640 divisions = divisions[i:]
8642 # find out how many entries we are going to display
8643 for i in range(len(divisions)):
8644 if divisions[i][0]==current:
8645 divisions = divisions[:i]
8648 # couldn't find the current revision in the list; display nothing
8652 def find_changelog_tags(self,changelog):
8656 match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8658 if release is not None:
8659 divs.append((release,changelog))
8661 if release is not None:
8662 divs.append((release,changelog[:match.start()]))
8663 changelog = changelog[match.end():]
8664 release = match.group(1)
8665 if release.endswith('.ebuild'):
8666 release = release[:-7]
8667 if release.endswith('-r0'):
8668 release = release[:-3]
8670 def saveNomergeFavorites(self):
8671 """Find atoms in favorites that are not in the mergelist and add them
8672 to the world file if necessary."""
8673 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8674 "--oneshot", "--onlydeps", "--pretend"):
8675 if x in self.myopts:
8677 root_config = self.roots[self.target_root]
8678 world_set = root_config.sets["world"]
8680 world_locked = False
8681 if hasattr(world_set, "lock"):
8685 if hasattr(world_set, "load"):
8686 world_set.load() # maybe it's changed on disk
8688 args_set = self._sets["args"]
8689 portdb = self.trees[self.target_root]["porttree"].dbapi
8690 added_favorites = set()
8691 for x in self._set_nodes:
8692 pkg_type, root, pkg_key, pkg_status = x
8693 if pkg_status != "nomerge":
8697 myfavkey = create_world_atom(x, args_set, root_config)
8699 if myfavkey in added_favorites:
8701 added_favorites.add(myfavkey)
8702 except portage.exception.InvalidDependString, e:
8703 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8704 (pkg_key, str(e)), noiselevel=-1)
8705 writemsg("!!! see '%s'\n\n" % os.path.join(
8706 root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8709 for k in self._sets:
8710 if k in ("args", "world") or not root_config.sets[k].world_candidate:
8715 all_added.append(SETPREFIX + k)
8716 all_added.extend(added_favorites)
8719 print ">>> Recording %s in \"world\" favorites file..." % \
8720 colorize("INFORM", str(a))
8722 world_set.update(all_added)
8727 def loadResumeCommand(self, resume_data, skip_masked=False):
8729 Add a resume command to the graph and validate it in the process. This
8730 will raise a PackageNotFound exception if a package is not available.
8733 if not isinstance(resume_data, dict):
8736 mergelist = resume_data.get("mergelist")
8737 if not isinstance(mergelist, list):
8740 fakedb = self.mydbapi
8742 serialized_tasks = []
8745 if not (isinstance(x, list) and len(x) == 4):
8747 pkg_type, myroot, pkg_key, action = x
8748 if pkg_type not in self.pkg_tree_map:
8750 if action != "merge":
8752 tree_type = self.pkg_tree_map[pkg_type]
8753 mydb = trees[myroot][tree_type].dbapi
8754 db_keys = list(self._trees_orig[myroot][
8755 tree_type].dbapi._aux_cache_keys)
8757 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8759 # It does not exist or it is corrupt.
8760 if action == "uninstall":
8762 raise portage.exception.PackageNotFound(pkg_key)
8763 installed = action == "uninstall"
8764 built = pkg_type != "ebuild"
8765 root_config = self.roots[myroot]
8766 pkg = Package(built=built, cpv=pkg_key,
8767 installed=installed, metadata=metadata,
8768 operation=action, root_config=root_config,
8770 if pkg_type == "ebuild":
8771 pkgsettings = self.pkgsettings[myroot]
8772 pkgsettings.setcpv(pkg)
8773 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8774 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8775 self._pkg_cache[pkg] = pkg
8777 root_config = self.roots[pkg.root]
8778 if "merge" == pkg.operation and \
8779 not visible(root_config.settings, pkg):
8781 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8783 self._unsatisfied_deps_for_display.append(
8784 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8786 fakedb[myroot].cpv_inject(pkg)
8787 serialized_tasks.append(pkg)
8788 self.spinner.update()
8790 if self._unsatisfied_deps_for_display:
8793 if not serialized_tasks or "--nodeps" in self.myopts:
8794 self._serialized_tasks_cache = serialized_tasks
8795 self._scheduler_graph = self.digraph
8797 self._select_package = self._select_pkg_from_graph
8798 self.myparams.add("selective")
8799 # Always traverse deep dependencies in order to account for
8800 # potentially unsatisfied dependencies of installed packages.
8801 # This is necessary for correct --keep-going or --resume operation
8802 # in case a package from a group of circularly dependent packages
8803 # fails. In this case, a package which has recently been installed
8804 # may have an unsatisfied circular dependency (pulled in by
8805 # PDEPEND, for example). So, even though a package is already
8806 # installed, it may not have all of its dependencies satisfied, so
8807 # it may not be usable. If such a package is in the subgraph of
8808 # deep dependencies of a scheduled build, that build needs to
8809 # be cancelled. In order for this type of situation to be
8810 # recognized, deep traversal of dependencies is required.
8811 self.myparams.add("deep")
8813 favorites = resume_data.get("favorites")
8814 args_set = self._sets["args"]
8815 if isinstance(favorites, list):
8816 args = self._load_favorites(favorites)
8820 for task in serialized_tasks:
8821 if isinstance(task, Package) and \
8822 task.operation == "merge":
8823 if not self._add_pkg(task, None):
8826 # Packages for argument atoms need to be explicitly
8827 # added via _add_pkg() so that they are included in the
8828 # digraph (needed at least for --tree display).
8830 for atom in arg.set:
8831 pkg, existing_node = self._select_package(
8832 arg.root_config.root, atom)
8833 if existing_node is None and \
8835 if not self._add_pkg(pkg, Dependency(atom=atom,
8836 root=pkg.root, parent=arg)):
8839 # Allow unsatisfied deps here to avoid showing a masking
8840 # message for an unsatisfied dep that isn't necessarily
8842 if not self._create_graph(allow_unsatisfied=True):
8845 unsatisfied_deps = []
8846 for dep in self._unsatisfied_deps:
8847 if not isinstance(dep.parent, Package):
8849 if dep.parent.operation == "merge":
8850 unsatisfied_deps.append(dep)
8853 # For unsatisfied deps of installed packages, only account for
8854 # them if they are in the subgraph of dependencies of a package
8855 # which is scheduled to be installed.
8856 unsatisfied_install = False
8858 dep_stack = self.digraph.parent_nodes(dep.parent)
8860 node = dep_stack.pop()
8861 if not isinstance(node, Package):
8863 if node.operation == "merge":
8864 unsatisfied_install = True
8866 if node in traversed:
8869 dep_stack.extend(self.digraph.parent_nodes(node))
8871 if unsatisfied_install:
8872 unsatisfied_deps.append(dep)
8874 if masked_tasks or unsatisfied_deps:
8875 # This probably means that a required package
8876 # was dropped via --skipfirst. It makes the
8877 # resume list invalid, so convert it to a
8878 # UnsatisfiedResumeDep exception.
8879 raise self.UnsatisfiedResumeDep(self,
8880 masked_tasks + unsatisfied_deps)
8881 self._serialized_tasks_cache = None
8884 except self._unknown_internal_error:
8889 def _load_favorites(self, favorites):
8891 Use a list of favorites to resume state from a
8892 previous select_files() call. This creates similar
8893 DependencyArg instances to those that would have
8894 been created by the original select_files() call.
8895 This allows Package instances to be matched with
8896 DependencyArg instances during graph creation.
8898 root_config = self.roots[self.target_root]
8899 getSetAtoms = root_config.setconfig.getSetAtoms
8900 sets = root_config.sets
8903 if not isinstance(x, basestring):
8905 if x in ("system", "world"):
8907 if x.startswith(SETPREFIX):
8908 s = x[len(SETPREFIX):]
8913 # Recursively expand sets so that containment tests in
8914 # self._get_parent_sets() properly match atoms in nested
8915 # sets (like if world contains system).
8916 expanded_set = InternalPackageSet(
8917 initial_atoms=getSetAtoms(s))
8918 self._sets[s] = expanded_set
8919 args.append(SetArg(arg=x, set=expanded_set,
8920 root_config=root_config))
8922 if not portage.isvalidatom(x):
8924 args.append(AtomArg(arg=x, atom=x,
8925 root_config=root_config))
8927 self._set_args(args)
8930 class UnsatisfiedResumeDep(portage.exception.PortageException):
8932 A dependency of a resume list is not installed. This
8933 can occur when a required package is dropped from the
8934 merge list via --skipfirst.
8936 def __init__(self, depgraph, value):
8937 portage.exception.PortageException.__init__(self, value)
8938 self.depgraph = depgraph
class _internal_exception(portage.exception.PortageException):
	"""Base class for exceptions that the depgraph raises and handles
	internally (they are not meant to escape to callers)."""

	def __init__(self, value=""):
		# Forward the optional message to the PortageException base class.
		portage.exception.PortageException.__init__(self, value)
8944 class _unknown_internal_error(_internal_exception):
8946 Used by the depgraph internally to terminate graph creation.
8947 The specific reason for the failure should have been dumped
8948 to stderr, unfortunately, the exact reason for the failure
8952 class _serialize_tasks_retry(_internal_exception):
8954 This is raised by the _serialize_tasks() method when it needs to
8955 be called again for some reason. The only case that it's currently
8956 used for is when neglected dependencies need to be added to the
8957 graph in order to avoid making a potentially unsafe decision.
class _dep_check_composite_db(portage.dbapi):
    """
    A dbapi-like interface that is optimized for use in dep_check() calls.
    This is built on top of the existing depgraph package selection logic.
    Some packages that have been added to the graph may be masked from this
    view in order to influence the atom preference selection that occurs
    """
    # NOTE(review): this listing elides a number of original lines (cache-hit
    # early returns, 'try:'/'continue' statements, loop headers, blank lines);
    # indentation below is reconstructed -- confirm against upstream emerge.

    def __init__(self, depgraph, root):
        portage.dbapi.__init__(self)
        self._depgraph = depgraph
        # NOTE(review): an assignment of self._root is presumably elided here.
        self._match_cache = {}
        self._cpv_pkg_map = {}

    def _clear_cache(self):
        # Drop all memoized match() results and the cpv -> Package mapping
        # that backs aux_get().
        self._match_cache.clear()
        self._cpv_pkg_map.clear()

    def match(self, atom):
        # Memoized package selection for dep_check(); the cache-hit return
        # path appears to be elided from this listing.
        ret = self._match_cache.get(atom)
        atom = self._dep_expand(atom)
        pkg, existing = self._depgraph._select_package(self._root, atom)
        # Return the highest available from select_package() as well as
        # any matching slots in the graph db.
        slots.add(pkg.metadata["SLOT"])  # 'slots' initialization elided above
        atom_cp = portage.dep_getkey(atom)
        if pkg.cp.startswith("virtual/"):
            # For new-style virtual lookahead that occurs inside
            # dep_check(), examine all slots. This is needed
            # so that newer slots will not unnecessarily be pulled in
            # when a satisfying lower slot is already installed. For
            # example, if virtual/jdk-1.4 is satisfied via kaffe then
            # there's no need to pull in a newer slot to satisfy a
            # virtual/jdk dependency.
            for db, pkg_type, built, installed, db_keys in \
                self._depgraph._filtered_trees[self._root]["dbs"]:
                for cpv in db.match(atom):
                    if portage.cpv_getkey(cpv) != pkg.cp:
                        # guard body (presumably 'continue') elided in listing
                    slots.add(db.aux_get(cpv, ["SLOT"])[0])
        if self._visible(pkg):
            self._cpv_pkg_map[pkg.cpv] = pkg
        slots.remove(pkg.metadata["SLOT"])
        # For each remaining slot, ask the depgraph for a visible package.
        slot_atom = "%s:%s" % (atom_cp, slots.pop())
        pkg, existing = self._depgraph._select_package(
            self._root, slot_atom)
        if not self._visible(pkg):
            # guard body (presumably 'continue') elided in listing
        self._cpv_pkg_map[pkg.cpv] = pkg
        self._cpv_sort_ascending(ret)
        self._match_cache[orig_atom] = ret  # 'orig_atom' binding elided above

    def _visible(self, pkg):
        # Decide whether pkg may be offered to dep_check() as a choice;
        # several 'try:'/'return' lines are elided in this listing.
        if pkg.installed and "selective" not in self._depgraph.myparams:
            arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
        except (StopIteration, portage.exception.InvalidDependString):
            self._depgraph.pkgsettings[pkg.root], pkg):
        except portage.exception.InvalidDependString:
        in_graph = self._depgraph._slot_pkg_map[
            self._root].get(pkg.slot_atom)
        if in_graph is None:
            # Mask choices for packages which are not the highest visible
            # version within their slot (since they usually trigger slot
            highest_visible, in_graph = self._depgraph._select_package(
                self._root, pkg.slot_atom)
            if pkg != highest_visible:
        elif in_graph != pkg:
            # Mask choices for packages that would trigger a slot
            # conflict with a previously selected package.

    def _dep_expand(self, atom):
        """
        This is only needed for old installed packages that may
        contain atoms that are not fully qualified with a specific
        category. Emulate the cpv_expand() function that's used by
        dbapi.match() in cases like this. If there are multiple
        matches, it's often due to a new-style virtual that has
        been added, so try to filter those out to avoid raising
        """
        root_config = self._depgraph.roots[self._root]
        expanded_atoms = self._depgraph._dep_expand(root_config, atom)
        if len(expanded_atoms) > 1:
            # Prefer the non-virtual candidates when the expansion is
            # ambiguous, since new-style virtuals commonly cause this.
            non_virtual_atoms = []
            for x in expanded_atoms:
                if not portage.dep_getkey(x).startswith("virtual/"):
                    non_virtual_atoms.append(x)
            if len(non_virtual_atoms) == 1:
                expanded_atoms = non_virtual_atoms
        if len(expanded_atoms) > 1:
            # compatible with portage.cpv_expand()
            raise portage.exception.AmbiguousPackageName(
                [portage.dep_getkey(x) for x in expanded_atoms])
        atom = expanded_atoms[0]
        null_atom = insert_category_into_atom(atom, "null")
        null_cp = portage.dep_getkey(null_atom)
        cat, atom_pn = portage.catsplit(null_cp)
        virts_p = root_config.settings.get_virts_p().get(atom_pn)
        # Allow the resolver to choose which virtual.
        atom = insert_category_into_atom(atom, "virtual")
        atom = insert_category_into_atom(atom, "null")

    def aux_get(self, cpv, wants):
        # Serve metadata for a cpv previously returned by match(); missing
        # keys yield empty strings.
        metadata = self._cpv_pkg_map[cpv].metadata
        return [metadata.get(x, "") for x in wants]
class RepoDisplay(object):
    # Maps repository paths (PORTDIR + overlays) to the short numeric
    # indices shown in the merge-list output.
    # NOTE(review): several lines are elided in this listing, including the
    # 'repo_paths = set()' construction, None-checks, and the 'def __str__'
    # header for the legend-building code at the bottom.

    def __init__(self, roots):
        self._shown_repos = {}
        self._unknown_repo = False
        for root_config in roots.itervalues():
            portdir = root_config.settings.get("PORTDIR")
            repo_paths.add(portdir)  # 'repo_paths' set construction elided
            overlays = root_config.settings.get("PORTDIR_OVERLAY")
            repo_paths.update(overlays.split())
        repo_paths = list(repo_paths)
        self._repo_paths = repo_paths
        # Resolve symlinks once so repoStr() can look paths up by realpath.
        self._repo_paths_real = [ os.path.realpath(repo_path) \
            for repo_path in repo_paths ]

        # pre-allocate index for PORTDIR so that it always has index 0.
        for root_config in roots.itervalues():
            portdb = root_config.trees["porttree"].dbapi
            portdir = portdb.porttree_root
            self.repoStr(portdir)

    def repoStr(self, repo_path_real):
        # Return a short display token for a repository path, assigning a
        # new index the first time a repo is seen.
        real_index = self._repo_paths_real.index(repo_path_real)
        # NOTE(review): list.index() raises ValueError rather than returning
        # -1, so the guard below implies an elided membership check above.
        if real_index == -1:
            self._unknown_repo = True
        shown_repos = self._shown_repos
        repo_paths = self._repo_paths
        repo_path = repo_paths[real_index]
        index = shown_repos.get(repo_path)
        index = len(shown_repos)
        shown_repos[repo_path] = index

        # The lines below belong to an elided 'def __str__' that renders the
        # "Portage tree and overlays" legend.
        shown_repos = self._shown_repos
        unknown_repo = self._unknown_repo
        if shown_repos or self._unknown_repo:
            output.append("Portage tree and overlays:\n")  # 'output' init elided
            show_repo_paths = list(shown_repos)
            for repo_path, repo_index in shown_repos.iteritems():
                show_repo_paths[repo_index] = repo_path
            for index, repo_path in enumerate(show_repo_paths):
                output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
            output.append(" "+teal("[?]") + \
                " indicates that the source repository could not be determined\n")
        return "".join(output)
class PackageCounters(object):
    # Tallies (upgrades, downgrades, new, reinstalls, uninstalls, blocks,
    # fetch restrictions, interactive) for the summary line printed after
    # the merge list.
    # NOTE(review): the '__init__'/'__str__' headers, most counter
    # initializations, and the 'myoutput'/'details' list setup are elided
    # in this listing; indentation is reconstructed.
        self.blocks_satisfied = 0
        self.restrict_fetch = 0
        self.restrict_fetch_satisfied = 0
        self.interactive = 0

        total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
        myoutput.append("Total: %s package" % total_installs)
        if total_installs != 1:
            myoutput.append("s")
        if total_installs != 0:
            myoutput.append(" (")
        if self.upgrades > 0:
            details.append("%s upgrade" % self.upgrades)
            if self.upgrades > 1:
        if self.downgrades > 0:
            details.append("%s downgrade" % self.downgrades)
            if self.downgrades > 1:
        details.append("%s new" % self.new)
        if self.newslot > 0:
            details.append("%s in new slot" % self.newslot)
            if self.newslot > 1:
        details.append("%s reinstall" % self.reinst)
        details.append("%s uninstall" % self.uninst)
        if self.interactive > 0:
            details.append("%s %s" % (self.interactive,
                colorize("WARN", "interactive")))
        myoutput.append(", ".join(details))
        if total_installs != 0:
            myoutput.append(")")
        myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
        if self.restrict_fetch:
            myoutput.append("\nFetch Restriction: %s package" % \
                self.restrict_fetch)
            if self.restrict_fetch > 1:
                myoutput.append("s")
        if self.restrict_fetch_satisfied < self.restrict_fetch:
            myoutput.append(bad(" (%s unsatisfied)") % \
                (self.restrict_fetch - self.restrict_fetch_satisfied))
        # NOTE(review): the '%' operand (presumably self.blocks) for the
        # line below is elided in this listing.
        myoutput.append("\nConflict: %s block" % \
        myoutput.append("s")
        if self.blocks_satisfied < self.blocks:
            myoutput.append(bad(" (%s unsatisfied)") % \
                (self.blocks - self.blocks_satisfied))
        return "".join(myoutput)
class PollSelectAdapter(PollConstants):

    """
    Use select to emulate a poll object, for
    systems that don't support poll().
    """
    # NOTE(review): the '__init__'/'poll' headers, the TypeError raise
    # statements, and the 'timeout' extraction from *args are elided in
    # this listing.

        self._registered = {}
        self._select_args = [[], [], []]

    def register(self, fd, *args):
        """
        Only POLLIN is currently supported!
        """
            # remainder of an elided TypeError raise:
            "register expected at most 2 arguments, got " + \
            repr(1 + len(args)))
        eventmask = PollConstants.POLLIN | \
            PollConstants.POLLPRI | PollConstants.POLLOUT
        self._registered[fd] = eventmask
        # Invalidate the cached select() argument lists.
        self._select_args = None

    def unregister(self, fd):
        # Invalidate the cached args, then forget the descriptor.
        self._select_args = None
        del self._registered[fd]

    def poll(self, *args):
            # remainder of an elided TypeError raise:
            "poll expected at most 2 arguments, got " + \
            repr(1 + len(args)))
        select_args = self._select_args
        if select_args is None:
            # Rebuild: watch all registered fds for readability only.
            select_args = [self._registered.keys(), [], []]

        if timeout is not None:  # 'timeout' extraction from args elided
            select_args = select_args[:]
            # Translate poll() timeout args to select() timeout args:
            #
            #          | units        | value(s) for indefinite block
            # ---------|--------------|------------------------------
            # poll     | milliseconds | omitted, negative, or None
            # ---------|--------------|------------------------------
            # select   | seconds      | omitted
            # ---------|--------------|------------------------------
            if timeout is not None and timeout < 0:
            if timeout is not None:
                # NOTE(review): on Python 2 this integer division truncates
                # sub-second timeouts to 0 (non-blocking) -- confirm intended.
                select_args.append(timeout / 1000)

        select_events = select.select(*select_args)
        # Report every readable fd as POLLIN ('poll_events' init elided).
        for fd in select_events[0]:
            poll_events.append((fd, PollConstants.POLLIN))
class SequentialTaskQueue(SlotObject):
    # FIFO queue of tasks that runs at most max_jobs tasks concurrently
    # (max_jobs is True means unlimited, per the loop condition below).
    # NOTE(review): several lines are elided in this listing, including the
    # schedule()/clear()/__len__ method headers, the default max_jobs
    # assignment, and cancelled-task handling.

    __slots__ = ("max_jobs", "running_tasks") + \
        ("_dirty", "_scheduling", "_task_queue")

    def __init__(self, **kwargs):
        SlotObject.__init__(self, **kwargs)
        self._task_queue = deque()
        self.running_tasks = set()
        if self.max_jobs is None:

    def add(self, task):
        self._task_queue.append(task)

    def addFront(self, task):
        # Insert at the head so the task runs before already-queued work.
        self._task_queue.appendleft(task)

        if self._scheduling:
            # Ignore any recursive schedule() calls triggered via
            # self._task_exit().
        self._scheduling = True

        task_queue = self._task_queue
        running_tasks = self.running_tasks
        max_jobs = self.max_jobs
        state_changed = False

        # Start queued tasks while there is capacity.
        while task_queue and \
            (max_jobs is True or len(running_tasks) < max_jobs):
            task = task_queue.popleft()
            cancelled = getattr(task, "cancelled", None)
            running_tasks.add(task)
            task.addExitListener(self._task_exit)
            state_changed = True

        self._scheduling = False

        return state_changed

    def _task_exit(self, task):
        """
        Since we can always rely on exit listeners being called, the set of
        running tasks is always pruned automatically and there is never any need
        to actively prune it.
        """
        self.running_tasks.remove(task)
        if self._task_queue:

        # Lines below belong to an elided clear() method: drop queued tasks
        # and detach exit listeners from any still-running ones.
        self._task_queue.clear()
        running_tasks = self.running_tasks
        while running_tasks:
            task = running_tasks.pop()
            task.removeExitListener(self._task_exit)

    def __nonzero__(self):
        # Truthy while any work is queued or running.
        return bool(self._task_queue or self.running_tasks)

        return len(self._task_queue) + len(self.running_tasks)
# Memoized result of can_poll_device(); None means "not yet probed".
_can_poll_device = None

def can_poll_device():
    """
    Test if it's possible to use poll() on a device such as a pty. This
    is known to fail on Darwin.
    @returns: True if poll() on a device succeeds, False otherwise.
    """
    # NOTE(review): this listing elides a 'try:'/'except' around the open()
    # below and the 'p = create_poll_instance()'-style assignment before
    # p.register() -- confirm against upstream.
    global _can_poll_device
    if _can_poll_device is not None:
        return _can_poll_device

    if not hasattr(select, "poll"):
        _can_poll_device = False
        return _can_poll_device

    dev_null = open('/dev/null', 'rb')
        _can_poll_device = False
        return _can_poll_device
    p.register(dev_null.fileno(), PollConstants.POLLIN)

    invalid_request = False
    for f, event in p.poll():
        if event & PollConstants.POLLNVAL:
            invalid_request = True
    # poll() works on devices only if it did not flag the request invalid.
    _can_poll_device = not invalid_request
    return _can_poll_device
def create_poll_instance():
    """
    Create an instance of select.poll, or an instance of
    PollSelectAdapter if there is no poll() implementation or
    it is broken somehow.
    """
    # The docstring above was missing its triple-quote delimiters in the
    # original (a SyntaxError as written) and read "PollSelectAdapter there
    # is no"; both fixed. can_poll_device() memoizes the runtime probe.
    if can_poll_device():
        return select.poll()
    return PollSelectAdapter()
# Use os.getloadavg() when available, otherwise fall back to /proc/loadavg.
getloadavg = getattr(os, "getloadavg", None)
if getloadavg is None:
    # NOTE(review): the 'def getloadavg():' header, the try/except wrappers
    # around the reads/parses, and the 'loadavg_floats = []' initialization
    # are elided in this listing.
        Uses /proc/loadavg to emulate os.getloadavg().
        Raises OSError if the load average was unobtainable.
        loadavg_str = open('/proc/loadavg').readline()
        # getloadavg() is only supposed to raise OSError, so convert
        raise OSError('unknown')
        loadavg_split = loadavg_str.split()
        if len(loadavg_split) < 3:
            raise OSError('unknown')
        loadavg_floats.append(float(loadavg_split[i]))
        raise OSError('unknown')
        return tuple(loadavg_floats)
class PollScheduler(object):
    # Base class providing a poll()-driven event loop. Subclasses supply
    # _schedule_tasks(); fds are registered with handlers via _register().
    # NOTE(review): many lines are elided in this listing ('__init__'
    # header, 'try:'/'finally:' statements, early returns, loop bodies);
    # indentation below is reconstructed -- confirm against upstream.

    class _sched_iface_class(SlotObject):
        # Narrow interface handed to tasks: register/schedule/unregister.
        __slots__ = ("register", "schedule", "unregister")

        self._max_load = None
        self._poll_event_queue = []
        self._poll_event_handlers = {}
        self._poll_event_handler_ids = {}
        # Increment id for each new handler.
        self._event_handler_id = 0
        self._poll_obj = create_poll_instance()
        self._scheduling = False

    def _schedule(self):
        """
        Calls _schedule_tasks() and automatically returns early from
        any recursive calls to this method that the _schedule_tasks()
        call might trigger. This makes _schedule() safe to call from
        inside exit listeners.
        """
        if self._scheduling:
        self._scheduling = True
        return self._schedule_tasks()
        self._scheduling = False

    def _running_job_count(self):

    def _can_add_job(self):
        # Enforce both the --jobs limit and the --load-average limit;
        # the 'return' statements are elided in this listing.
        max_jobs = self._max_jobs
        max_load = self._max_load

        if self._max_jobs is not True and \
            self._running_job_count() >= self._max_jobs:

        if max_load is not None and \
            (max_jobs is True or max_jobs > 1) and \
            self._running_job_count() >= 1:
            avg1, avg5, avg15 = getloadavg()
            if avg1 >= max_load:

    def _poll(self, timeout=None):
        """
        All poll() calls pass through here. The poll events
        are added directly to self._poll_event_queue.
        In order to avoid endless blocking, this raises
        StopIteration if timeout is None and there are
        no file descriptors to poll.
        """
        if not self._poll_event_handlers:
        if timeout is None and \
            not self._poll_event_handlers:
            raise StopIteration(
                "timeout is None and there are no poll() event handlers")

        # The following error is known to occur with Linux kernel versions
        #
        #   select.error: (4, 'Interrupted system call')
        #
        # This error has been observed after a SIGSTOP, followed by SIGCONT.
        # Treat it similar to EAGAIN if timeout is None, otherwise just return
        # without any events.
        self._poll_event_queue.extend(self._poll_obj.poll(timeout))
        except select.error, e:
            writemsg_level("\n!!! select error: %s\n" % (e,),
                level=logging.ERROR, noiselevel=-1)
            if timeout is not None:

    def _next_poll_event(self, timeout=None):
        """
        Since the _schedule_wait() loop is called by event
        handlers from _poll_loop(), maintain a central event
        queue for both of them to share events from a single
        poll() call. In order to avoid endless blocking, this
        raises StopIteration if timeout is None and there are
        no file descriptors to poll.
        """
        if not self._poll_event_queue:
        return self._poll_event_queue.pop()

    def _poll_loop(self):
        # Service events until all handlers have unregistered themselves;
        # the handler invocation lines are elided in this listing.

        event_handlers = self._poll_event_handlers
        event_handled = False

        while event_handlers:
            f, event = self._next_poll_event()
            handler, reg_id = event_handlers[f]
            event_handled = True
        except StopIteration:
            event_handled = True

        if not event_handled:
            raise AssertionError("tight loop")

    def _schedule_yield(self):
        """
        Schedule for a short period of time chosen by the scheduler based
        on internal state. Synchronous tasks should call this periodically
        in order to allow the scheduler to service pending poll events. The
        scheduler will call poll() exactly once, without blocking, and any
        resulting poll events will be serviced.
        """
        event_handlers = self._poll_event_handlers
        if not event_handlers:
            return bool(events_handled)  # 'events_handled' init elided

        if not self._poll_event_queue:

        while event_handlers and self._poll_event_queue:
            f, event = self._next_poll_event()
            handler, reg_id = event_handlers[f]
        except StopIteration:

        return bool(events_handled)

    def _register(self, f, eventmask, handler):
        """
        @return: A unique registration id, for use in schedule() or
        """
        if f in self._poll_event_handlers:
            raise AssertionError("fd %d is already registered" % f)
        self._event_handler_id += 1
        reg_id = self._event_handler_id
        self._poll_event_handler_ids[reg_id] = f
        self._poll_event_handlers[f] = (handler, reg_id)
        self._poll_obj.register(f, eventmask)

    def _unregister(self, reg_id):
        # Reverse of _register(): drop both maps and the poll registration.
        f = self._poll_event_handler_ids[reg_id]
        self._poll_obj.unregister(f)
        del self._poll_event_handlers[f]
        del self._poll_event_handler_ids[reg_id]

    def _schedule_wait(self, wait_ids):
        """
        Schedule until wait_id is not longer registered
        @param wait_id: a task id to wait for
        """
        event_handlers = self._poll_event_handlers
        handler_ids = self._poll_event_handler_ids
        event_handled = False

        # Accept a single registration id as a convenience.
        if isinstance(wait_ids, int):
            wait_ids = frozenset([wait_ids])

        # Pump events until every awaited id has unregistered.
        while wait_ids.intersection(handler_ids):
            f, event = self._next_poll_event()
            handler, reg_id = event_handlers[f]
            event_handled = True
        except StopIteration:
            event_handled = True

        return event_handled
class QueueScheduler(PollScheduler):

    """
    Add instances of SequentialTaskQueue and then call run(). The
    run() method returns when no tasks remain.
    """
    # NOTE(review): several lines are elided in this listing, including the
    # add()/run() method headers, the 'self._queues = []' initialization,
    # default max_jobs handling, and various returns/loop bodies.

    def __init__(self, max_jobs=None, max_load=None):
        PollScheduler.__init__(self)

        if max_jobs is None:

        self._max_jobs = max_jobs
        self._max_load = max_load
        # Expose the narrow scheduling interface to queued tasks.
        self.sched_iface = self._sched_iface_class(
            register=self._register,
            schedule=self._schedule_wait,
            unregister=self._unregister)

        self._schedule_listeners = []
        self._queues.append(q)  # belongs to an elided 'def add(self, q)'

    def remove(self, q):
        self._queues.remove(q)

        # Lines below belong to an elided run(): drain scheduling, then
        # wait out the remaining running jobs.
        while self._schedule():
        while self._running_job_count():

    def _schedule_tasks(self):
        """
        @returns: True if there may be remaining tasks to schedule,
        """
        while self._can_add_job():
            n = self._max_jobs - self._running_job_count()
            if not self._start_next_job(n):

        for q in self._queues:

    def _running_job_count(self):
        # Recount running jobs across all queues and cache in self._jobs
        # ('job_count = 0' initialization elided in this listing).
        for q in self._queues:
            job_count += len(q.running_tasks)
        self._jobs = job_count

    def _start_next_job(self, n=1):
        # Try to start up to n jobs across the queues and report how many
        # actually started ('started_count = 0' init and the q.schedule()
        # call are elided in this listing).
        for q in self._queues:
            initial_job_count = len(q.running_tasks)
            final_job_count = len(q.running_tasks)
            if final_job_count > initial_job_count:
                started_count += (final_job_count - initial_job_count)
            if started_count >= n:
        return started_count
class TaskScheduler(object):

    """
    A simple way to handle scheduling of AsynchronousTask instances. Simply
    add tasks and call run(). The run() method returns when no tasks remain.
    """
    # The docstring above was missing its triple-quote delimiters in the
    # original (a SyntaxError as written) and misspelled "AsynchrousTask";
    # both fixed. Composition: a single SequentialTaskQueue driven by a
    # QueueScheduler, with the scheduler's interface re-exported.

    def __init__(self, max_jobs=None, max_load=None):
        self._queue = SequentialTaskQueue(max_jobs=max_jobs)
        self._scheduler = QueueScheduler(
            max_jobs=max_jobs, max_load=max_load)
        # Delegate the scheduling interface and run() to the scheduler.
        self.sched_iface = self._scheduler.sched_iface
        self.run = self._scheduler.run
        self._scheduler.add(self._queue)

    def add(self, task):
        self._queue.add(task)
class JobStatusDisplay(object):
    # Renders the one-line "Jobs: x of y complete ... Load avg: ..." status
    # on a terminal, falling back to plain output for non-ttys.
    # NOTE(review): numerous lines are elided in this listing (the term-code
    # dict literals, several method headers, try/except statements, else
    # branches); indentation below is reconstructed -- confirm upstream.

    # Attributes whose assignment triggers _property_change() (see
    # __setattr__ below).
    _bound_properties = ("curval", "failed", "running")
    _jobs_column_width = 48

    # Don't update the display unless at least this much
    # time has passed, in units of seconds.
    _min_display_latency = 2

    _default_term_codes = {

    _termcap_name_map = {
        'carriage_return' : 'cr',

    def __init__(self, out=sys.stdout, quiet=False):
        # object.__setattr__ is used throughout because this class overrides
        # __setattr__ to track changes to bound properties.
        object.__setattr__(self, "out", out)
        object.__setattr__(self, "quiet", quiet)
        object.__setattr__(self, "maxval", 0)
        object.__setattr__(self, "merges", 0)
        object.__setattr__(self, "_changed", False)
        object.__setattr__(self, "_displayed", False)
        object.__setattr__(self, "_last_display_time", 0)
        object.__setattr__(self, "width", 80)

        isatty = hasattr(out, "isatty") and out.isatty()
        object.__setattr__(self, "_isatty", isatty)
        if not isatty or not self._init_term():
            # Fall back to the hard-coded default codes when termcap is
            # unusable ('term_codes = {}' init elided in this listing).
            for k, capname in self._termcap_name_map.iteritems():
                term_codes[k] = self._default_term_codes[capname]
            object.__setattr__(self, "_term_codes", term_codes)
        encoding = sys.getdefaultencoding()
        for k, v in self._term_codes.items():
            if not isinstance(v, basestring):
                self._term_codes[k] = v.decode(encoding, 'replace')

    def _init_term(self):
        """
        Initialize term control codes.
        @returns: True if term codes were successfully initialized,
        """
        term_type = os.environ.get("TERM", "vt100")
        curses.setupterm(term_type, self.out.fileno())
        tigetstr = curses.tigetstr
        except curses.error:
        if tigetstr is None:

        # Query each needed capability, substituting the default when the
        # terminfo lookup fails ('term_codes = {}' init elided).
        for k, capname in self._termcap_name_map.iteritems():
            code = tigetstr(capname)
            code = self._default_term_codes[capname]
            term_codes[k] = code
        object.__setattr__(self, "_term_codes", term_codes)

    def _format_msg(self, msg):
        return ">>> %s" % msg

        # Lines below belong to an elided erase-line helper: return to
        # column 0 and clear to end of line.
        self._term_codes['carriage_return'] + \
        self._term_codes['clr_eol'])
        self._displayed = False

    def _display(self, line):
        self.out.write(line)
        self._displayed = True

    def _update(self, msg):
        if not self._isatty:
            out.write(self._format_msg(msg) + self._term_codes['newline'])
            self._displayed = True
        self._display(self._format_msg(msg))

    def displayMessage(self, msg):
        # Print a message without clobbering the status line.
        was_displayed = self._displayed
        if self._isatty and self._displayed:
        self.out.write(self._format_msg(msg) + self._term_codes['newline'])
        self._displayed = False
        self._changed = True

        # Lines below belong to an elided reset(): zero the counters and
        # terminate any displayed status line.
        for name in self._bound_properties:
            object.__setattr__(self, name, 0)
        self.out.write(self._term_codes['newline'])
        self._displayed = False

    def __setattr__(self, name, value):
        # Intercept writes so bound properties mark the display dirty.
        old_value = getattr(self, name)
        if value == old_value:
        object.__setattr__(self, name, value)
        if name in self._bound_properties:
            self._property_change(name, old_value, value)

    def _property_change(self, name, old_value, new_value):
        self._changed = True

    def _load_avg_str(self):
        # 'avg' and 'digits' bindings are elided in this listing.
        return ", ".join(("%%.%df" % digits ) % x for x in avg)

        """
        Display status on stdout, but only if something has
        changed since the last call.
        """
        # Rate-limit redraws on ttys by _min_display_latency seconds.
        current_time = time.time()
        time_delta = current_time - self._last_display_time
        if self._displayed and \
        if not self._isatty:
        if time_delta < self._min_display_latency:

        self._last_display_time = current_time
        self._changed = False
        self._display_status()

    def _display_status(self):
        # Don't use len(self._completed_tasks) here since that also
        # can include uninstall tasks.
        curval_str = str(self.curval)
        maxval_str = str(self.maxval)
        running_str = str(self.running)
        failed_str = str(self.failed)
        load_avg_str = self._load_avg_str()

        # Build a colorized and a plain rendering in parallel; the plain
        # one drives width handling and the xterm title.
        color_output = StringIO()
        plain_output = StringIO()
        style_file = portage.output.ConsoleStyleFile(color_output)
        style_file.write_listener = plain_output
        style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
        style_writer.style_listener = style_file.new_styles
        f = formatter.AbstractFormatter(style_writer)

        number_style = "INFORM"
        f.add_literal_data("Jobs: ")
        f.push_style(number_style)
        f.add_literal_data(curval_str)
        f.add_literal_data(" of ")
        f.push_style(number_style)
        f.add_literal_data(maxval_str)
        f.add_literal_data(" complete")

        f.add_literal_data(", ")
        f.push_style(number_style)
        f.add_literal_data(running_str)
        f.add_literal_data(" running")

        f.add_literal_data(", ")
        f.push_style(number_style)
        f.add_literal_data(failed_str)
        f.add_literal_data(" failed")

        # Pad the jobs section to a fixed column before the load average.
        padding = self._jobs_column_width - len(plain_output.getvalue())
        f.add_literal_data(padding * " ")

        f.add_literal_data("Load avg: ")
        f.add_literal_data(load_avg_str)

        # Truncate to fit width, to avoid making the terminal scroll if the
        # line overflows (happens when the load average is large).
        plain_output = plain_output.getvalue()
        if self._isatty and len(plain_output) > self.width:
            # Use plain_output here since it's easier to truncate
            # properly than the color output which contains console
            self._update(plain_output[:self.width])
            # NOTE(review): an 'else:' for the colorized path appears to be
            # elided before the next line.
            self._update(color_output.getvalue())

        xtermTitle(" ".join(plain_output.split()))
9984 class Scheduler(PollScheduler):
9986 _opts_ignore_blockers = \
9987 frozenset(["--buildpkgonly",
9988 "--fetchonly", "--fetch-all-uri",
9989 "--nodeps", "--pretend"])
9991 _opts_no_background = \
9992 frozenset(["--pretend",
9993 "--fetchonly", "--fetch-all-uri"])
9995 _opts_no_restart = frozenset(["--buildpkgonly",
9996 "--fetchonly", "--fetch-all-uri", "--pretend"])
9998 _bad_resume_opts = set(["--ask", "--changelog",
9999 "--resume", "--skipfirst"])
10001 _fetch_log = "/var/log/emerge-fetch.log"
10003 class _iface_class(SlotObject):
10004 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
10005 "dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
10006 "scheduleSetup", "scheduleUnpack", "scheduleYield",
10009 class _fetch_iface_class(SlotObject):
10010 __slots__ = ("log_file", "schedule")
10012 _task_queues_class = slot_dict_class(
10013 ("merge", "jobs", "fetch", "unpack"), prefix="")
10015 class _build_opts_class(SlotObject):
10016 __slots__ = ("buildpkg", "buildpkgonly",
10017 "fetch_all_uri", "fetchonly", "pretend")
10019 class _binpkg_opts_class(SlotObject):
10020 __slots__ = ("fetchonly", "getbinpkg", "pretend")
10022 class _pkg_count_class(SlotObject):
10023 __slots__ = ("curval", "maxval")
10025 class _emerge_log_class(SlotObject):
10026 __slots__ = ("xterm_titles",)
10028 def log(self, *pargs, **kwargs):
10029 if not self.xterm_titles:
10030 # Avoid interference with the scheduler's status display.
10031 kwargs.pop("short_msg", None)
10032 emergelog(self.xterm_titles, *pargs, **kwargs)
10034 class _failed_pkg(SlotObject):
10035 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
10037 class _ConfigPool(object):
10038 """Interface for a task to temporarily allocate a config
10039 instance from a pool. This allows a task to be constructed
10040 long before the config instance actually becomes needed, like
10041 when prefetchers are constructed for the whole merge list."""
10042 __slots__ = ("_root", "_allocate", "_deallocate")
10043 def __init__(self, root, allocate, deallocate):
10045 self._allocate = allocate
10046 self._deallocate = deallocate
10047 def allocate(self):
10048 return self._allocate(self._root)
10049 def deallocate(self, settings):
10050 self._deallocate(settings)
10052 class _unknown_internal_error(portage.exception.PortageException):
10054 Used internally to terminate scheduling. The specific reason for
10055 the failure should have been dumped to stderr.
10057 def __init__(self, value=""):
10058 portage.exception.PortageException.__init__(self, value)
10060 def __init__(self, settings, trees, mtimedb, myopts,
10061 spinner, mergelist, favorites, digraph):
10062 PollScheduler.__init__(self)
10063 self.settings = settings
10064 self.target_root = settings["ROOT"]
10066 self.myopts = myopts
10067 self._spinner = spinner
10068 self._mtimedb = mtimedb
10069 self._mergelist = mergelist
10070 self._favorites = favorites
10071 self._args_set = InternalPackageSet(favorites)
10072 self._build_opts = self._build_opts_class()
10073 for k in self._build_opts.__slots__:
10074 setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10075 self._binpkg_opts = self._binpkg_opts_class()
10076 for k in self._binpkg_opts.__slots__:
10077 setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10080 self._logger = self._emerge_log_class()
10081 self._task_queues = self._task_queues_class()
10082 for k in self._task_queues.allowed_keys:
10083 setattr(self._task_queues, k,
10084 SequentialTaskQueue())
10086 # Holds merges that will wait to be executed when no builds are
10087 # executing. This is useful for system packages since dependencies
10088 # on system packages are frequently unspecified.
10089 self._merge_wait_queue = []
10090 # Holds merges that have been transfered from the merge_wait_queue to
10091 # the actual merge queue. They are removed from this list upon
10092 # completion. Other packages can start building only when this list is
10094 self._merge_wait_scheduled = []
10096 # Holds system packages and their deep runtime dependencies. Before
10097 # being merged, these packages go to merge_wait_queue, to be merged
10098 # when no other packages are building.
10099 self._deep_system_deps = set()
10101 # Holds packages to merge which will satisfy currently unsatisfied
10102 # deep runtime dependencies of system packages. If this is not empty
10103 # then no parallel builds will be spawned until it is empty. This
10104 # minimizes the possibility that a build will fail due to the system
10105 # being in a fragile state. For example, see bug #259954.
10106 self._unsatisfied_system_deps = set()
10108 self._status_display = JobStatusDisplay()
10109 self._max_load = myopts.get("--load-average")
10110 max_jobs = myopts.get("--jobs")
10111 if max_jobs is None:
10113 self._set_max_jobs(max_jobs)
10115 # The root where the currently running
10116 # portage instance is installed.
10117 self._running_root = trees["/"]["root_config"]
10119 if settings.get("PORTAGE_DEBUG", "") == "1":
10121 self.pkgsettings = {}
10122 self._config_pool = {}
10123 self._blocker_db = {}
10125 self._config_pool[root] = []
10126 self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10128 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10129 schedule=self._schedule_fetch)
10130 self._sched_iface = self._iface_class(
10131 dblinkEbuildPhase=self._dblink_ebuild_phase,
10132 dblinkDisplayMerge=self._dblink_display_merge,
10133 dblinkElog=self._dblink_elog,
10134 dblinkEmergeLog=self._dblink_emerge_log,
10135 fetch=fetch_iface, register=self._register,
10136 schedule=self._schedule_wait,
10137 scheduleSetup=self._schedule_setup,
10138 scheduleUnpack=self._schedule_unpack,
10139 scheduleYield=self._schedule_yield,
10140 unregister=self._unregister)
10142 self._prefetchers = weakref.WeakValueDictionary()
10143 self._pkg_queue = []
10144 self._completed_tasks = set()
10146 self._failed_pkgs = []
10147 self._failed_pkgs_all = []
10148 self._failed_pkgs_die_msgs = []
10149 self._post_mod_echo_msgs = []
10150 self._parallel_fetch = False
10151 merge_count = len([x for x in mergelist \
10152 if isinstance(x, Package) and x.operation == "merge"])
10153 self._pkg_count = self._pkg_count_class(
10154 curval=0, maxval=merge_count)
10155 self._status_display.maxval = self._pkg_count.maxval
10157 # The load average takes some time to respond when new
10158 # jobs are added, so we need to limit the rate of adding
10160 self._job_delay_max = 10
10161 self._job_delay_factor = 1.0
10162 self._job_delay_exp = 1.5
10163 self._previous_job_start_time = None
10165 self._set_digraph(digraph)
10167 # This is used to memoize the _choose_pkg() result when
10168 # no packages can be chosen until one of the existing
10170 self._choose_pkg_return_early = False
10172 features = self.settings.features
10173 if "parallel-fetch" in features and \
10174 not ("--pretend" in self.myopts or \
10175 "--fetch-all-uri" in self.myopts or \
10176 "--fetchonly" in self.myopts):
10177 if "distlocks" not in features:
10178 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10179 portage.writemsg(red("!!!")+" parallel-fetching " + \
10180 "requires the distlocks feature enabled"+"\n",
10182 portage.writemsg(red("!!!")+" you have it disabled, " + \
10183 "thus parallel-fetching is being disabled"+"\n",
10185 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10186 elif len(mergelist) > 1:
10187 self._parallel_fetch = True
10189 if self._parallel_fetch:
10190 # clear out existing fetch log if it exists
10192 open(self._fetch_log, 'w')
10193 except EnvironmentError:
10196 self._running_portage = None
10197 portage_match = self._running_root.trees["vartree"].dbapi.match(
10198 portage.const.PORTAGE_PACKAGE_ATOM)
10200 cpv = portage_match.pop()
10201 self._running_portage = self._pkg(cpv, "installed",
10202 self._running_root, installed=True)
10204 def _poll(self, timeout=None):
10206 PollScheduler._poll(self, timeout=timeout)
10208 def _set_max_jobs(self, max_jobs):
10209 self._max_jobs = max_jobs
10210 self._task_queues.jobs.max_jobs = max_jobs
# Decide whether emerge should run in background mode, and adjust the
# status display and xterm-title settings accordingly.
# Background mode requires parallel jobs (or --quiet) and none of the
# options in self._opts_no_background.
10212 def _background_mode(self):
10214 Check if background mode is enabled and adjust states as necessary.
10217 @returns: True if background mode is enabled, False otherwise.
10219 background = (self._max_jobs is True or \
10220 self._max_jobs > 1 or "--quiet" in self.myopts) and \
10221 not bool(self._opts_no_background.intersection(self.myopts))
10224 interactive_tasks = self._get_interactive_tasks()
10225 if interactive_tasks:
# Interactive packages need the terminal, so fall back to
# foreground output and a single job.
10227 writemsg_level(">>> Sending package output to stdio due " + \
10228 "to interactive package(s):\n",
10229 level=logging.INFO, noiselevel=-1)
10231 for pkg in interactive_tasks:
10232 pkg_str = " " + colorize("INFORM", str(pkg.cpv))
10233 if pkg.root != "/":
10234 pkg_str += " for " + pkg.root
10235 msg.append(pkg_str)
10237 writemsg_level("".join("%s\n" % (l,) for l in msg),
10238 level=logging.INFO, noiselevel=-1)
10239 if self._max_jobs is True or self._max_jobs > 1:
10240 self._set_max_jobs(1)
10241 writemsg_level(">>> Setting --jobs=1 due " + \
10242 "to the above interactive package(s)\n",
10243 level=logging.INFO, noiselevel=-1)
# Quiet status display unless we are in the foreground with
# --verbose (or without --quiet).
10245 self._status_display.quiet = \
10246 not background or \
10247 ("--quiet" in self.myopts and \
10248 "--verbose" not in self.myopts)
10250 self._logger.xterm_titles = \
10251 "notitles" not in self.settings.features and \
10252 self._status_display.quiet
# NOTE(review): the `return background` line appears to be missing
# from this excerpt.
# Collect merge tasks whose PROPERTIES metadata contains "interactive".
# Raises the scheduler's internal error if PROPERTIES cannot be parsed.
10256 def _get_interactive_tasks(self):
10257 from portage import flatten
10258 from portage.dep import use_reduce, paren_reduce
10259 interactive_tasks = []
10260 for task in self._mergelist:
# Only Package instances scheduled for merge are relevant.
10261 if not (isinstance(task, Package) and \
10262 task.operation == "merge"):
# Evaluate PROPERTIES with the package's enabled USE flags.
10265 properties = flatten(use_reduce(paren_reduce(
10266 task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10267 except portage.exception.InvalidDependString, e:
10268 show_invalid_depstring_notice(task,
10269 task.metadata["PROPERTIES"], str(e))
10270 raise self._unknown_internal_error()
10271 if "interactive" in properties:
10272 interactive_tasks.append(task)
10273 return interactive_tasks
# Install the dependency digraph used for parallel scheduling.
# With --nodeps or a single job the graph is unnecessary, so it is
# dropped to avoid the bookkeeping cost.
10275 def _set_digraph(self, digraph):
10276 if "--nodeps" in self.myopts or \
10277 (self._max_jobs is not True and self._max_jobs < 2):
# Save memory since only one job is scheduled at a time.
10279 self._digraph = None
10282 self._digraph = digraph
10283 self._find_system_deps()
10284 self._prune_digraph()
10285 self._prevent_builddir_collisions()
10287 def _find_system_deps(self):
10289 Find system packages and their deep runtime dependencies. Before being
10290 merged, these packages go to merge_wait_queue, to be merged when no
10291 other packages are building.
10293 deep_system_deps = self._deep_system_deps
10294 deep_system_deps.clear()
10295 deep_system_deps.update(
10296 _find_deep_system_runtime_deps(self._digraph))
# Only packages actually being merged matter here; drop the rest.
10297 deep_system_deps.difference_update([pkg for pkg in \
10298 deep_system_deps if pkg.operation != "merge"])
10300 def _prune_digraph(self):
10302 Prune any root nodes that are irrelevant.
10305 graph = self._digraph
10306 completed_tasks = self._completed_tasks
10307 removed_nodes = set()
# Repeatedly strip root nodes that are non-packages, installed
# no-ops, or already completed; removing roots can expose new
# prunable roots (loop structure partly missing from this excerpt).
10309 for node in graph.root_nodes():
10310 if not isinstance(node, Package) or \
10311 (node.installed and node.operation == "nomerge") or \
10313 node in completed_tasks:
10314 removed_nodes.add(node)
10316 graph.difference_update(removed_nodes)
10317 if not removed_nodes:
10319 removed_nodes.clear()
10321 def _prevent_builddir_collisions(self):
10323 When building stages, sometimes the same exact cpv needs to be merged
10324 to both $ROOTs. Add edges to the digraph in order to avoid collisions
10325 in the builddir. Currently, normal file locks would be inappropriate
10326 for this purpose since emerge holds all of it's build dir locks from
# Map each cpv to the packages sharing it; later duplicates get a
# buildtime edge on every earlier one so they never build at once.
10330 for pkg in self._mergelist:
10331 if not isinstance(pkg, Package):
10332 # a satisfied blocker
10336 if pkg.cpv not in cpv_map:
10337 cpv_map[pkg.cpv] = [pkg]
10339 for earlier_pkg in cpv_map[pkg.cpv]:
10340 self._digraph.add(earlier_pkg, pkg,
10341 priority=DepPriority(buildtime=True))
10342 cpv_map[pkg.cpv].append(pkg)
10344 class _pkg_failure(portage.exception.PortageException):
10346 An instance of this class is raised by unmerge() when
10347 an uninstallation fails.
# The first positional argument, when given, is kept as the exit
# status of the failed operation.
10350 def __init__(self, *pargs):
10351 portage.exception.PortageException.__init__(self, pargs)
10353 self.status = pargs[0]
10355 def _schedule_fetch(self, fetcher):
10357 Schedule a fetcher on the fetch queue, in order to
10358 serialize access to the fetch log.
# addFront gives priority over background prefetchers.
10360 self._task_queues.fetch.addFront(fetcher)
10362 def _schedule_setup(self, setup_phase):
10364 Schedule a setup phase on the merge queue, in order to
10365 serialize unsandboxed access to the live filesystem.
10367 self._task_queues.merge.addFront(setup_phase)
10370 def _schedule_unpack(self, unpack_phase):
10372 Schedule an unpack phase on the unpack queue, in order
10373 to serialize $DISTDIR access for live ebuilds.
10375 self._task_queues.unpack.add(unpack_phase)
10377 def _find_blockers(self, new_pkg):
10379 Returns a callable which should be called only when
10380 the vdb lock has been acquired.
# Deferred closure: the blocker search itself runs later, under the
# caller-held vdb lock (acquire_lock=0 means "do not lock again").
10382 def get_blockers():
10383 return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10384 return get_blockers
# Return dblink objects for installed packages that block new_pkg.
# Packages in the same slot (or identical cpv) are handled by the
# normal replacement logic and are therefore skipped here.
10386 def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
10387 if self._opts_ignore_blockers.intersection(self.myopts):
10390 # Call gc.collect() here to avoid heap overflow that
10391 # triggers 'Cannot allocate memory' errors (reported
10392 # with python-2.5).
10396 blocker_db = self._blocker_db[new_pkg.root]
10398 blocker_dblinks = []
10399 for blocking_pkg in blocker_db.findInstalledBlockers(
10400 new_pkg, acquire_lock=acquire_lock):
10401 if new_pkg.slot_atom == blocking_pkg.slot_atom:
10403 if new_pkg.cpv == blocking_pkg.cpv:
10405 blocker_dblinks.append(portage.dblink(
10406 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
10407 self.pkgsettings[blocking_pkg.root], treetype="vartree",
10408 vartree=self.trees[blocking_pkg.root]["vartree"]))
10412 return blocker_dblinks
# Build a Package instance corresponding to the given dblink, mapping
# its treetype to the scheduler's package type names.
10414 def _dblink_pkg(self, pkg_dblink):
10415 cpv = pkg_dblink.mycpv
10416 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
10417 root_config = self.trees[pkg_dblink.myroot]["root_config"]
10418 installed = type_name == "installed"
10419 return self._pkg(cpv, type_name, root_config, installed=installed)
# Append msg to the file at log_path.
# NOTE(review): the write/close lines are missing from this excerpt.
10421 def _append_to_log_path(self, log_path, msg):
10422 f = open(log_path, 'a')
# elog callback for dblink: route elog messages to the package's log
# file when running in the background, otherwise to normal output.
10428 def _dblink_elog(self, pkg_dblink, phase, func, msgs):
10430 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10433 background = self._background
10435 if background and log_path is not None:
10436 log_file = open(log_path, 'a')
10441 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
10443 if log_file is not None:
# Forward a dblink emerge-log message to the scheduler's logger.
10446 def _dblink_emerge_log(self, msg):
10447 self._logger.log(msg)
# Display callback for dblink merge output: without a log file, print
# unless backgrounded (warnings and above always shown); with a log
# file, print and also append to the log.
10449 def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
10450 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10451 background = self._background
10453 if log_path is None:
10454 if not (background and level < logging.WARN):
10455 portage.util.writemsg_level(msg,
10456 level=level, noiselevel=noiselevel)
10459 portage.util.writemsg_level(msg,
10460 level=level, noiselevel=noiselevel)
10461 self._append_to_log_path(log_path, msg)
10463 def _dblink_ebuild_phase(self,
10464 pkg_dblink, pkg_dbapi, ebuild_path, phase):
10466 Using this callback for merge phases allows the scheduler
10467 to run while these phases execute asynchronously, and allows
10468 the scheduler control output handling.
10471 scheduler = self._sched_iface
10472 settings = pkg_dblink.settings
10473 pkg = self._dblink_pkg(pkg_dblink)
10474 background = self._background
10475 log_path = settings.get("PORTAGE_LOG_FILE")
# Run the phase synchronously from the caller's perspective, while
# the scheduler's event loop keeps servicing other tasks.
10477 ebuild_phase = EbuildPhase(background=background,
10478 pkg=pkg, phase=phase, scheduler=scheduler,
10479 settings=settings, tree=pkg_dblink.treetype)
10480 ebuild_phase.start()
10481 ebuild_phase.wait()
10483 return ebuild_phase.returncode
10485 def _generate_digests(self):
10487 Generate digests if necessary for --digests or FEATURES=digest.
10488 In order to avoid interference, this must done before parallel
# --fetchonly skips digest generation entirely.
10492 if '--fetchonly' in self.myopts:
10495 digest = '--digest' in self.myopts
10497 for pkgsettings in self.pkgsettings.itervalues():
10498 if 'digest' in pkgsettings.features:
# Only ebuild packages being merged need manifests.
10505 for x in self._mergelist:
10506 if not isinstance(x, Package) or \
10507 x.type_name != 'ebuild' or \
10508 x.operation != 'merge':
10510 pkgsettings = self.pkgsettings[x.root]
10511 if '--digest' not in self.myopts and \
10512 'digest' not in pkgsettings.features:
10514 portdb = x.root_config.trees['porttree'].dbapi
10515 ebuild_path = portdb.findname(x.cpv)
10516 if not ebuild_path:
10518 "!!! Could not locate ebuild for '%s'.\n" \
10519 % x.cpv, level=logging.ERROR, noiselevel=-1)
# digestgen expects 'O' to point at the ebuild's directory.
10521 pkgsettings['O'] = os.path.dirname(ebuild_path)
10522 if not portage.digestgen([], pkgsettings, myportdb=portdb):
10524 "!!! Unable to generate manifest for '%s'.\n" \
10525 % x.cpv, level=logging.ERROR, noiselevel=-1)
10530 def _check_manifests(self):
10531 # Verify all the manifests now so that the user is notified of failure
10532 # as soon as possible.
# Skipped unless FEATURES=strict; fetch-only modes do not verify.
10533 if "strict" not in self.settings.features or \
10534 "--fetchonly" in self.myopts or \
10535 "--fetch-all-uri" in self.myopts:
10538 shown_verifying_msg = False
10539 quiet_settings = {}
# Clone per-root configs with PORTAGE_QUIET so digestcheck output
# stays terse.
10540 for myroot, pkgsettings in self.pkgsettings.iteritems():
10541 quiet_config = portage.config(clone=pkgsettings)
10542 quiet_config["PORTAGE_QUIET"] = "1"
10543 quiet_config.backup_changes("PORTAGE_QUIET")
10544 quiet_settings[myroot] = quiet_config
10547 for x in self._mergelist:
10548 if not isinstance(x, Package) or \
10549 x.type_name != "ebuild":
10552 if not shown_verifying_msg:
10553 shown_verifying_msg = True
10554 self._status_msg("Verifying ebuild manifests")
10556 root_config = x.root_config
10557 portdb = root_config.trees["porttree"].dbapi
10558 quiet_config = quiet_settings[root_config.root]
10559 quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
10560 if not portage.digestcheck([], quiet_config, strict=True):
# Queue background prefetchers for every package after the first, so
# distfiles download in parallel with builds.
10565 def _add_prefetchers(self):
10567 if not self._parallel_fetch:
10570 if self._parallel_fetch:
10571 self._status_msg("Starting parallel fetch")
10573 prefetchers = self._prefetchers
10574 getbinpkg = "--getbinpkg" in self.myopts
10576 # In order to avoid "waiting for lock" messages
10577 # at the beginning, which annoy users, never
10578 # spawn a prefetcher for the first package.
10579 for pkg in self._mergelist[1:]:
10580 prefetcher = self._create_prefetcher(pkg)
10581 if prefetcher is not None:
10582 self._task_queues.fetch.add(prefetcher)
10583 prefetchers[pkg] = prefetcher
10585 def _create_prefetcher(self, pkg):
10587 @return: a prefetcher, or None if not applicable
# Ebuilds get an EbuildFetcher; remote binary packages (with
# --getbinpkg) get a BinpkgPrefetcher; anything else gets None.
10591 if not isinstance(pkg, Package):
10594 elif pkg.type_name == "ebuild":
10596 prefetcher = EbuildFetcher(background=True,
10597 config_pool=self._ConfigPool(pkg.root,
10598 self._allocate_config, self._deallocate_config),
10599 fetchonly=1, logfile=self._fetch_log,
10600 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
10602 elif pkg.type_name == "binary" and \
10603 "--getbinpkg" in self.myopts and \
10604 pkg.root_config.trees["bintree"].isremote(pkg.cpv):
10606 prefetcher = BinpkgPrefetcher(background=True,
10607 pkg=pkg, scheduler=self._sched_iface)
10611 def _is_restart_scheduled(self):
10613 Check if the merge list contains a replacement
10614 for the current running instance, that will result
10615 in restart after merge.
10617 @returns: True if a restart is scheduled, False otherwise.
10619 if self._opts_no_restart.intersection(self.myopts):
10622 mergelist = self._mergelist
# A restart only happens when the portage upgrade is NOT the final
# entry — otherwise nothing remains to merge afterwards.
10624 for i, pkg in enumerate(mergelist):
10625 if self._is_restart_necessary(pkg) and \
10626 i != len(mergelist) - 1:
10631 def _is_restart_necessary(self, pkg):
10633 @return: True if merging the given package
10634 requires restart, False otherwise.
10637 # Figure out if we need a restart.
# A restart is needed when pkg is a different version of portage
# being merged into the running instance's root.
10638 if pkg.root == self._running_root.root and \
10639 portage.match_from_list(
10640 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10641 if self._running_portage:
10642 return pkg.cpv != self._running_portage.cpv
10646 def _restart_if_necessary(self, pkg):
10648 Use execv() to restart emerge. This happens
10649 if portage upgrades itself and there are
10650 remaining packages in the list.
# Guard clauses: restart disabled, not needed for this pkg, or pkg
# was the last item anyway.
10653 if self._opts_no_restart.intersection(self.myopts):
10656 if not self._is_restart_necessary(pkg):
10659 if pkg == self._mergelist[-1]:
10662 self._main_loop_cleanup()
10664 logger = self._logger
10665 pkg_count = self._pkg_count
10666 mtimedb = self._mtimedb
10667 bad_resume_opts = self._bad_resume_opts
10669 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
10670 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
10672 logger.log(" *** RESTARTING " + \
10673 "emerge via exec() after change of " + \
10674 "portage version.")
# Drop the just-merged pkg from the resume list before exec.
10676 mtimedb["resume"]["mergelist"].remove(list(pkg))
10678 portage.run_exitfuncs()
10679 mynewargv = [sys.argv[0], "--resume"]
10680 resume_opts = self.myopts.copy()
10681 # For automatic resume, we need to prevent
10682 # any of bad_resume_opts from leaking in
10683 # via EMERGE_DEFAULT_OPTS.
10684 resume_opts["--ignore-default-opts"] = True
10685 for myopt, myarg in resume_opts.iteritems():
10686 if myopt not in bad_resume_opts:
10688 mynewargv.append(myopt)
10690 mynewargv.append(myopt +"="+ str(myarg))
10691 # priority only needs to be adjusted on the first run
10692 os.environ["PORTAGE_NICENESS"] = "0"
# Replace the current process; does not return on success.
10693 os.execv(mynewargv[0], mynewargv)
# NOTE(review): the `def` line of this method was lost in this excerpt
# (internal numbering jumps from 10693 to 10697). From its content it
# appears to be the Scheduler's top-level merge driver: resume handling,
# per-root environment validation, digest/manifest checks, the
# --keep-going retry loop, and final failure reporting. Confirm against
# the full file.
10697 if "--resume" in self.myopts:
10699 portage.writemsg_stdout(
10700 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10701 self._logger.log(" *** Resuming merge...")
10703 self._save_resume_list()
10706 self._background = self._background_mode()
10707 except self._unknown_internal_error:
# Validate PORTAGE_TMPDIR for every root before starting.
10710 for root in self.trees:
10711 root_config = self.trees[root]["root_config"]
10713 # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10714 # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10715 # for ensuring sane $PWD (bug #239560) and storing elog messages.
10716 tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10717 if not tmpdir or not os.path.isdir(tmpdir):
10718 msg = "The directory specified in your " + \
10719 "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10720 "does not exist. Please create this " + \
10721 "directory or correct your PORTAGE_TMPDIR setting."
10722 msg = textwrap.wrap(msg, 70)
10723 out = portage.output.EOutput()
# Background mode is recorded in each root's settings so spawned
# phases inherit it.
10728 if self._background:
10729 root_config.settings.unlock()
10730 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10731 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10732 root_config.settings.lock()
10734 self.pkgsettings[root] = portage.config(
10735 clone=root_config.settings)
10737 rval = self._generate_digests()
10738 if rval != os.EX_OK:
10741 rval = self._check_manifests()
10742 if rval != os.EX_OK:
10745 keep_going = "--keep-going" in self.myopts
10746 fetchonly = self._build_opts.fetchonly
10747 mtimedb = self._mtimedb
10748 failed_pkgs = self._failed_pkgs
# --keep-going loop: retry after dropping failed packages and
# recalculating the resume list.
10751 rval = self._merge()
10752 if rval == os.EX_OK or fetchonly or not keep_going:
10754 if "resume" not in mtimedb:
10756 mergelist = self._mtimedb["resume"].get("mergelist")
10760 if not failed_pkgs:
10763 for failed_pkg in failed_pkgs:
10764 mergelist.remove(list(failed_pkg.pkg))
10766 self._failed_pkgs_all.extend(failed_pkgs)
10772 if not self._calc_resume_list():
10775 clear_caches(self.trees)
10776 if not self._mergelist:
10779 self._save_resume_list()
10780 self._pkg_count.curval = 0
10781 self._pkg_count.maxval = len([x for x in self._mergelist \
10782 if isinstance(x, Package) and x.operation == "merge"])
10783 self._status_display.maxval = self._pkg_count.maxval
10785 self._logger.log(" *** Finished. Cleaning up...")
10788 self._failed_pkgs_all.extend(failed_pkgs)
10791 background = self._background
10792 failure_log_shown = False
10793 if background and len(self._failed_pkgs_all) == 1:
10794 # If only one package failed then just show it's
10795 # whole log for easy viewing.
10796 failed_pkg = self._failed_pkgs_all[-1]
10797 build_dir = failed_pkg.build_dir
10800 log_paths = [failed_pkg.build_log]
10802 log_path = self._locate_failure_log(failed_pkg)
10803 if log_path is not None:
10805 log_file = open(log_path)
10809 if log_file is not None:
10811 for line in log_file:
10812 writemsg_level(line, noiselevel=-1)
10815 failure_log_shown = True
10817 # Dump mod_echo output now since it tends to flood the terminal.
10818 # This allows us to avoid having more important output, generated
10819 # later, from being swept away by the mod_echo output.
10820 mod_echo_output = _flush_elog_mod_echo()
10822 if background and not failure_log_shown and \
10823 self._failed_pkgs_all and \
10824 self._failed_pkgs_die_msgs and \
10825 not mod_echo_output:
# Replay collected die messages per package and phase.
10827 printer = portage.output.EOutput()
10828 for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10830 if mysettings["ROOT"] != "/":
10831 root_msg = " merged to %s" % mysettings["ROOT"]
10833 printer.einfo("Error messages for package %s%s:" % \
10834 (colorize("INFORM", key), root_msg))
10836 for phase in portage.const.EBUILD_PHASES:
10837 if phase not in logentries:
10839 for msgtype, msgcontent in logentries[phase]:
10840 if isinstance(msgcontent, basestring):
10841 msgcontent = [msgcontent]
10842 for line in msgcontent:
10843 printer.eerror(line.strip("\n"))
10845 if self._post_mod_echo_msgs:
10846 for msg in self._post_mod_echo_msgs:
# Summarize all failed packages at the end of the run.
10849 if len(self._failed_pkgs_all) > 1 or \
10850 (self._failed_pkgs_all and "--keep-going" in self.myopts):
10851 if len(self._failed_pkgs_all) > 1:
10852 msg = "The following %d packages have " % \
10853 len(self._failed_pkgs_all) + \
10854 "failed to build or install:"
10856 msg = "The following package has " + \
10857 "failed to build or install:"
10858 prefix = bad(" * ")
10859 writemsg(prefix + "\n", noiselevel=-1)
10860 from textwrap import wrap
10861 for line in wrap(msg, 72):
10862 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10863 writemsg(prefix + "\n", noiselevel=-1)
10864 for failed_pkg in self._failed_pkgs_all:
10865 writemsg("%s\t%s\n" % (prefix,
10866 colorize("INFORM", str(failed_pkg.pkg))),
10868 writemsg(prefix + "\n", noiselevel=-1)
# elog listener hook: capture ERROR-level entries so die messages can
# be replayed after the merge run finishes.
10872 def _elog_listener(self, mysettings, key, logentries, fulltext):
10873 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
10875 self._failed_pkgs_die_msgs.append(
10876 (mysettings, key, errors))
# Find a usable (existing, non-empty) log file for a failed package.
# NOTE(review): the selection/return logic is partly missing from this
# excerpt.
10878 def _locate_failure_log(self, failed_pkg):
10880 build_dir = failed_pkg.build_dir
10883 log_paths = [failed_pkg.build_log]
10885 for log_path in log_paths:
10890 log_size = os.stat(log_path).st_size
# Seed the scheduling queue with Package instances from the merge list;
# Blocker entries are handled separately.
10901 def _add_packages(self):
10902 pkg_queue = self._pkg_queue
10903 for pkg in self._mergelist:
10904 if isinstance(pkg, Package):
10905 pkg_queue.append(pkg)
10906 elif isinstance(pkg, Blocker):
10909 def _system_merge_started(self, merge):
10911 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
10913 graph = self._digraph
10916 pkg = merge.merge.pkg
10918 # Skip this if $ROOT != / since it shouldn't matter if there
10919 # are unsatisfied system runtime deps in this case.
10920 if pkg.root != '/':
10923 completed_tasks = self._completed_tasks
10924 unsatisfied = self._unsatisfied_system_deps
# Edge filter: keep only unsatisfied runtime(-post) dependencies.
10926 def ignore_non_runtime_or_satisfied(priority):
10928 Ignore non-runtime and satisfied runtime priorities.
10930 if isinstance(priority, DepPriority) and \
10931 not priority.satisfied and \
10932 (priority.runtime or priority.runtime_post):
10936 # When checking for unsatisfied runtime deps, only check
10937 # direct deps since indirect deps are checked when the
10938 # corresponding parent is merged.
10939 for child in graph.child_nodes(pkg,
10940 ignore_priority=ignore_non_runtime_or_satisfied):
10941 if not isinstance(child, Package) or \
10942 child.operation == 'uninstall':
10946 if child.operation == 'merge' and \
10947 child not in completed_tasks:
10948 unsatisfied.add(child)
# Exit handler for merges that were held in the wait queue: remove the
# task from the scheduled set, then run the common merge-exit path.
10950 def _merge_wait_exit_handler(self, task):
10951 self._merge_wait_scheduled.remove(task)
10952 self._merge_exit(task)
# Common merge-exit path: process the result, recycle the config, and
# advance the status display for successful non-installed merges.
10954 def _merge_exit(self, merge):
10955 self._do_merge_exit(merge)
10956 self._deallocate_config(merge.merge.settings)
10957 if merge.returncode == os.EX_OK and \
10958 not merge.merge.pkg.installed:
10959 self._status_display.curval += 1
10960 self._status_display.merges = len(self._task_queues.merge)
# Record the outcome of a finished merge: on failure, remember the
# failed package; on success, mark tasks complete, handle a possible
# portage self-upgrade restart, and keep the resume list current.
10963 def _do_merge_exit(self, merge):
10964 pkg = merge.merge.pkg
10965 if merge.returncode != os.EX_OK:
10966 settings = merge.merge.settings
10967 build_dir = settings.get("PORTAGE_BUILDDIR")
10968 build_log = settings.get("PORTAGE_LOG_FILE")
10970 self._failed_pkgs.append(self._failed_pkg(
10971 build_dir=build_dir, build_log=build_log,
10973 returncode=merge.returncode))
10974 self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
10976 self._status_display.failed = len(self._failed_pkgs)
10979 self._task_complete(pkg)
10980 pkg_to_replace = merge.merge.pkg_to_replace
10981 if pkg_to_replace is not None:
10982 # When a package is replaced, mark it's uninstall
10983 # task complete (if any).
10984 uninst_hash_key = \
10985 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
10986 self._task_complete(uninst_hash_key)
10991 self._restart_if_necessary(pkg)
10993 # Call mtimedb.commit() after each merge so that
10994 # --resume still works after being interrupted
10995 # by reboot, sigkill or similar.
10996 mtimedb = self._mtimedb
10997 mtimedb["resume"]["mergelist"].remove(list(pkg))
10998 if not mtimedb["resume"]["mergelist"]:
10999 del mtimedb["resume"]
# Exit handler for builds: successful builds are promoted to merge
# tasks (deep system deps wait until no other builds run); failed
# builds are recorded and their config recycled.
11002 def _build_exit(self, build):
11003 if build.returncode == os.EX_OK:
11005 merge = PackageMerge(merge=build)
11006 if not build.build_opts.buildpkgonly and \
11007 build.pkg in self._deep_system_deps:
11008 # Since dependencies on system packages are frequently
11009 # unspecified, merge them only when no builds are executing.
11010 self._merge_wait_queue.append(merge)
11011 merge.addStartListener(self._system_merge_started)
11013 merge.addExitListener(self._merge_exit)
11014 self._task_queues.merge.add(merge)
11015 self._status_display.merges = len(self._task_queues.merge)
11017 settings = build.settings
11018 build_dir = settings.get("PORTAGE_BUILDDIR")
11019 build_log = settings.get("PORTAGE_LOG_FILE")
11021 self._failed_pkgs.append(self._failed_pkg(
11022 build_dir=build_dir, build_log=build_log,
11024 returncode=build.returncode))
11025 self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
11027 self._status_display.failed = len(self._failed_pkgs)
11028 self._deallocate_config(build.settings)
11030 self._status_display.running = self._jobs
# Binary-package extraction completion uses the same handling as a
# finished build.
11033 def _extract_exit(self, build):
11034 self._build_exit(build)
# Mark pkg done: record completion, clear it from unsatisfied system
# deps, and allow _choose_pkg to search again.
11036 def _task_complete(self, pkg):
11037 self._completed_tasks.add(pkg)
11038 self._unsatisfied_system_deps.discard(pkg)
11039 self._choose_pkg_return_early = False
# NOTE(review): the `def` line of this method is missing from this
# excerpt (numbering jumps 11039 -> 11043); it appears to be `_merge`:
# it starts prefetchers, queues packages, installs the elog listener
# around the main loop, then reports the last failure's return code.
# Confirm against the full file.
11043 self._add_prefetchers()
11044 self._add_packages()
11045 pkg_queue = self._pkg_queue
11046 failed_pkgs = self._failed_pkgs
# Quiet lock messages and capture elog output while backgrounded.
11047 portage.locks._quiet = self._background
11048 portage.elog._emerge_elog_listener = self._elog_listener
11054 self._main_loop_cleanup()
11055 portage.locks._quiet = False
11056 portage.elog._emerge_elog_listener = None
11058 rval = failed_pkgs[-1].returncode
# Reset all per-run scheduling state after the main loop finishes.
11062 def _main_loop_cleanup(self):
11063 del self._pkg_queue[:]
11064 self._completed_tasks.clear()
11065 self._deep_system_deps.clear()
11066 self._unsatisfied_system_deps.clear()
11067 self._choose_pkg_return_early = False
11068 self._status_display.reset()
11069 self._digraph = None
11070 self._task_queues.fetch.clear()
11072 def _choose_pkg(self):
11074 Choose a task that has all it's dependencies satisfied.
# Caches a negative result in _choose_pkg_return_early so the queue
# is not rescanned until some running job finishes.
11077 if self._choose_pkg_return_early:
11080 if self._digraph is None:
11081 if (self._jobs or self._task_queues.merge) and \
11082 not ("--nodeps" in self.myopts and \
11083 (self._max_jobs is True or self._max_jobs > 1)):
11084 self._choose_pkg_return_early = True
11086 return self._pkg_queue.pop(0)
# With nothing running, the head of the queue is always safe.
11088 if not (self._jobs or self._task_queues.merge):
11089 return self._pkg_queue.pop(0)
11091 self._prune_digraph()
# Packages later in the queue may be ignored as dependencies since
# they would be merged after this one anyway.
11094 later = set(self._pkg_queue)
11095 for pkg in self._pkg_queue:
11097 if not self._dependent_on_scheduled_merges(pkg, later):
11101 if chosen_pkg is not None:
11102 self._pkg_queue.remove(chosen_pkg)
11104 if chosen_pkg is None:
11105 # There's no point in searching for a package to
11106 # choose until at least one of the existing jobs
11108 self._choose_pkg_return_early = True
11112 def _dependent_on_scheduled_merges(self, pkg, later):
11114 Traverse the subgraph of the given packages deep dependencies
11115 to see if it contains any scheduled merges.
11116 @param pkg: a package to check dependencies for
11118 @param later: packages for which dependence should be ignored
11119 since they will be merged later than pkg anyway and therefore
11120 delaying the merge of pkg will not result in a more optimal
11124 @returns: True if the package is dependent, False otherwise.
11127 graph = self._digraph
11128 completed_tasks = self._completed_tasks
# Depth-first traversal using an explicit stack; direct deps are
# remembered so uninstall nodes reachable only indirectly can be
# treated differently from direct ones.
11131 traversed_nodes = set([pkg])
11132 direct_deps = graph.child_nodes(pkg)
11133 node_stack = direct_deps
11134 direct_deps = frozenset(direct_deps)
11136 node = node_stack.pop()
11137 if node in traversed_nodes:
11139 traversed_nodes.add(node)
11140 if not ((node.installed and node.operation == "nomerge") or \
11141 (node.operation == "uninstall" and \
11142 node not in direct_deps) or \
11143 node in completed_tasks or \
11147 node_stack.extend(graph.child_nodes(node))
11151 def _allocate_config(self, root):
11153 Allocate a unique config instance for a task in order
11154 to prevent interference between parallel tasks.
# Reuse a pooled config when one is available; otherwise clone the
# root's base settings.
11156 if self._config_pool[root]:
11157 temp_settings = self._config_pool[root].pop()
11159 temp_settings = portage.config(clone=self.pkgsettings[root])
11160 # Since config.setcpv() isn't guaranteed to call config.reset() due to
11161 # performance reasons, call it here to make sure all settings from the
11162 # previous package get flushed out (such as PORTAGE_LOG_FILE).
11163 temp_settings.reload()
11164 temp_settings.reset()
11165 return temp_settings
# Return a config instance to the per-root pool for reuse.
11167 def _deallocate_config(self, settings):
11168 self._config_pool[settings["ROOT"]].append(settings)
# Main scheduling loop: schedule tasks and service poll events until
# nothing remains to schedule or run.
11170 def _main_loop(self):
11172 # Only allow 1 job max if a restart is scheduled
11173 # due to portage update.
11174 if self._is_restart_scheduled() or \
11175 self._opts_no_background.intersection(self.myopts):
11176 self._set_max_jobs(1)
11178 merge_queue = self._task_queues.merge
11180 while self._schedule():
11181 if self._poll_event_handlers:
# Drain remaining jobs/merges after scheduling stops.
11186 if not (self._jobs or merge_queue):
11188 if self._poll_event_handlers:
# Keep scheduling while packages remain, unless a failure occurred and
# we are not in fetchonly mode.
11191 def _keep_scheduling(self):
11192 return bool(self._pkg_queue and \
11193 not (self._failed_pkgs and not self._build_opts.fetchonly))
11195 def _schedule_tasks(self):
11197 # When the number of jobs drops to zero, process all waiting merges.
11198 if not self._jobs and self._merge_wait_queue:
11199 for task in self._merge_wait_queue:
11200 task.addExitListener(self._merge_wait_exit_handler)
11201 self._task_queues.merge.add(task)
11202 self._status_display.merges = len(self._task_queues.merge)
11203 self._merge_wait_scheduled.extend(self._merge_wait_queue)
11204 del self._merge_wait_queue[:]
11206 self._schedule_tasks_imp()
11207 self._status_display.display()
11210 for q in self._task_queues.values():
11214 # Cancel prefetchers if they're the only reason
11215 # the main poll loop is still running.
11216 if self._failed_pkgs and not self._build_opts.fetchonly and \
11217 not (self._jobs or self._task_queues.merge) and \
11218 self._task_queues.fetch:
11219 self._task_queues.fetch.clear()
# Reschedule and refresh the display after clearing fetchers.
11223 self._schedule_tasks_imp()
11224 self._status_display.display()
11226 return self._keep_scheduling()
11228 def _job_delay(self):
11231 @returns: True if job scheduling should be delayed, False otherwise.
# Under --load-average, back off exponentially in the number of
# running jobs, capped at _job_delay_max seconds since the previous
# job start.
11234 if self._jobs and self._max_load is not None:
11236 current_time = time.time()
11238 delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
11239 if delay > self._job_delay_max:
11240 delay = self._job_delay_max
11241 if (current_time - self._previous_job_start_time) < delay:
11246 def _schedule_tasks_imp(self):
11249 @returns: True if state changed, False otherwise.
# Guard clauses: stop scheduling when the run is over, a previous
# choose-pkg failed, waiting merges are pending, system deps are
# unsatisfied while jobs run, or job limits forbid another job.
11256 if not self._keep_scheduling():
11257 return bool(state_change)
11259 if self._choose_pkg_return_early or \
11260 self._merge_wait_scheduled or \
11261 (self._jobs and self._unsatisfied_system_deps) or \
11262 not self._can_add_job() or \
11264 return bool(state_change)
11266 pkg = self._choose_pkg()
11268 return bool(state_change)
11272 if not pkg.installed:
11273 self._pkg_count.curval += 1
11275 task = self._task(pkg)
# Installed packages go straight to the merge queue; others start
# as jobs whose exit handler promotes them to merges.
11278 merge = PackageMerge(merge=task)
11279 merge.addExitListener(self._merge_exit)
11280 self._task_queues.merge.add(merge)
11284 self._previous_job_start_time = time.time()
11285 self._status_display.running = self._jobs
11286 task.addExitListener(self._extract_exit)
11287 self._task_queues.jobs.add(task)
11291 self._previous_job_start_time = time.time()
11292 self._status_display.running = self._jobs
11293 task.addExitListener(self._build_exit)
11294 self._task_queues.jobs.add(task)
11296 return bool(state_change)
# Build a MergeListItem task for pkg, resolving the currently installed
# package in the same slot (if any) as the one to be replaced.
11298 def _task(self, pkg):
11300 pkg_to_replace = None
11301 if pkg.operation != "uninstall":
11302 vardb = pkg.root_config.trees["vartree"].dbapi
11303 previous_cpv = vardb.match(pkg.slot_atom)
11305 previous_cpv = previous_cpv.pop()
11306 pkg_to_replace = self._pkg(previous_cpv,
11307 "installed", pkg.root_config, installed=True)
11309 task = MergeListItem(args_set=self._args_set,
11310 background=self._background, binpkg_opts=self._binpkg_opts,
11311 build_opts=self._build_opts,
11312 config_pool=self._ConfigPool(pkg.root,
11313 self._allocate_config, self._deallocate_config),
11314 emerge_opts=self.myopts,
11315 find_blockers=self._find_blockers(pkg), logger=self._logger,
11316 mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
11317 pkg_to_replace=pkg_to_replace,
11318 prefetcher=self._prefetchers.get(pkg),
11319 scheduler=self._sched_iface,
11320 settings=self._allocate_config(pkg.root),
11321 statusMessage=self._status_msg,
11322 world_atom=self._world_atom)
# Emit a one-line failure notice for a package, including the log file
# path when one can be located.
11326 def _failed_pkg_msg(self, failed_pkg, action, preposition):
11327 pkg = failed_pkg.pkg
11328 msg = "%s to %s %s" % \
11329 (bad("Failed"), action, colorize("INFORM", pkg.cpv))
11330 if pkg.root != "/":
11331 msg += " %s %s" % (preposition, pkg.root)
11333 log_path = self._locate_failure_log(failed_pkg)
11334 if log_path is not None:
11335 msg += ", Log file:"
11336 self._status_msg(msg)
11338 if log_path is not None:
11339 self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
11341 def _status_msg(self, msg):
11343 Display a brief status message (no newlines) in the status display.
11344 This is called by tasks to provide feedback to the user. This
11345 delegates the resposibility of generating \r and \n control characters,
11346 to guarantee that lines are created or erased when necessary and
11350 @param msg: a brief status message (no newlines allowed)
# In the foreground, a leading newline separates the message from
# any in-progress output.
11352 if not self._background:
11353 writemsg_level("\n")
11354 self._status_display.displayMessage(msg)
11356 def _save_resume_list(self):
11358 Do this before verifying the ebuild Manifests since it might
11359 be possible for the user to use --resume --skipfirst get past
11360 a non-essential package with a broken digest.
# Only real merges are resumable; blockers and nomerge entries are
# excluded from the persisted list.
11362 mtimedb = self._mtimedb
11363 mtimedb["resume"]["mergelist"] = [list(x) \
11364 for x in self._mergelist \
11365 if isinstance(x, Package) and x.operation == "merge"]
# NOTE(review): this method is truncated at the end of this excerpt;
# the final return value is not visible. Confirm against the full file.
11369 def _calc_resume_list(self):
11371 Use the current resume list to calculate a new one,
11372 dropping any packages with unsatisfied deps.
11374 @returns: True if successful, False otherwise.
11376 print colorize("GOOD", "*** Resuming merge...")
11378 if self._show_list():
11379 if "--tree" in self.myopts:
11380 portage.writemsg_stdout("\n" + \
11381 darkgreen("These are the packages that " + \
11382 "would be merged, in reverse order:\n\n"))
11385 portage.writemsg_stdout("\n" + \
11386 darkgreen("These are the packages that " + \
11387 "would be merged, in order:\n\n"))
11389 show_spinner = "--quiet" not in self.myopts and \
11390 "--nodeps" not in self.myopts
11393 print "Calculating dependencies ",
11395 myparams = create_depgraph_params(self.myopts, None)
# Rebuild the depgraph from the saved resume state; unsatisfied
# resume deps are reported rather than aborting outright.
11399 success, mydepgraph, dropped_tasks = resume_depgraph(
11400 self.settings, self.trees, self._mtimedb, self.myopts,
11401 myparams, self._spinner)
11402 except depgraph.UnsatisfiedResumeDep, exc:
11403 # rename variable to avoid python-3.0 error:
11404 # SyntaxError: can not delete variable 'e' referenced in nested
11407 mydepgraph = e.depgraph
11408 dropped_tasks = set()
11411 print "\b\b... done!"
# Deferred message: explains masked/missing deps after mod_echo
# output has been flushed.
11414 def unsatisfied_resume_dep_msg():
11415 mydepgraph.display_problems()
11416 out = portage.output.EOutput()
11417 out.eerror("One or more packages are either masked or " + \
11418 "have missing dependencies:")
11421 show_parents = set()
11422 for dep in e.value:
11423 if dep.parent in show_parents:
11425 show_parents.add(dep.parent)
11426 if dep.atom is None:
11427 out.eerror(indent + "Masked package:")
11428 out.eerror(2 * indent + str(dep.parent))
11431 out.eerror(indent + str(dep.atom) + " pulled in by:")
11432 out.eerror(2 * indent + str(dep.parent))
11434 msg = "The resume list contains packages " + \
11435 "that are either masked or have " + \
11436 "unsatisfied dependencies. " + \
11437 "Please restart/continue " + \
11438 "the operation manually, or use --skipfirst " + \
11439 "to skip the first package in the list and " + \
11440 "any other packages that may be " + \
11441 "masked or have missing dependencies."
11442 for line in textwrap.wrap(msg, 72):
11444 self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
11447 if success and self._show_list():
11448 mylist = mydepgraph.altlist()
11450 if "--tree" in self.myopts:
11452 mydepgraph.display(mylist, favorites=self._favorites)
11455 self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11457 mydepgraph.display_problems()
# Break graph references before storing the new merge list so the
# old depgraph can be garbage collected.
11459 mylist = mydepgraph.altlist()
11460 mydepgraph.break_refs(mylist)
11461 mydepgraph.break_refs(dropped_tasks)
11462 self._mergelist = mylist
11463 self._set_digraph(mydepgraph.schedulerGraph())
# Report each dropped merge through elog so the failure is logged
# per package.
11466 for task in dropped_tasks:
11467 if not (isinstance(task, Package) and task.operation == "merge"):
11470 msg = "emerge --keep-going:" + \
11472 if pkg.root != "/":
11473 msg += " for %s" % (pkg.root,)
11474 msg += " dropped due to unsatisfied dependency."
11475 for line in textwrap.wrap(msg, msg_width):
11476 eerror(line, phase="other", key=pkg.cpv)
11477 settings = self.pkgsettings[pkg.root]
11478 # Ensure that log collection from $T is disabled inside
11479 # elog_process(), since any logs that might exist are
11481 settings.pop("T", None)
11482 portage.elog.elog_process(pkg.cpv, settings)
11483 self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
11487 def _show_list(self):
11488 myopts = self.myopts
11489 if "--quiet" not in myopts and \
11490 ("--ask" in myopts or "--tree" in myopts or \
11491 "--verbose" in myopts):
11495 def _world_atom(self, pkg):
11497 Add the package to the world file, but only if
11498 it's supposed to be added. Otherwise, do nothing.
11501 if set(("--buildpkgonly", "--fetchonly",
11503 "--oneshot", "--onlydeps",
11504 "--pretend")).intersection(self.myopts):
11507 if pkg.root != self.target_root:
11510 args_set = self._args_set
11511 if not args_set.findAtomForPackage(pkg):
11514 logger = self._logger
11515 pkg_count = self._pkg_count
11516 root_config = pkg.root_config
11517 world_set = root_config.sets["world"]
11518 world_locked = False
11519 if hasattr(world_set, "lock"):
11521 world_locked = True
11524 if hasattr(world_set, "load"):
11525 world_set.load() # maybe it's changed on disk
11527 atom = create_world_atom(pkg, args_set, root_config)
11529 if hasattr(world_set, "add"):
11530 self._status_msg(('Recording %s in "world" ' + \
11531 'favorites file...') % atom)
11532 logger.log(" === (%s of %s) Updating world file (%s)" % \
11533 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11534 world_set.add(atom)
11536 writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11537 (atom,), level=logging.WARN, noiselevel=-1)
11542 def _pkg(self, cpv, type_name, root_config, installed=False):
11544 Get a package instance from the cache, or create a new
11545 one if necessary. Raises KeyError from aux_get if it
11546 failures for some reason (package does not exist or is
11549 operation = "merge"
11551 operation = "nomerge"
11553 if self._digraph is not None:
11554 # Reuse existing instance when available.
11555 pkg = self._digraph.get(
11556 (type_name, root_config.root, cpv, operation))
11557 if pkg is not None:
11560 tree_type = depgraph.pkg_tree_map[type_name]
11561 db = root_config.trees[tree_type].dbapi
11562 db_keys = list(self.trees[root_config.root][
11563 tree_type].dbapi._aux_cache_keys)
11564 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11565 pkg = Package(cpv=cpv, metadata=metadata,
11566 root_config=root_config, installed=installed)
11567 if type_name == "ebuild":
11568 settings = self.pkgsettings[root_config.root]
11569 settings.setcpv(pkg)
11570 pkg.metadata["USE"] = settings["PORTAGE_USE"]
11571 pkg.metadata['CHOST'] = settings.get('CHOST', '')
11575 class MetadataRegen(PollScheduler):
11577 def __init__(self, portdb, cp_iter=None, consumer=None,
11578 max_jobs=None, max_load=None):
11579 PollScheduler.__init__(self)
11580 self._portdb = portdb
11581 self._global_cleanse = False
11582 if cp_iter is None:
11583 cp_iter = self._iter_every_cp()
11584 # We can globally cleanse stale cache only if we
11585 # iterate over every single cp.
11586 self._global_cleanse = True
11587 self._cp_iter = cp_iter
11588 self._consumer = consumer
11590 if max_jobs is None:
11593 self._max_jobs = max_jobs
11594 self._max_load = max_load
11595 self._sched_iface = self._sched_iface_class(
11596 register=self._register,
11597 schedule=self._schedule_wait,
11598 unregister=self._unregister)
11600 self._valid_pkgs = set()
11601 self._cp_set = set()
11602 self._process_iter = self._iter_metadata_processes()
11603 self.returncode = os.EX_OK
11604 self._error_count = 0
11606 def _iter_every_cp(self):
11607 every_cp = self._portdb.cp_all()
11608 every_cp.sort(reverse=True)
11611 yield every_cp.pop()
11615 def _iter_metadata_processes(self):
11616 portdb = self._portdb
11617 valid_pkgs = self._valid_pkgs
11618 cp_set = self._cp_set
11619 consumer = self._consumer
11621 for cp in self._cp_iter:
11623 portage.writemsg_stdout("Processing %s\n" % cp)
11624 cpv_list = portdb.cp_list(cp)
11625 for cpv in cpv_list:
11626 valid_pkgs.add(cpv)
11627 ebuild_path, repo_path = portdb.findname2(cpv)
11628 metadata, st, emtime = portdb._pull_valid_cache(
11629 cpv, ebuild_path, repo_path)
11630 if metadata is not None:
11631 if consumer is not None:
11632 consumer(cpv, ebuild_path,
11633 repo_path, metadata)
11636 yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
11637 ebuild_mtime=emtime,
11638 metadata_callback=portdb._metadata_callback,
11639 portdb=portdb, repo_path=repo_path,
11640 settings=portdb.doebuild_settings)
11644 portdb = self._portdb
11645 from portage.cache.cache_errors import CacheError
11648 while self._schedule():
11654 if self._global_cleanse:
11655 for mytree in portdb.porttrees:
11657 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11658 except CacheError, e:
11659 portage.writemsg("Error listing cache entries for " + \
11660 "'%s': %s, continuing...\n" % (mytree, e),
11666 cp_set = self._cp_set
11667 cpv_getkey = portage.cpv_getkey
11668 for mytree in portdb.porttrees:
11670 dead_nodes[mytree] = set(cpv for cpv in \
11671 portdb.auxdb[mytree].iterkeys() \
11672 if cpv_getkey(cpv) in cp_set)
11673 except CacheError, e:
11674 portage.writemsg("Error listing cache entries for " + \
11675 "'%s': %s, continuing...\n" % (mytree, e),
11682 for y in self._valid_pkgs:
11683 for mytree in portdb.porttrees:
11684 if portdb.findname2(y, mytree=mytree)[0]:
11685 dead_nodes[mytree].discard(y)
11687 for mytree, nodes in dead_nodes.iteritems():
11688 auxdb = portdb.auxdb[mytree]
11692 except (KeyError, CacheError):
11695 def _schedule_tasks(self):
11698 @returns: True if there may be remaining tasks to schedule,
11701 while self._can_add_job():
11703 metadata_process = self._process_iter.next()
11704 except StopIteration:
11708 metadata_process.scheduler = self._sched_iface
11709 metadata_process.addExitListener(self._metadata_exit)
11710 metadata_process.start()
11713 def _metadata_exit(self, metadata_process):
11715 if metadata_process.returncode != os.EX_OK:
11716 self.returncode = 1
11717 self._error_count += 1
11718 self._valid_pkgs.discard(metadata_process.cpv)
11719 portage.writemsg("Error processing %s, continuing...\n" % \
11720 (metadata_process.cpv,), noiselevel=-1)
11722 if self._consumer is not None:
11723 # On failure, still notify the consumer (in this case the metadata
11724 # argument is None).
11725 self._consumer(metadata_process.cpv,
11726 metadata_process.ebuild_path,
11727 metadata_process.repo_path,
11728 metadata_process.metadata)
11732 class UninstallFailure(portage.exception.PortageException):
11734 An instance of this class is raised by unmerge() when
11735 an uninstallation fails.
11738 def __init__(self, *pargs):
11739 portage.exception.PortageException.__init__(self, pargs)
11741 self.status = pargs[0]
11743 def unmerge(root_config, myopts, unmerge_action,
11744 unmerge_files, ldpath_mtimes, autoclean=0,
11745 clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11746 scheduler=None, writemsg_level=portage.util.writemsg_level):
11748 quiet = "--quiet" in myopts
11749 settings = root_config.settings
11750 sets = root_config.sets
11751 vartree = root_config.trees["vartree"]
11752 candidate_catpkgs=[]
11754 xterm_titles = "notitles" not in settings.features
11755 out = portage.output.EOutput()
11757 db_keys = list(vartree.dbapi._aux_cache_keys)
11760 pkg = pkg_cache.get(cpv)
11762 pkg = Package(cpv=cpv, installed=True,
11763 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11764 root_config=root_config,
11765 type_name="installed")
11766 pkg_cache[cpv] = pkg
11769 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11771 # At least the parent needs to exist for the lock file.
11772 portage.util.ensure_dirs(vdb_path)
11773 except portage.exception.PortageException:
11777 if os.access(vdb_path, os.W_OK):
11778 vdb_lock = portage.locks.lockdir(vdb_path)
11779 realsyslist = sets["system"].getAtoms()
11781 for x in realsyslist:
11782 mycp = portage.dep_getkey(x)
11783 if mycp in settings.getvirtuals():
11785 for provider in settings.getvirtuals()[mycp]:
11786 if vartree.dbapi.match(provider):
11787 providers.append(provider)
11788 if len(providers) == 1:
11789 syslist.extend(providers)
11791 syslist.append(mycp)
11793 mysettings = portage.config(clone=settings)
11795 if not unmerge_files:
11796 if unmerge_action == "unmerge":
11798 print bold("emerge unmerge") + " can only be used with specific package names"
11804 localtree = vartree
11805 # process all arguments and add all
11806 # valid db entries to candidate_catpkgs
11808 if not unmerge_files:
11809 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11811 #we've got command-line arguments
11812 if not unmerge_files:
11813 print "\nNo packages to unmerge have been provided.\n"
11815 for x in unmerge_files:
11816 arg_parts = x.split('/')
11817 if x[0] not in [".","/"] and \
11818 arg_parts[-1][-7:] != ".ebuild":
11819 #possible cat/pkg or dep; treat as such
11820 candidate_catpkgs.append(x)
11821 elif unmerge_action in ["prune","clean"]:
11822 print "\n!!! Prune and clean do not accept individual" + \
11823 " ebuilds as arguments;\n skipping.\n"
11826 # it appears that the user is specifying an installed
11827 # ebuild and we're in "unmerge" mode, so it's ok.
11828 if not os.path.exists(x):
11829 print "\n!!! The path '"+x+"' doesn't exist.\n"
11832 absx = os.path.abspath(x)
11833 sp_absx = absx.split("/")
11834 if sp_absx[-1][-7:] == ".ebuild":
11836 absx = "/".join(sp_absx)
11838 sp_absx_len = len(sp_absx)
11840 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11841 vdb_len = len(vdb_path)
11843 sp_vdb = vdb_path.split("/")
11844 sp_vdb_len = len(sp_vdb)
11846 if not os.path.exists(absx+"/CONTENTS"):
11847 print "!!! Not a valid db dir: "+str(absx)
11850 if sp_absx_len <= sp_vdb_len:
11851 # The Path is shorter... so it can't be inside the vdb.
11854 print "\n!!!",x,"cannot be inside "+ \
11855 vdb_path+"; aborting.\n"
11858 for idx in range(0,sp_vdb_len):
11859 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11862 print "\n!!!", x, "is not inside "+\
11863 vdb_path+"; aborting.\n"
11866 print "="+"/".join(sp_absx[sp_vdb_len:])
11867 candidate_catpkgs.append(
11868 "="+"/".join(sp_absx[sp_vdb_len:]))
11871 if (not "--quiet" in myopts):
11873 if settings["ROOT"] != "/":
11874 writemsg_level(darkgreen(newline+ \
11875 ">>> Using system located in ROOT tree %s\n" % \
11878 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11879 not ("--quiet" in myopts):
11880 writemsg_level(darkgreen(newline+\
11881 ">>> These are the packages that would be unmerged:\n"))
11883 # Preservation of order is required for --depclean and --prune so
11884 # that dependencies are respected. Use all_selected to eliminate
11885 # duplicate packages since the same package may be selected by
11888 all_selected = set()
11889 for x in candidate_catpkgs:
11890 # cycle through all our candidate deps and determine
11891 # what will and will not get unmerged
11893 mymatch = vartree.dbapi.match(x)
11894 except portage.exception.AmbiguousPackageName, errpkgs:
11895 print "\n\n!!! The short ebuild name \"" + \
11896 x + "\" is ambiguous. Please specify"
11897 print "!!! one of the following fully-qualified " + \
11898 "ebuild names instead:\n"
11899 for i in errpkgs[0]:
11900 print " " + green(i)
11904 if not mymatch and x[0] not in "<>=~":
11905 mymatch = localtree.dep_match(x)
11907 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11908 (x, unmerge_action), noiselevel=-1)
11912 {"protected": set(), "selected": set(), "omitted": set()})
11913 mykey = len(pkgmap) - 1
11914 if unmerge_action=="unmerge":
11916 if y not in all_selected:
11917 pkgmap[mykey]["selected"].add(y)
11918 all_selected.add(y)
11919 elif unmerge_action == "prune":
11920 if len(mymatch) == 1:
11922 best_version = mymatch[0]
11923 best_slot = vartree.getslot(best_version)
11924 best_counter = vartree.dbapi.cpv_counter(best_version)
11925 for mypkg in mymatch[1:]:
11926 myslot = vartree.getslot(mypkg)
11927 mycounter = vartree.dbapi.cpv_counter(mypkg)
11928 if (myslot == best_slot and mycounter > best_counter) or \
11929 mypkg == portage.best([mypkg, best_version]):
11930 if myslot == best_slot:
11931 if mycounter < best_counter:
11932 # On slot collision, keep the one with the
11933 # highest counter since it is the most
11934 # recently installed.
11936 best_version = mypkg
11938 best_counter = mycounter
11939 pkgmap[mykey]["protected"].add(best_version)
11940 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11941 if mypkg != best_version and mypkg not in all_selected)
11942 all_selected.update(pkgmap[mykey]["selected"])
11944 # unmerge_action == "clean"
11946 for mypkg in mymatch:
11947 if unmerge_action == "clean":
11948 myslot = localtree.getslot(mypkg)
11950 # since we're pruning, we don't care about slots
11951 # and put all the pkgs in together
11953 if myslot not in slotmap:
11954 slotmap[myslot] = {}
11955 slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11957 for mypkg in vartree.dbapi.cp_list(
11958 portage.dep_getkey(mymatch[0])):
11959 myslot = vartree.getslot(mypkg)
11960 if myslot not in slotmap:
11961 slotmap[myslot] = {}
11962 slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
11964 for myslot in slotmap:
11965 counterkeys = slotmap[myslot].keys()
11966 if not counterkeys:
11969 pkgmap[mykey]["protected"].add(
11970 slotmap[myslot][counterkeys[-1]])
11971 del counterkeys[-1]
11973 for counter in counterkeys[:]:
11974 mypkg = slotmap[myslot][counter]
11975 if mypkg not in mymatch:
11976 counterkeys.remove(counter)
11977 pkgmap[mykey]["protected"].add(
11978 slotmap[myslot][counter])
11980 #be pretty and get them in order of merge:
11981 for ckey in counterkeys:
11982 mypkg = slotmap[myslot][ckey]
11983 if mypkg not in all_selected:
11984 pkgmap[mykey]["selected"].add(mypkg)
11985 all_selected.add(mypkg)
11986 # ok, now the last-merged package
11987 # is protected, and the rest are selected
11988 numselected = len(all_selected)
11989 if global_unmerge and not numselected:
11990 portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11993 if not numselected:
11994 portage.writemsg_stdout(
11995 "\n>>> No packages selected for removal by " + \
11996 unmerge_action + "\n")
12000 vartree.dbapi.flush_cache()
12001 portage.locks.unlockdir(vdb_lock)
12003 from portage.sets.base import EditablePackageSet
12005 # generate a list of package sets that are directly or indirectly listed in "world",
12006 # as there is no persistent list of "installed" sets
12007 installed_sets = ["world"]
12012 pos = len(installed_sets)
12013 for s in installed_sets[pos - 1:]:
12016 candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
12019 installed_sets += candidates
12020 installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
12023 # we don't want to unmerge packages that are still listed in user-editable package sets
12024 # listed in "world" as they would be remerged on the next update of "world" or the
12025 # relevant package sets.
12026 unknown_sets = set()
12027 for cp in xrange(len(pkgmap)):
12028 for cpv in pkgmap[cp]["selected"].copy():
12032 # It could have been uninstalled
12033 # by a concurrent process.
12036 if unmerge_action != "clean" and \
12037 root_config.root == "/" and \
12038 portage.match_from_list(
12039 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
12040 msg = ("Not unmerging package %s since there is no valid " + \
12041 "reason for portage to unmerge itself.") % (pkg.cpv,)
12042 for line in textwrap.wrap(msg, 75):
12044 # adjust pkgmap so the display output is correct
12045 pkgmap[cp]["selected"].remove(cpv)
12046 all_selected.remove(cpv)
12047 pkgmap[cp]["protected"].add(cpv)
12051 for s in installed_sets:
12052 # skip sets that the user requested to unmerge, and skip world
12053 # unless we're unmerging a package set (as the package would be
12054 # removed from "world" later on)
12055 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
12059 if s in unknown_sets:
12061 unknown_sets.add(s)
12062 out = portage.output.EOutput()
12063 out.eerror(("Unknown set '@%s' in " + \
12064 "%svar/lib/portage/world_sets") % \
12065 (s, root_config.root))
12068 # only check instances of EditablePackageSet as other classes are generally used for
12069 # special purposes and can be ignored here (and are usually generated dynamically, so the
12070 # user can't do much about them anyway)
12071 if isinstance(sets[s], EditablePackageSet):
12073 # This is derived from a snippet of code in the
12074 # depgraph._iter_atoms_for_pkg() method.
12075 for atom in sets[s].iterAtomsForPackage(pkg):
12076 inst_matches = vartree.dbapi.match(atom)
12077 inst_matches.reverse() # descending order
12079 for inst_cpv in inst_matches:
12081 inst_pkg = _pkg(inst_cpv)
12083 # It could have been uninstalled
12084 # by a concurrent process.
12087 if inst_pkg.cp != atom.cp:
12089 if pkg >= inst_pkg:
12090 # This is descending order, and we're not
12091 # interested in any versions <= pkg given.
12093 if pkg.slot_atom != inst_pkg.slot_atom:
12094 higher_slot = inst_pkg
12096 if higher_slot is None:
12100 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
12101 #print colorize("WARN", "but still listed in the following package sets:")
12102 #print " %s\n" % ", ".join(parents)
12103 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
12104 print colorize("WARN", "still referenced by the following package sets:")
12105 print " %s\n" % ", ".join(parents)
12106 # adjust pkgmap so the display output is correct
12107 pkgmap[cp]["selected"].remove(cpv)
12108 all_selected.remove(cpv)
12109 pkgmap[cp]["protected"].add(cpv)
12113 numselected = len(all_selected)
12114 if not numselected:
12116 "\n>>> No packages selected for removal by " + \
12117 unmerge_action + "\n")
12120 # Unmerge order only matters in some cases
12124 selected = d["selected"]
12127 cp = portage.cpv_getkey(iter(selected).next())
12128 cp_dict = unordered.get(cp)
12129 if cp_dict is None:
12131 unordered[cp] = cp_dict
12134 for k, v in d.iteritems():
12135 cp_dict[k].update(v)
12136 pkgmap = [unordered[cp] for cp in sorted(unordered)]
12138 for x in xrange(len(pkgmap)):
12139 selected = pkgmap[x]["selected"]
12142 for mytype, mylist in pkgmap[x].iteritems():
12143 if mytype == "selected":
12145 mylist.difference_update(all_selected)
12146 cp = portage.cpv_getkey(iter(selected).next())
12147 for y in localtree.dep_match(cp):
12148 if y not in pkgmap[x]["omitted"] and \
12149 y not in pkgmap[x]["selected"] and \
12150 y not in pkgmap[x]["protected"] and \
12151 y not in all_selected:
12152 pkgmap[x]["omitted"].add(y)
12153 if global_unmerge and not pkgmap[x]["selected"]:
12154 #avoid cluttering the preview printout with stuff that isn't getting unmerged
12156 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
12157 writemsg_level(colorize("BAD","\a\n\n!!! " + \
12158 "'%s' is part of your system profile.\n" % cp),
12159 level=logging.WARNING, noiselevel=-1)
12160 writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
12161 "be damaging to your system.\n\n"),
12162 level=logging.WARNING, noiselevel=-1)
12163 if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
12164 countdown(int(settings["EMERGE_WARNING_DELAY"]),
12165 colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
12167 writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
12169 writemsg_level(bold(cp) + ": ", noiselevel=-1)
12170 for mytype in ["selected","protected","omitted"]:
12172 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
12173 if pkgmap[x][mytype]:
12174 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
12175 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12176 for pn, ver, rev in sorted_pkgs:
12180 myversion = ver + "-" + rev
12181 if mytype == "selected":
12183 colorize("UNMERGE_WARN", myversion + " "),
12187 colorize("GOOD", myversion + " "), noiselevel=-1)
12189 writemsg_level("none ", noiselevel=-1)
12191 writemsg_level("\n", noiselevel=-1)
12193 writemsg_level("\n", noiselevel=-1)
12195 writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12196 " packages are slated for removal.\n")
12197 writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12198 " and " + colorize("GOOD", "'omitted'") + \
12199 " packages will not be removed.\n\n")
12201 if "--pretend" in myopts:
12202 #we're done... return
12204 if "--ask" in myopts:
12205 if userquery("Would you like to unmerge these packages?")=="No":
12206 # enter pretend mode for correct formatting of results
12207 myopts["--pretend"] = True
12212 #the real unmerging begins, after a short delay....
12213 if clean_delay and not autoclean:
12214 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12216 for x in xrange(len(pkgmap)):
12217 for y in pkgmap[x]["selected"]:
12218 writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12219 emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12220 mysplit = y.split("/")
12222 retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12223 mysettings, unmerge_action not in ["clean","prune"],
12224 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12225 scheduler=scheduler)
12227 if retval != os.EX_OK:
12228 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12230 raise UninstallFailure(retval)
12233 if clean_world and hasattr(sets["world"], "cleanPackage"):
12234 sets["world"].cleanPackage(vartree.dbapi, y)
12235 emergelog(xterm_titles, " >>> unmerge success: "+y)
12236 if clean_world and hasattr(sets["world"], "remove"):
12237 for s in root_config.setconfig.active:
12238 sets["world"].remove(SETPREFIX+s)
12241 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12243 if os.path.exists("/usr/bin/install-info"):
12244 out = portage.output.EOutput()
12249 inforoot=normpath(root+z)
12250 if os.path.isdir(inforoot):
12251 infomtime = long(os.stat(inforoot).st_mtime)
12252 if inforoot not in prev_mtimes or \
12253 prev_mtimes[inforoot] != infomtime:
12254 regen_infodirs.append(inforoot)
12256 if not regen_infodirs:
12257 portage.writemsg_stdout("\n")
12258 out.einfo("GNU info directory index is up-to-date.")
12260 portage.writemsg_stdout("\n")
12261 out.einfo("Regenerating GNU info directory index...")
12263 dir_extensions = ("", ".gz", ".bz2")
12267 for inforoot in regen_infodirs:
12271 if not os.path.isdir(inforoot) or \
12272 not os.access(inforoot, os.W_OK):
12275 file_list = os.listdir(inforoot)
12277 dir_file = os.path.join(inforoot, "dir")
12278 moved_old_dir = False
12279 processed_count = 0
12280 for x in file_list:
12281 if x.startswith(".") or \
12282 os.path.isdir(os.path.join(inforoot, x)):
12284 if x.startswith("dir"):
12286 for ext in dir_extensions:
12287 if x == "dir" + ext or \
12288 x == "dir" + ext + ".old":
12293 if processed_count == 0:
12294 for ext in dir_extensions:
12296 os.rename(dir_file + ext, dir_file + ext + ".old")
12297 moved_old_dir = True
12298 except EnvironmentError, e:
12299 if e.errno != errno.ENOENT:
12302 processed_count += 1
12303 myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12304 existsstr="already exists, for file `"
12306 if re.search(existsstr,myso):
12307 # Already exists... Don't increment the count for this.
12309 elif myso[:44]=="install-info: warning: no info dir entry in ":
12310 # This info file doesn't contain a DIR-header: install-info produces this
12311 # (harmless) warning (the --quiet switch doesn't seem to work).
12312 # Don't increment the count for this.
12315 badcount=badcount+1
12316 errmsg += myso + "\n"
12319 if moved_old_dir and not os.path.exists(dir_file):
12320 # We didn't generate a new dir file, so put the old file
12321 # back where it was originally found.
12322 for ext in dir_extensions:
12324 os.rename(dir_file + ext + ".old", dir_file + ext)
12325 except EnvironmentError, e:
12326 if e.errno != errno.ENOENT:
12330 # Clean dir.old cruft so that they don't prevent
12331 # unmerge of otherwise empty directories.
12332 for ext in dir_extensions:
12334 os.unlink(dir_file + ext + ".old")
12335 except EnvironmentError, e:
12336 if e.errno != errno.ENOENT:
12340 #update mtime so we can potentially avoid regenerating.
12341 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12344 out.eerror("Processed %d info files; %d errors." % \
12345 (icount, badcount))
12346 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12349 out.einfo("Processed %d info files." % (icount,))
12352 def display_news_notification(root_config, myopts):
12353 target_root = root_config.root
12354 trees = root_config.trees
12355 settings = trees["vartree"].settings
12356 portdb = trees["porttree"].dbapi
12357 vardb = trees["vartree"].dbapi
12358 NEWS_PATH = os.path.join("metadata", "news")
12359 UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12360 newsReaderDisplay = False
12361 update = "--pretend" not in myopts
12363 for repo in portdb.getRepositories():
12364 unreadItems = checkUpdatedNewsItems(
12365 portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12367 if not newsReaderDisplay:
12368 newsReaderDisplay = True
12370 print colorize("WARN", " * IMPORTANT:"),
12371 print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12374 if newsReaderDisplay:
12375 print colorize("WARN", " *"),
12376 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
12379 def display_preserved_libs(vardbapi):
12382 # Ensure the registry is consistent with existing files.
12383 vardbapi.plib_registry.pruneNonExisting()
12385 if vardbapi.plib_registry.hasEntries():
12387 print colorize("WARN", "!!!") + " existing preserved libs:"
12388 plibdata = vardbapi.plib_registry.getPreservedLibs()
12389 linkmap = vardbapi.linkmap
12392 linkmap_broken = False
12396 except portage.exception.CommandNotFound, e:
12397 writemsg_level("!!! Command Not Found: %s\n" % (e,),
12398 level=logging.ERROR, noiselevel=-1)
12400 linkmap_broken = True
12402 search_for_owners = set()
12403 for cpv in plibdata:
12404 internal_plib_keys = set(linkmap._obj_key(f) \
12405 for f in plibdata[cpv])
12406 for f in plibdata[cpv]:
12407 if f in consumer_map:
12410 for c in linkmap.findConsumers(f):
12411 # Filter out any consumers that are also preserved libs
12412 # belonging to the same package as the provider.
12413 if linkmap._obj_key(c) not in internal_plib_keys:
12414 consumers.append(c)
12416 consumer_map[f] = consumers
12417 search_for_owners.update(consumers[:MAX_DISPLAY+1])
12419 owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12421 for cpv in plibdata:
12422 print colorize("WARN", ">>>") + " package: %s" % cpv
12424 for f in plibdata[cpv]:
12425 obj_key = linkmap._obj_key(f)
12426 alt_paths = samefile_map.get(obj_key)
12427 if alt_paths is None:
12429 samefile_map[obj_key] = alt_paths
12432 for alt_paths in samefile_map.itervalues():
12433 alt_paths = sorted(alt_paths)
12434 for p in alt_paths:
12435 print colorize("WARN", " * ") + " - %s" % (p,)
12437 consumers = consumer_map.get(f, [])
12438 for c in consumers[:MAX_DISPLAY]:
12439 print colorize("WARN", " * ") + " used by %s (%s)" % \
12440 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12441 if len(consumers) == MAX_DISPLAY + 1:
12442 print colorize("WARN", " * ") + " used by %s (%s)" % \
12443 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12444 for x in owners.get(consumers[MAX_DISPLAY], [])))
12445 elif len(consumers) > MAX_DISPLAY:
12446 print colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY)
12447 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12450 def _flush_elog_mod_echo():
12452 Dump the mod_echo output now so that our other
12453 notifications are shown last.
12455 @returns: True if messages were shown, False otherwise.
12457 messages_shown = False
12459 from portage.elog import mod_echo
12460 except ImportError:
12461 pass # happens during downgrade to a version without the module
12463 messages_shown = bool(mod_echo._items)
12464 mod_echo.finalize()
12465 return messages_shown
# NOTE(review): this chunk is a line-numbered source listing (leading integers
# are listing line numbers, not code) with indentation stripped and interior
# lines missing — it is not runnable as-is. Python 2 syntax throughout.
#
# post_emerge: end-of-merge-session housekeeping — logs the exit status,
# flushes elog "echo" messages, updates info-dir files under a vdb lock,
# reports pending CONFIG_PROTECT updates, shows news and preserved-libs
# warnings. The docstring fragments below say it ends by calling
# sys.exit(retval) — that call is in a missing line; TODO confirm.
12467 def post_emerge(root_config, myopts, mtimedb, retval):
12469 Misc. things to run at the end of a merge session.
12472 Update Config Files
12475 Display preserved libs warnings
12478 @param trees: A dictionary mapping each ROOT to it's package databases
12480 @param mtimedb: The mtimeDB to store data needed across merge invocations
12481 @type mtimedb: MtimeDB class instance
12482 @param retval: Emerge's return value
12486 1. Calls sys.exit(retval)
12489 target_root = root_config.root
12490 trees = { target_root : root_config.trees }
12491 vardbapi = trees[target_root]["vartree"].dbapi
12492 settings = vardbapi.settings
12493 info_mtimes = mtimedb["info"]
12495 # Load the most current variables from ${ROOT}/etc/profile.env
12498 settings.regenerate()
12501 config_protect = settings.get("CONFIG_PROTECT","").split()
12502 infodirs = settings.get("INFOPATH","").split(":") + \
12503 settings.get("INFODIR","").split(":")
# Log whether the session succeeded; emergelog's first arg toggles xterm titles.
12507 if retval == os.EX_OK:
12508 exit_msg = " *** exiting successfully."
12510 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12511 emergelog("notitles" not in settings.features, exit_msg)
12513 _flush_elog_mod_echo()
# Short-circuit when pretending, or when the vdb counter hash proves that no
# package was merged/unmerged since the counter was recorded.
12515 counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12516 if "--pretend" in myopts or (counter_hash is not None and \
12517 counter_hash == vardbapi._counter_hash()):
12518 display_news_notification(root_config, myopts)
12519 # If vdb state has not changed then there's nothing else to do.
12522 vdb_path = os.path.join(target_root, portage.VDB_PATH)
12523 portage.util.ensure_dirs(vdb_path)
# Take the vdb lock only when writable and not pretending; the matching
# unlock at 12536 is presumably in a try/finally (lines missing here).
12525 if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
12526 vdb_lock = portage.locks.lockdir(vdb_path)
12530 if "noinfo" not in settings.features:
12531 chk_updated_info_files(target_root,
12532 infodirs, info_mtimes, retval)
12536 portage.locks.unlockdir(vdb_lock)
12538 chk_updated_cfg_files(target_root, config_protect)
12540 display_news_notification(root_config, myopts)
12541 if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
12542 display_preserved_libs(vardbapi)
# NOTE(review): numbered listing fragment (leading integers are listing line
# numbers; indentation stripped; interior lines missing). Python 2 syntax.
#
# chk_updated_cfg_files: scan each CONFIG_PROTECT path under target_root for
# pending "._cfg????_*" update files (via find(1)) and print a warning telling
# the user how many config files need updating.
12547 def chk_updated_cfg_files(target_root, config_protect):
12549 #number of directories with some protect files in them
12551 for x in config_protect:
12552 x = os.path.join(target_root, x.lstrip(os.path.sep))
# Skip unreadable paths up-front rather than letting find(1) spew errors.
12553 if not os.access(x, os.W_OK):
12554 # Avoid Permission denied errors generated
12558 mymode = os.lstat(x).st_mode
12561 if stat.S_ISLNK(mymode):
12562 # We want to treat it like a directory if it
12563 # is a symlink to an existing directory.
12565 real_mode = os.stat(x).st_mode
12566 if stat.S_ISDIR(real_mode):
# Directories get a recursive find (pruning dot-dirs); plain files get a
# -maxdepth 1 search for the pending-update twin of that one file.
12570 if stat.S_ISDIR(mymode):
12571 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12573 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12574 os.path.split(x.rstrip(os.path.sep))
12575 mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12576 a = commands.getstatusoutput(mycommand)
12578 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12580 # Show the error message alone, sending stdout to /dev/null.
12581 os.system(mycommand + " 1>/dev/null")
# -print0 output: split on NUL; the trailing NUL yields one empty element.
12583 files = a[1].split('\0')
12584 # split always produces an empty string as the last element
12585 if files and not files[-1]:
12589 print "\n"+colorize("WARN", " * IMPORTANT:"),
12590 if stat.S_ISDIR(mymode):
12591 print "%d config files in '%s' need updating." % \
12594 print "config file '%s' needs updating." % x
12597 print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12598 " section of the " + bold("emerge")
12599 print " "+yellow("*")+" man page to learn how to update config files."
# NOTE(review): numbered listing fragment (indentation stripped, some lines
# missing). Thin wrapper that delegates to portage.news.NewsManager.
12601 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12604 Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
12605 Returns the number of unread (yet relevent) items.
12607 @param portdb: a portage tree database
12608 @type portdb: pordbapi
12609 @param vardb: an installed package database
12610 @type vardb: vardbapi
12613 @param UNREAD_PATH:
12619 1. The number of unread but relevant news items.
# Import here (not at module top) so the news machinery is only loaded on use.
12622 from portage.news import NewsManager
12623 manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12624 return manager.getUnreadItems( repo_id, update=update )
# NOTE(review): numbered listing fragment; the branch for "no word character
# found" and the final return are in missing lines.
#
# insert_category_into_atom: splice "category/" into an atom string just
# before its first word character (i.e. after any leading operator such as
# ">=" or "="), producing e.g. ">=foo-1" -> ">=cat/foo-1".
12626 def insert_category_into_atom(atom, category):
12627 alphanum = re.search(r'\w', atom)
12629 ret = atom[:alphanum.start()] + "%s/" % category + \
12630 atom[alphanum.start():]
# NOTE(review): numbered listing fragment; the guard between the search and
# the substitution is in a missing line.
#
# is_valid_package_atom: validate a possibly category-less atom by injecting
# a dummy "cat/" category before the first word character, then deferring to
# portage.isvalidatom.
12635 def is_valid_package_atom(x):
12637 alphanum = re.search(r'\w', x)
12639 x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12640 return portage.isvalidatom(x)
# show_blocker_docs_link: print a pointer to the Gentoo Handbook section on
# blocked packages. (Numbered listing fragment; Python 2 print statements.)
12642 def show_blocker_docs_link():
12644 print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12645 print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12647 print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
# show_mask_docs: print a pointer to the MASKED PACKAGES documentation.
# (Numbered listing fragment; Python 2 print statements.)
12650 def show_mask_docs():
12651 print "For more information, see the MASKED PACKAGES section in the emerge"
12652 print "man page or refer to the Gentoo Handbook."
# NOTE(review): numbered listing fragment (leading integers are listing line
# numbers; indentation stripped; MANY interior lines missing — e.g. try/else
# lines, loop headers, return statements). Python 2 syntax (print statements,
# `except X, e:`, octal literals like 0755). Not runnable as-is.
#
# action_sync: implements `emerge --sync` / `emerge metadata`. Dispatches on
# the sync method — skip (metadata-only), git pull, rsync (with server
# timestamp probing and retry loop), or cvs — then regenerates metadata
# cache, applies global updates, runs the user's post_sync hook, and nags
# about available portage upgrades.
12654 def action_sync(settings, trees, mtimedb, myopts, myaction):
12655 xterm_titles = "notitles" not in settings.features
12656 emergelog(xterm_titles, " === sync")
12657 myportdir = settings.get("PORTDIR", None)
12658 out = portage.output.EOutput()
12660 sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
12662 if myportdir[-1]=="/":
12663 myportdir=myportdir[:-1]
# Create PORTDIR if absent (mode 0755 — Python 2 octal literal).
12665 st = os.stat(myportdir)
12669 print ">>>",myportdir,"not found, creating it."
12670 os.makedirs(myportdir,0755)
12671 st = os.stat(myportdir)
# FEATURES=usersync: if running privileged and PORTDIR belongs to another
# uid/gid, drop to the owner's identity for the spawned sync commands.
12674 spawn_kwargs["env"] = settings.environ()
12675 if 'usersync' in settings.features and \
12676 portage.data.secpass >= 2 and \
12677 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12678 st.st_gid != os.getgid() and st.st_mode & 0070):
12680 homedir = pwd.getpwuid(st.st_uid).pw_dir
12684 # Drop privileges when syncing, in order to match
12685 # existing uid/gid settings.
12686 spawn_kwargs["uid"] = st.st_uid
12687 spawn_kwargs["gid"] = st.st_gid
12688 spawn_kwargs["groups"] = [st.st_gid]
12689 spawn_kwargs["env"]["HOME"] = homedir
12691 if not st.st_mode & 0020:
12692 umask = umask | 0020
12693 spawn_kwargs["umask"] = umask
12695 syncuri = settings.get("SYNC", "").strip()
12697 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12698 noiselevel=-1, level=logging.ERROR)
# Detect VCS checkouts inside PORTDIR so rsync doesn't clobber them.
12701 vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12702 vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12705 dosyncuri = syncuri
12706 updatecache_flg = False
# --- dispatch on sync method -------------------------------------------
12707 if myaction == "metadata":
12708 print "skipping sync"
12709 updatecache_flg = True
12710 elif ".git" in vcs_dirs:
12711 # Update existing git repository, and ignore the syncuri. We are
12712 # going to trust the user and assume that the user is in the branch
12713 # that he/she wants updated. We'll let the user manage branches with
12715 if portage.process.find_binary("git") is None:
12716 msg = ["Command not found: git",
12717 "Type \"emerge dev-util/git\" to enable git support."]
12719 writemsg_level("!!! %s\n" % l,
12720 level=logging.ERROR, noiselevel=-1)
12722 msg = ">>> Starting git pull in %s..." % myportdir
12723 emergelog(xterm_titles, msg )
12724 writemsg_level(msg + "\n")
12725 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12726 (portage._shell_quote(myportdir),), **spawn_kwargs)
12727 if exitcode != os.EX_OK:
12728 msg = "!!! git pull error in %s." % myportdir
12729 emergelog(xterm_titles, msg)
12730 writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12732 msg = ">>> Git pull in %s successful" % myportdir
12733 emergelog(xterm_titles, msg)
12734 writemsg_level(msg + "\n")
# git discards mtimes, so fix them up from the metadata cache afterwards.
12735 exitcode = git_sync_timestamps(settings, myportdir)
12736 if exitcode == os.EX_OK:
12737 updatecache_flg = True
# --- rsync sync --------------------------------------------------------
12738 elif syncuri[:8]=="rsync://":
12739 for vcs_dir in vcs_dirs:
12740 writemsg_level(("!!! %s appears to be under revision " + \
12741 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12742 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12744 if not os.path.exists("/usr/bin/rsync"):
12745 print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12746 print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
# Build the rsync option list: hardcoded defaults unless the user set
# PORTAGE_RSYNC_OPTS, in which case required options are validated back in.
12751 if settings["PORTAGE_RSYNC_OPTS"] == "":
12752 portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12753 rsync_opts.extend([
12754 "--recursive", # Recurse directories
12755 "--links", # Consider symlinks
12756 "--safe-links", # Ignore links outside of tree
12757 "--perms", # Preserve permissions
12758 "--times", # Preserive mod times
12759 "--compress", # Compress the data transmitted
12760 "--force", # Force deletion on non-empty dirs
12761 "--whole-file", # Don't do block transfers, only entire files
12762 "--delete", # Delete files that aren't in the master tree
12763 "--stats", # Show final statistics about what was transfered
12764 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12765 "--exclude=/distfiles", # Exclude distfiles from consideration
12766 "--exclude=/local", # Exclude local from consideration
12767 "--exclude=/packages", # Exclude packages from consideration
12771 # The below validation is not needed when using the above hardcoded
12774 portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12776 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12777 for opt in ("--recursive", "--times"):
12778 if opt not in rsync_opts:
12779 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12780 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12781 rsync_opts.append(opt)
12783 for exclude in ("distfiles", "local", "packages"):
12784 opt = "--exclude=/%s" % exclude
12785 if opt not in rsync_opts:
12786 portage.writemsg(yellow("WARNING:") + \
12787 " adding required option %s not included in " % opt + \
12788 "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12789 rsync_opts.append(opt)
# Official gentoo.org mirrors additionally get --timeout/--compress/
# --whole-file forced on.
12791 if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12792 def rsync_opt_startswith(opt_prefix):
12793 for x in rsync_opts:
12794 if x.startswith(opt_prefix):
12798 if not rsync_opt_startswith("--timeout="):
12799 rsync_opts.append("--timeout=%d" % mytimeout)
12801 for opt in ("--compress", "--whole-file"):
12802 if opt not in rsync_opts:
12803 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12804 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12805 rsync_opts.append(opt)
12807 if "--quiet" in myopts:
12808 rsync_opts.append("--quiet") # Shut up a lot
12810 rsync_opts.append("--verbose") # Print filelist
12812 if "--verbose" in myopts:
12813 rsync_opts.append("--progress") # Progress meter for each file
12815 if "--debug" in myopts:
12816 rsync_opts.append("--checksum") # Force checksum on all files
12818 # Real local timestamp file.
12819 servertimestampfile = os.path.join(
12820 myportdir, "metadata", "timestamp.chk")
12822 content = portage.util.grabfile(servertimestampfile)
12826 mytimestamp = time.mktime(time.strptime(content[0],
12827 "%a, %d %b %Y %H:%M:%S +0000"))
12828 except (OverflowError, ValueError):
12833 rsync_initial_timeout = \
12834 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12836 rsync_initial_timeout = 15
12839 maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12840 except SystemExit, e:
12841 raise # Needed else can't exit
12843 maxretries=3 #default number of retries
# Parse user/host/port out of the rsync:// URI so a concrete mirror IP can
# be substituted per retry.
12846 user_name, hostname, port = re.split(
12847 "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12850 if user_name is None:
12852 updatecache_flg=True
12853 all_rsync_opts = set(rsync_opts)
12854 extra_rsync_opts = shlex.split(
12855 settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12856 all_rsync_opts.update(extra_rsync_opts)
# Address family selection honours -4/--ipv4 and -6/--ipv6 in the opts.
12857 family = socket.AF_INET
12858 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12859 family = socket.AF_INET
12860 elif socket.has_ipv6 and \
12861 ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12862 family = socket.AF_INET6
# Sentinel exit codes used by the retry loop below (distinct from real
# rsync exit codes, which are non-negative).
12864 SERVER_OUT_OF_DATE = -1
12865 EXCEEDED_MAX_RETRIES = -2
# Resolve the mirror hostname to a shuffled list of candidate IPs.
12871 for addrinfo in socket.getaddrinfo(
12872 hostname, None, family, socket.SOCK_STREAM):
12873 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12874 # IPv6 addresses need to be enclosed in square brackets
12875 ips.append("[%s]" % addrinfo[4][0])
12877 ips.append(addrinfo[4][0])
12878 from random import shuffle
12880 except SystemExit, e:
12881 raise # Needed else can't exit
12882 except Exception, e:
12883 print "Notice:",str(e)
12888 dosyncuri = syncuri.replace(
12889 "//" + user_name + hostname + port + "/",
12890 "//" + user_name + ips[0] + port + "/", 1)
12891 except SystemExit, e:
12892 raise # Needed else can't exit
12893 except Exception, e:
12894 print "Notice:",str(e)
12898 if "--ask" in myopts:
12899 if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12904 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12905 if "--quiet" not in myopts:
12906 print ">>> Starting rsync with "+dosyncuri+"..."
12908 emergelog(xterm_titles,
12909 ">>> Starting retry %d of %d with %s" % \
12910 (retries,maxretries,dosyncuri))
12911 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12913 if mytimestamp != 0 and "--quiet" not in myopts:
12914 print ">>> Checking server timestamp ..."
12916 rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12918 if "--debug" in myopts:
12921 exitcode = os.EX_OK
12922 servertimestamp = 0
12923 # Even if there's no timestamp available locally, fetch the
12924 # timestamp anyway as an initial probe to verify that the server is
12925 # responsive. This protects us from hanging indefinitely on a
12926 # connection attempt to an unresponsive server which rsync's
12927 # --timeout option does not prevent.
12929 # Temporary file for remote server timestamp comparison.
12930 from tempfile import mkstemp
12931 fd, tmpservertimestampfile = mkstemp()
12933 mycommand = rsynccommand[:]
12934 mycommand.append(dosyncuri.rstrip("/") + \
12935 "/metadata/timestamp.chk")
12936 mycommand.append(tmpservertimestampfile)
# SIGALRM guards the initial timestamp probe; --timeout does not cover
# the connection phase.
12940 def timeout_handler(signum, frame):
12941 raise portage.exception.PortageException("timed out")
12942 signal.signal(signal.SIGALRM, timeout_handler)
12943 # Timeout here in case the server is unresponsive. The
12944 # --timeout rsync option doesn't apply to the initial
12945 # connection attempt.
12946 if rsync_initial_timeout:
12947 signal.alarm(rsync_initial_timeout)
12949 mypids.extend(portage.process.spawn(
12950 mycommand, env=settings.environ(), returnpid=True))
12951 exitcode = os.waitpid(mypids[0], 0)[1]
12952 content = portage.grabfile(tmpservertimestampfile)
12954 if rsync_initial_timeout:
12957 os.unlink(tmpservertimestampfile)
# On probe timeout: reap or kill the straggling rsync child.
12960 except portage.exception.PortageException, e:
12964 if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
12965 os.kill(mypids[0], signal.SIGTERM)
12966 os.waitpid(mypids[0], 0)
12967 # This is the same code rsync uses for timeout.
# Normalize the raw waitpid status into an exit-code-like value
# (signal deaths become code<<8; normal exits are shifted down).
12970 if exitcode != os.EX_OK:
12971 if exitcode & 0xff:
12972 exitcode = (exitcode & 0xff) << 8
12974 exitcode = exitcode >> 8
12976 portage.process.spawned_pids.remove(mypids[0])
12979 servertimestamp = time.mktime(time.strptime(
12980 content[0], "%a, %d %b %Y %H:%M:%S +0000"))
12981 except (OverflowError, ValueError):
12983 del mycommand, mypids, content
# Compare server vs. local timestamps: equal -> already current;
# older -> refuse (SERVER_OUT_OF_DATE); newer/unknown -> do the sync.
12984 if exitcode == os.EX_OK:
12985 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
12986 emergelog(xterm_titles,
12987 ">>> Cancelling sync -- Already current.")
12990 print ">>> Timestamps on the server and in the local repository are the same."
12991 print ">>> Cancelling all further sync action. You are already up to date."
12993 print ">>> In order to force sync, remove '%s'." % servertimestampfile
12997 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
12998 emergelog(xterm_titles,
12999 ">>> Server out of date: %s" % dosyncuri)
13002 print ">>> SERVER OUT OF DATE: %s" % dosyncuri
13004 print ">>> In order to force sync, remove '%s'." % servertimestampfile
13007 exitcode = SERVER_OUT_OF_DATE
13008 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
13010 mycommand = rsynccommand + [dosyncuri+"/", myportdir]
13011 exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
# NOTE(review): 13012 and 13014 test overlapping exit-code lists;
# intervening lines are missing, so the intended success/retry
# split here cannot be confirmed from this fragment.
13012 if exitcode in [0,1,3,4,11,14,20,21]:
13014 elif exitcode in [1,3,4,11,14,20,21]:
13017 # Code 2 indicates protocol incompatibility, which is expected
13018 # for servers with protocol < 29 that don't support
13019 # --prune-empty-directories. Retry for a server that supports
13020 # at least rsync protocol version 29 (>=rsync-2.6.4).
13025 if retries<=maxretries:
13026 print ">>> Retrying..."
13031 updatecache_flg=False
13032 exitcode = EXCEEDED_MAX_RETRIES
13036 emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
13037 elif exitcode == SERVER_OUT_OF_DATE:
13039 elif exitcode == EXCEEDED_MAX_RETRIES:
13041 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
# Human-readable diagnosis of common rsync failure codes (the dispatch
# lines selecting which message applies are missing in this listing).
13046 msg.append("Rsync has reported that there is a syntax error. Please ensure")
13047 msg.append("that your SYNC statement is proper.")
13048 msg.append("SYNC=" + settings["SYNC"])
13050 msg.append("Rsync has reported that there is a File IO error. Normally")
13051 msg.append("this means your disk is full, but can be caused by corruption")
13052 msg.append("on the filesystem that contains PORTDIR. Please investigate")
13053 msg.append("and try again after the problem has been fixed.")
13054 msg.append("PORTDIR=" + settings["PORTDIR"])
13056 msg.append("Rsync was killed before it finished.")
13058 msg.append("Rsync has not successfully finished. It is recommended that you keep")
13059 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
13060 msg.append("to use rsync due to firewall or other restrictions. This should be a")
13061 msg.append("temporary problem unless complications exist with your network")
13062 msg.append("(and possibly your system's filesystem) configuration.")
# --- cvs sync ----------------------------------------------------------
13066 elif syncuri[:6]=="cvs://":
13067 if not os.path.exists("/usr/bin/cvs"):
13068 print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
13069 print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
13071 cvsroot=syncuri[6:]
13072 cvsdir=os.path.dirname(myportdir)
13073 if not os.path.exists(myportdir+"/CVS"):
13075 print ">>> Starting initial cvs checkout with "+syncuri+"..."
13076 if os.path.exists(cvsdir+"/gentoo-x86"):
13077 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
13080 os.rmdir(myportdir)
13082 if e.errno != errno.ENOENT:
13084 "!!! existing '%s' directory; exiting.\n" % myportdir)
13087 if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
13088 print "!!! cvs checkout error; exiting."
13090 os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
13093 print ">>> Starting cvs update with "+syncuri+"..."
13094 retval = portage.process.spawn_bash(
13095 "cd %s; cvs -z0 -q update -dP" % \
13096 (portage._shell_quote(myportdir),), **spawn_kwargs)
13097 if retval != os.EX_OK:
13099 dosyncuri = syncuri
13101 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
13102 noiselevel=-1, level=logging.ERROR)
# --- post-sync: cache regeneration, global updates, hooks --------------
13105 if updatecache_flg and \
13106 myaction != "metadata" and \
13107 "metadata-transfer" not in settings.features:
13108 updatecache_flg = False
13110 # Reload the whole config from scratch.
13111 settings, trees, mtimedb = load_emerge_config(trees=trees)
13112 root_config = trees[settings["ROOT"]]["root_config"]
13113 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13115 if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
13116 action_metadata(settings, portdb, myopts)
13118 if portage._global_updates(trees, mtimedb["updates"]):
13120 # Reload the whole config from scratch.
13121 settings, trees, mtimedb = load_emerge_config(trees=trees)
13122 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13123 root_config = trees[settings["ROOT"]]["root_config"]
# Compare best visible portage vs. installed portage to decide whether
# to print the "update portage first" nag below.
13125 mybestpv = portdb.xmatch("bestmatch-visible",
13126 portage.const.PORTAGE_PACKAGE_ATOM)
13127 mypvs = portage.best(
13128 trees[settings["ROOT"]]["vartree"].dbapi.match(
13129 portage.const.PORTAGE_PACKAGE_ATOM))
13131 chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
13133 if myaction != "metadata":
13134 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
13135 retval = portage.process.spawn(
13136 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
13137 dosyncuri], env=settings.environ())
13138 if retval != os.EX_OK:
13139 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
13141 if(mybestpv != mypvs) and not "--quiet" in myopts:
13143 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
13144 print red(" * ")+"that you update portage now, before any other packages are updated."
13146 print red(" * ")+"To update portage, run 'emerge portage' now."
13149 display_news_notification(root_config, myopts)
# NOTE(review): numbered listing fragment (indentation stripped, interior
# lines missing — several `try:`/`return` lines absent). Python 2 syntax.
#
# git_sync_timestamps: after a git pull (which does not preserve mtimes),
# restore ebuild/eclass mtimes from the metadata cache for every file that
# `git diff-index` reports as unmodified relative to HEAD, keeping the
# cache's mtime-based validity checks working.
13152 def git_sync_timestamps(settings, portdir):
13154 Since git doesn't preserve timestamps, synchronize timestamps between
13155 entries and ebuilds/eclasses. Assume the cache has the correct timestamp
13156 for a given file as long as the file in the working tree is not modified
13157 (relative to HEAD).
13159 cache_dir = os.path.join(portdir, "metadata", "cache")
13160 if not os.path.isdir(cache_dir):
13162 writemsg_level(">>> Synchronizing timestamps...\n")
13164 from portage.cache.cache_errors import CacheError
13166 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
13167 portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13168 except CacheError, e:
13169 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
13170 level=logging.ERROR, noiselevel=-1)
13173 ec_dir = os.path.join(portdir, "eclass")
13175 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13176 if f.endswith(".eclass"))
13178 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13179 level=logging.ERROR, noiselevel=-1)
# Ask git which tracked files are locally modified; those must keep their
# current mtimes.
13182 args = [portage.const.BASH_BINARY, "-c",
13183 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13184 portage._shell_quote(portdir)]
13186 proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13187 modified_files = set(l.rstrip("\n") for l in proc.stdout)
13189 if rval != os.EX_OK:
13192 modified_eclasses = set(ec for ec in ec_names \
13193 if os.path.join("eclass", ec + ".eclass") in modified_files)
13195 updated_ec_mtimes = {}
13197 for cpv in cache_db:
13198 cpv_split = portage.catpkgsplit(cpv)
13199 if cpv_split is None:
13200 writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13201 level=logging.ERROR, noiselevel=-1)
13204 cat, pn, ver, rev = cpv_split
13205 cat, pf = portage.catsplit(cpv)
13206 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
13207 if relative_eb_path in modified_files:
# Pull the cached mtimes for the ebuild (_mtime_) and its eclasses
# (_eclasses_ maps eclass name -> (path, mtime)).
13211 cache_entry = cache_db[cpv]
13212 eb_mtime = cache_entry.get("_mtime_")
13213 ec_mtimes = cache_entry.get("_eclasses_")
13215 writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13216 level=logging.ERROR, noiselevel=-1)
13218 except CacheError, e:
13219 writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13220 (cpv, e), level=logging.ERROR, noiselevel=-1)
13223 if eb_mtime is None:
13224 writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13225 level=logging.ERROR, noiselevel=-1)
13229 eb_mtime = long(eb_mtime)
13231 writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13232 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13235 if ec_mtimes is None:
13236 writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13237 level=logging.ERROR, noiselevel=-1)
13240 if modified_eclasses.intersection(ec_mtimes):
13243 missing_eclasses = set(ec_mtimes).difference(ec_names)
13244 if missing_eclasses:
13245 writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13246 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13250 eb_path = os.path.join(portdir, relative_eb_path)
# NOTE(review): 13252 binds the whole os.stat() result yet 13270 compares
# it to the integer eb_mtime — this line probably lost a trailing
# "[stat.ST_MTIME]" in extraction; confirm against upstream.
13252 current_eb_mtime = os.stat(eb_path)
13254 writemsg_level("!!! Missing ebuild: %s\n" % \
13255 (cpv,), level=logging.ERROR, noiselevel=-1)
# Refuse to retouch an eclass if two cache entries disagree on its mtime.
13258 inconsistent = False
13259 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13260 updated_mtime = updated_ec_mtimes.get(ec)
13261 if updated_mtime is not None and updated_mtime != ec_mtime:
13262 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13263 (cpv, ec), level=logging.ERROR, noiselevel=-1)
13264 inconsistent = True
13270 if current_eb_mtime != eb_mtime:
13271 os.utime(eb_path, (eb_mtime, eb_mtime))
# Touch each referenced eclass back to its cached mtime, once each.
13273 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13274 if ec in updated_ec_mtimes:
13276 ec_path = os.path.join(ec_dir, ec + ".eclass")
13277 current_mtime = long(os.stat(ec_path).st_mtime)
13278 if current_mtime != ec_mtime:
13279 os.utime(ec_path, (ec_mtime, ec_mtime))
13280 updated_ec_mtimes[ec] = ec_mtime
# NOTE(review): numbered listing fragment (indentation stripped, interior
# lines missing). Python 2 syntax (print >> sys.stderr, octal 0002).
#
# action_metadata: implements `emerge metadata` — mirrors the pregenerated
# metadata/cache from PORTDIR into the local depcache via
# portage.cache.util.mirror_cache, with either a quiet or a
# percentage-progress noise maker depending on --quiet.
13284 def action_metadata(settings, portdb, myopts):
13285 portage.writemsg_stdout("\n>>> Updating Portage cache: ")
13286 old_umask = os.umask(0002)
13287 cachedir = os.path.normpath(settings.depcachedir)
# Refuse to operate on obviously-wrong PORTAGE_DEPCACHEDIR values that
# point at primary system directories.
13288 if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
13289 "/lib", "/opt", "/proc", "/root", "/sbin",
13290 "/sys", "/tmp", "/usr", "/var"]:
13291 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13292 "ROOT DIRECTORY ON YOUR SYSTEM."
13293 print >> sys.stderr, \
13294 "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13296 if not os.path.exists(cachedir):
13299 ec = portage.eclass_cache.cache(portdb.porttree_root)
13300 myportdir = os.path.realpath(settings["PORTDIR"])
13301 cm = settings.load_best_module("portdbapi.metadbmodule")(
13302 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13304 from portage.cache import util
# Progress reporter: iterates all cpvs while printing a percentage,
# throttled via call_update_min.
13306 class percentage_noise_maker(util.quiet_mirroring):
13307 def __init__(self, dbapi):
13309 self.cp_all = dbapi.cp_all()
13310 l = len(self.cp_all)
13311 self.call_update_min = 100000000
13312 self.min_cp_all = l/100.0
13316 def __iter__(self):
13317 for x in self.cp_all:
13319 if self.count > self.min_cp_all:
13320 self.call_update_min = 0
13322 for y in self.dbapi.cp_list(x):
# NOTE(review): "call_update_mine" (13324) differs from
# "call_update_min" used elsewhere — looks like a typo that makes
# this assignment dead; confirm against upstream before changing.
13324 self.call_update_mine = 0
13326 def update(self, *arg):
13328 self.pstr = int(self.pstr) + 1
# Backspace over the previous percentage before printing the new one.
13331 sys.stdout.write("%s%i%%" % \
13332 ("\b" * (len(str(self.pstr))+1), self.pstr))
13334 self.call_update_min = 10000000
13336 def finish(self, *arg):
13337 sys.stdout.write("\b\b\b\b100%\n")
13340 if "--quiet" in myopts:
13341 def quicky_cpv_generator(cp_all_list):
13342 for x in cp_all_list:
13343 for y in portdb.cp_list(x):
13345 source = quicky_cpv_generator(portdb.cp_all())
13346 noise_maker = portage.cache.util.quiet_mirroring()
13348 noise_maker = source = percentage_noise_maker(portdb)
13349 portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
13350 eclass_cache=ec, verbose_instance=noise_maker)
# Restore the umask saved at entry (13286).
13353 os.umask(old_umask)
# NOTE(review): numbered listing fragment; the try/except structure around
# the stdin close is partially missing. Python 2 syntax.
#
# action_regen: implements `emerge --regen` — regenerates metadata cache
# entries via MetadataRegen (stdin is closed first; the reason is not
# visible in this fragment) and returns its exit status.
13355 def action_regen(settings, portdb, max_jobs, max_load):
13356 xterm_titles = "notitles" not in settings.features
13357 emergelog(xterm_titles, " === regen")
13358 #regenerate cache entries
13359 portage.writemsg_stdout("Regenerating cache entries...\n")
13361 os.close(sys.stdin.fileno())
13362 except SystemExit, e:
13363 raise # Needed else can't exit
13368 regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13371 portage.writemsg_stdout("done!\n")
13372 return regen.returncode
# NOTE(review): numbered listing fragment (indentation stripped, interior
# lines missing — e.g. the returns after error prints). Python 2 syntax.
#
# action_config: implements `emerge --config <atom>` — resolves the atom
# against installed packages, lets the user pick one if several match
# (--ask), then runs the ebuild's "config" phase followed by "clean".
13374 def action_config(settings, trees, myopts, myfiles):
13375 if len(myfiles) != 1:
13376 print red("!!! config can only take a single package atom at this time\n")
13378 if not is_valid_package_atom(myfiles[0]):
13379 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13381 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13382 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
13386 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13387 except portage.exception.AmbiguousPackageName, e:
13388 # Multiple matches thrown from cpv_expand
13391 print "No packages found.\n"
# Multiple installed matches: interactive selection with --ask, otherwise
# list them and bail out.
13393 elif len(pkgs) > 1:
13394 if "--ask" in myopts:
13396 print "Please select a package to configure:"
13400 options.append(str(idx))
13401 print options[-1]+") "+pkg
13403 options.append("X")
13404 idx = userquery("Selection?", options)
13407 pkg = pkgs[int(idx)-1]
13409 print "The following packages available:"
13412 print "\nPlease use a specific atom or the --ask option."
13418 if "--ask" in myopts:
13419 if userquery("Ready to configure "+pkg+"?") == "No":
13422 print "Configuring pkg..."
13424 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13425 mysettings = portage.config(clone=settings)
13426 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13427 debug = mysettings.get("PORTAGE_DEBUG") == "1"
# NOTE(review): 13430 passes debug=(settings.get("PORTAGE_DEBUG","") == 1)
# — a string compared to int 1, which is always False, while the correctly
# computed `debug` local from 13427 is used only in the "clean" call below.
# Likely a latent bug; confirm against upstream.
13428 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13430 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
13431 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13432 if retval == os.EX_OK:
13433 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13434 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
# NOTE(review): numbered listing fragment (indentation stripped, interior
# lines missing). Python 2 syntax (print statements, xrange, commands module).
#
# action_info: implements `emerge --info` — prints the portage version
# banner, system settings (uname, tree timestamp, toolchain package
# versions, key make.conf variables, USE/USE_EXPAND breakdown), and, for
# any packages named on the command line, their build-time settings where
# they differ from the current global configuration.
13437 def action_info(settings, trees, myopts, myfiles):
13438 print getportageversion(settings["PORTDIR"], settings["ROOT"],
13439 settings.profile_path, settings["CHOST"],
13440 trees[settings["ROOT"]]["vartree"].dbapi)
13442 header_title = "System Settings"
13444 print header_width * "="
13445 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13446 print header_width * "="
13447 print "System uname: "+platform.platform(aliased=1)
13449 lastSync = portage.grabfile(os.path.join(
13450 settings["PORTDIR"], "metadata", "timestamp.chk"))
13451 print "Timestamp of tree:",
# Report distcc/ccache versions when present (commands.getstatusoutput is
# Python-2-only; replaced by subprocess in Python 3).
13457 output=commands.getstatusoutput("distcc --version")
13459 print str(output[1].split("\n",1)[0]),
13460 if "distcc" in settings.features:
13465 output=commands.getstatusoutput("ccache -V")
13467 print str(output[1].split("\n",1)[0]),
13468 if "ccache" in settings.features:
# Toolchain package versions: built-in list plus profiles/info_pkgs.
13473 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13474 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
13475 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13476 myvars = portage.util.unique_array(myvars)
13480 if portage.isvalidatom(x):
13481 pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13482 pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13483 pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13485 for pn, ver, rev in pkg_matches:
13487 pkgs.append(ver + "-" + rev)
13491 pkgs = ", ".join(pkgs)
13492 print "%-20s %s" % (x+":", pkgs)
13494 print "%-20s %s" % (x+":", "[NOT VALID]")
13496 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
# Variable dump: all settings with --verbose, otherwise a curated list
# plus profiles/info_vars.
13498 if "--verbose" in myopts:
13499 myvars=settings.keys()
13501 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13502 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13503 'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13504 'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13506 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13508 myvars = portage.util.unique_array(myvars)
13514 print '%s="%s"' % (x, settings[x])
# USE is printed with USE_EXPAND-derived flags factored out into their
# own VARNAME="..." lines.
13516 use = set(settings["USE"].split())
13517 use_expand = settings["USE_EXPAND"].split()
13519 for varname in use_expand:
13520 flag_prefix = varname.lower() + "_"
13521 for f in list(use):
13522 if f.startswith(flag_prefix):
13526 print 'USE="%s"' % " ".join(use),
13527 for varname in use_expand:
13528 myval = settings.get(varname)
13530 print '%s="%s"' % (varname, myval),
13533 unset_vars.append(x)
13535 print "Unset: "+", ".join(unset_vars)
13538 if "--debug" in myopts:
13539 for x in dir(portage):
13540 module = getattr(portage, x)
13541 if "cvs_id_string" in dir(module):
13542 print "%s: %s" % (str(x), str(module.cvs_id_string))
13544 # See if we can find any packages installed matching the strings
13545 # passed on the command line
13547 vardb = trees[settings["ROOT"]]["vartree"].dbapi
13548 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13550 mypkgs.extend(vardb.match(x))
13552 # If some packages were found...
13554 # Get our global settings (we only print stuff if it varies from
13555 # the current config)
13556 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13557 auxkeys = mydesiredvars + [ "USE", "IUSE"]
13559 pkgsettings = portage.config(clone=settings)
13561 for myvar in mydesiredvars:
13562 global_vals[myvar] = set(settings.get(myvar, "").split())
13564 # Loop through each package
13565 # Only print settings if they differ from global settings
13566 header_title = "Package Settings"
13567 print header_width * "="
13568 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13569 print header_width * "="
13570 from portage.output import EOutput
13573 # Get all package specific variables
13574 auxvalues = vardb.aux_get(pkg, auxkeys)
13576 for i in xrange(len(auxkeys)):
13577 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
13579 for myvar in mydesiredvars:
13580 # If the package variable doesn't match the
13581 # current global variable, something has changed
13582 # so set diff_found so we know to print
13583 if valuesmap[myvar] != global_vals[myvar]:
13584 diff_values[myvar] = valuesmap[myvar]
13585 valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
13586 valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
13587 pkgsettings.reset()
13588 # If a matching ebuild is no longer available in the tree, maybe it
13589 # would make sense to compare against the flags for the best
13590 # available version with the same slot?
13592 if portdb.cpv_exists(pkg):
13594 pkgsettings.setcpv(pkg, mydb=mydb)
13595 if valuesmap["IUSE"].intersection(
13596 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
13597 diff_values["USE"] = valuesmap["USE"]
13598 # If a difference was found, print the info for
13601 # Print package info
13602 print "%s was built with the following:" % pkg
13603 for myvar in mydesiredvars + ["USE"]:
13604 if myvar in diff_values:
13605 mylist = list(diff_values[myvar])
13607 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13609 print ">>> Attempting to run pkg_info() for '%s'" % pkg
13610 ebuildpath = vardb.findname(pkg)
13611 if not ebuildpath or not os.path.exists(ebuildpath):
13612 out.ewarn("No ebuild found for '%s'" % pkg)
# NOTE(review): same suspicious pattern as action_config —
# settings.get("PORTAGE_DEBUG", "") == 1 compares str to int (always
# False); confirm against upstream.
13614 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13615 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
13616 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
13619 def action_search(root_config, myopts, myfiles, spinner):
# Handle `emerge --search`: run each search term in myfiles through a
# single search() instance and print the accumulated results.
# NOTE(review): this listing elides several original lines (e.g. the
# empty-myfiles guard that presumably precedes the message below, and
# the try/exit wrapper around execute()) -- confirm against the full file.
13621 print "emerge: no search terms provided."
# One shared search instance; the positional flags toggle description
# search (--searchdesc), non-quiet output, and binary-package awareness
# (--usepkg / --usepkgonly).
13623 searchinstance = search(root_config,
13624 spinner, "--searchdesc" in myopts,
13625 "--quiet" not in myopts, "--usepkg" in myopts,
13626 "--usepkgonly" in myopts)
13627 for mysearch in myfiles:
13629 searchinstance.execute(mysearch)
# A malformed search pattern surfaces as re.error (Python 2
# "except ExcType, var" syntax).
13630 except re.error, comment:
13631 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13633 searchinstance.output()
13635 def action_depclean(settings, trees, ldpath_mtimes,
13636 myopts, action, myfiles, spinner):
# Handle `emerge --depclean` and `emerge --prune`: remove installed
# packages that are neither world/system members nor required by one.
# `action` is "depclean" or "prune"; `myfiles` are optional atoms that
# restrict the operation to specific packages.
# NOTE(review): many original lines are elided throughout this listing
# (visible in the jumps of the leftover line numbers) -- every claim
# below should be confirmed against the complete file.
13637 # Kill packages that aren't explicitly merged or are required as a
13638 # dependency of another package. World file is explicit.
13640 # Global depclean or prune operations are not very safe when there are
13641 # missing dependencies since it's unknown how badly incomplete
13642 # the dependency graph is, and we might accidentally remove packages
13643 # that should have been pulled into the graph. On the other hand, it's
13644 # relatively safe to ignore missing deps when only asked to remove
13645 # specific packages.
13646 allow_missing_deps = len(myfiles) > 0
# Build the safety-warning text shown before a *global* depclean.
13649 msg.append("Always study the list of packages to be cleaned for any obvious\n")
13650 msg.append("mistakes. Packages that are part of the world set will always\n")
13651 msg.append("be kept. They can be manually added to this set with\n")
13652 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
13653 msg.append("package.provided (see portage(5)) will be removed by\n")
13654 msg.append("depclean, even if they are part of the world set.\n")
13656 msg.append("As a safety measure, depclean will not remove any packages\n")
13657 msg.append("unless *all* required dependencies have been resolved. As a\n")
13658 msg.append("consequence, it is often necessary to run %s\n" % \
13659 good("`emerge --update"))
13660 msg.append(good("--newuse --deep @system @world`") + \
13661 " prior to depclean.\n")
# Only show the warning for a full (no-argument) depclean run.
13663 if action == "depclean" and "--quiet" not in myopts and not myfiles:
13664 portage.writemsg_stdout("\n")
13666 portage.writemsg_stdout(colorize("WARN", " * ") + x)
13668 xterm_titles = "notitles" not in settings.features
13669 myroot = settings["ROOT"]
13670 root_config = trees[myroot]["root_config"]
13671 getSetAtoms = root_config.setconfig.getSetAtoms
13672 vardb = trees[myroot]["vartree"].dbapi
# Snapshot the "system" and "world" sets; everything they (transitively)
# require must be kept.
13674 required_set_names = ("system", "world")
13678 for s in required_set_names:
13679 required_sets[s] = InternalPackageSet(
13680 initial_atoms=getSetAtoms(s))
13683 # When removing packages, use a temporary version of world
13684 # which excludes packages that are intended to be eligible for
13686 world_temp_set = required_sets["world"]
13687 system_set = required_sets["system"]
# Warn loudly (with a countdown) when system/world are empty, since a
# depclean in that state would likely wreck the installation.
13689 if not system_set or not world_temp_set:
13692 writemsg_level("!!! You have no system list.\n",
13693 level=logging.ERROR, noiselevel=-1)
13695 if not world_temp_set:
13696 writemsg_level("!!! You have no world file.\n",
13697 level=logging.WARNING, noiselevel=-1)
13699 writemsg_level("!!! Proceeding is likely to " + \
13700 "break your installation.\n",
13701 level=logging.WARNING, noiselevel=-1)
13702 if "--pretend" not in myopts:
13703 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13705 if action == "depclean":
13706 emergelog(xterm_titles, " >>> depclean")
# Validate the command-line atoms and expand short names; ambiguity is
# reported with the list of fully-qualified candidates.
13709 args_set = InternalPackageSet()
13712 if not is_valid_package_atom(x):
13713 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13714 level=logging.ERROR, noiselevel=-1)
13715 writemsg_level("!!! Please check ebuild(5) for full details.\n")
13718 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13719 except portage.exception.AmbiguousPackageName, e:
13720 msg = "The short ebuild name \"" + x + \
13721 "\" is ambiguous. Please specify " + \
13722 "one of the following " + \
13723 "fully-qualified ebuild names instead:"
13724 for line in textwrap.wrap(msg, 70):
13725 writemsg_level("!!! %s\n" % (line,),
13726 level=logging.ERROR, noiselevel=-1)
13728 writemsg_level(" %s\n" % colorize("INFORM", i),
13729 level=logging.ERROR, noiselevel=-1)
13730 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
# Bail out early if no argument atom matched an installed package.
13733 matched_packages = False
13736 matched_packages = True
13738 if not matched_packages:
13739 writemsg_level(">>> No packages selected for removal by %s\n" % \
# Build a dependency resolver in "remove" mode; its graph decides what
# is required and therefore must be kept.
13743 writemsg_level("\nCalculating dependencies ")
13744 resolver_params = create_depgraph_params(myopts, "remove")
13745 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13746 vardb = resolver.trees[myroot]["vartree"].dbapi
13748 if action == "depclean":
13751 # Pull in everything that's installed but not matched
13752 # by an argument atom since we don't want to clean any
13753 # package if something depends on it.
13755 world_temp_set.clear()
13760 if args_set.findAtomForPackage(pkg) is None:
13761 world_temp_set.add("=" + pkg.cpv)
# An unparsable PROVIDE is reported, and the package is conservatively
# kept (added to the temporary world set).
13763 except portage.exception.InvalidDependString, e:
13764 show_invalid_depstring_notice(pkg,
13765 pkg.metadata["PROVIDE"], str(e))
13767 world_temp_set.add("=" + pkg.cpv)
13770 elif action == "prune":
13772 # Pull in everything that's installed since we don't
13773 # want to prune a package if something depends on it.
13774 world_temp_set.clear()
13775 world_temp_set.update(vardb.cp_all())
13779 # Try to prune everything that's slotted.
13780 for cp in vardb.cp_all():
13781 if len(vardb.cp_list(cp)) > 1:
13784 # Remove atoms from world that match installed packages
13785 # that are also matched by argument atoms, but do not remove
13786 # them if they match the highest installed version.
13789 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13790 if not pkgs_for_cp or pkg not in pkgs_for_cp:
13791 raise AssertionError("package expected in matches: " + \
13792 "cp = %s, cpv = %s matches = %s" % \
13793 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
# match_pkgs() results are assumed sorted ascending, so the last entry
# is the highest installed version -- TODO confirm against vardbapi.
13795 highest_version = pkgs_for_cp[-1]
13796 if pkg == highest_version:
13797 # pkg is the highest version
13798 world_temp_set.add("=" + pkg.cpv)
13801 if len(pkgs_for_cp) <= 1:
13802 raise AssertionError("more packages expected: " + \
13803 "cp = %s, cpv = %s matches = %s" % \
13804 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13807 if args_set.findAtomForPackage(pkg) is None:
13808 world_temp_set.add("=" + pkg.cpv)
13810 except portage.exception.InvalidDependString, e:
13811 show_invalid_depstring_notice(pkg,
13812 pkg.metadata["PROVIDE"], str(e))
13814 world_temp_set.add("=" + pkg.cpv)
# Seed the resolver with the system/world set arguments so their
# members (and transitive deps) land in the "required" graph.
13818 for s, package_set in required_sets.iteritems():
13819 set_atom = SETPREFIX + s
13820 set_arg = SetArg(arg=set_atom, set=package_set,
13821 root_config=resolver.roots[myroot])
13822 set_args[s] = set_arg
13823 for atom in set_arg.set:
13824 resolver._dep_stack.append(
13825 Dependency(atom=atom, root=myroot, parent=set_arg))
13826 resolver.digraph.add(set_arg, None)
13828 success = resolver._complete_graph()
13829 writemsg_level("\b\b... done!\n")
13831 resolver.display_problems()
13836 def unresolved_deps():
# Report dependencies the resolver could not satisfy. Returns a truthy
# value (and prints an explanation) when unresolved hard deps should
# abort the operation; tolerated when allow_missing_deps is set
# (i.e. specific atoms were given on the command line).
13838 unresolvable = set()
13839 for dep in resolver._initially_unsatisfied_deps:
# Only hard (stronger-than-SOFT) deps of real packages count.
13840 if isinstance(dep.parent, Package) and \
13841 (dep.priority > UnmergeDepPriority.SOFT):
13842 unresolvable.add((dep.atom, dep.parent.cpv))
13844 if not unresolvable:
13847 if unresolvable and not allow_missing_deps:
13848 prefix = bad(" * ")
13850 msg.append("Dependencies could not be completely resolved due to")
13851 msg.append("the following required packages not being installed:")
13853 for atom, parent in unresolvable:
13854 msg.append(" %s pulled in by:" % (atom,))
13855 msg.append(" %s" % (parent,))
13857 msg.append("Have you forgotten to run " + \
13858 good("`emerge --update --newuse --deep @system @world`") + " prior")
13859 msg.append(("to %s? It may be necessary to manually " + \
13860 "uninstall packages that no longer") % action)
13861 msg.append("exist in the portage tree since " + \
13862 "it may not be possible to satisfy their")
13863 msg.append("dependencies. Also, be aware of " + \
13864 "the --with-bdeps option that is documented")
13865 msg.append("in " + good("`man emerge`") + ".")
13866 if action == "prune":
13868 msg.append("If you would like to ignore " + \
13869 "dependencies then use %s." % good("--nodeps"))
13870 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13871 level=logging.ERROR, noiselevel=-1)
13875 if unresolved_deps():
# Everything reachable in this graph is required; count the Package
# nodes for the summary printed at the end.
13878 graph = resolver.digraph.copy()
13879 required_pkgs_total = 0
13881 if isinstance(node, Package):
13882 required_pkgs_total += 1
13884 def show_parents(child_node):
# Print the reverse dependencies that keep child_node installed
# (used with --verbose to explain why a package is not removed).
13885 parent_nodes = graph.parent_nodes(child_node)
13886 if not parent_nodes:
13887 # With --prune, the highest version can be pulled in without any
13888 # real parent since all installed packages are pulled in. In that
13889 # case there's nothing to show here.
13892 for node in parent_nodes:
13893 parent_strs.append(str(getattr(node, "cpv", node)))
13896 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
13897 for parent_str in parent_strs:
13898 msg.append(" %s\n" % (parent_str,))
13900 portage.writemsg_stdout("".join(msg), noiselevel=-1)
13902 def cmp_pkg_cpv(pkg1, pkg2):
13903 """Sort Package instances by cpv."""
13904 if pkg1.cpv > pkg2.cpv:
13906 elif pkg1.cpv == pkg2.cpv:
13911 def create_cleanlist():
# Collect the installed packages that are absent from the "required"
# graph; with --verbose, show_parents() explains each kept package.
13912 pkgs_to_remove = []
13914 if action == "depclean":
13917 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13920 arg_atom = args_set.findAtomForPackage(pkg)
13921 except portage.exception.InvalidDependString:
13922 # this error has already been displayed by now
13926 if pkg not in graph:
13927 pkgs_to_remove.append(pkg)
13928 elif "--verbose" in myopts:
13932 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13933 if pkg not in graph:
13934 pkgs_to_remove.append(pkg)
13935 elif "--verbose" in myopts:
13938 elif action == "prune":
13939 # Prune really uses all installed instead of world. It's not
13940 # a real reverse dependency so don't display it as such.
13941 graph.remove(set_args["world"])
13943 for atom in args_set:
13944 for pkg in vardb.match_pkgs(atom):
13945 if pkg not in graph:
13946 pkgs_to_remove.append(pkg)
13947 elif "--verbose" in myopts:
13950 if not pkgs_to_remove:
13952 ">>> No packages selected for removal by %s\n" % action)
13953 if "--verbose" not in myopts:
13955 ">>> To see reverse dependencies, use %s\n" % \
13957 if action == "prune":
13959 ">>> To ignore dependencies, use %s\n" % \
13962 return pkgs_to_remove
13964 cleanlist = create_cleanlist()
13967 clean_set = set(cleanlist)
13969 # Check if any of these package are the sole providers of libraries
13970 # with consumers that have not been selected for removal. If so, these
13971 # packages and any dependencies need to be added to the graph.
13972 real_vardb = trees[myroot]["vartree"].dbapi
13973 linkmap = real_vardb.linkmap
13974 liblist = linkmap.listLibraryObjects()
# Memoize expensive linkmap lookups across the candidate loop.
13975 consumer_cache = {}
13976 provider_cache = {}
13980 writemsg_level(">>> Checking for lib consumers...\n")
# Pass 1: for each removal candidate, find the shared libraries it
# installs and who consumes them.
13982 for pkg in cleanlist:
13983 pkg_dblink = real_vardb._dblink(pkg.cpv)
13984 provided_libs = set()
13986 for lib in liblist:
13987 if pkg_dblink.isowner(lib, myroot):
13988 provided_libs.add(lib)
13990 if not provided_libs:
13994 for lib in provided_libs:
13995 lib_consumers = consumer_cache.get(lib)
13996 if lib_consumers is None:
13997 lib_consumers = linkmap.findConsumers(lib)
13998 consumer_cache[lib] = lib_consumers
14000 consumers[lib] = lib_consumers
# A package consuming its own library is not an external consumer.
14005 for lib, lib_consumers in consumers.items():
14006 for consumer_file in list(lib_consumers):
14007 if pkg_dblink.isowner(consumer_file, myroot):
14008 lib_consumers.remove(consumer_file)
14009 if not lib_consumers:
# Resolve each remaining consumer to the providers of the library's
# soname, caching getSoname()/findProviders() results.
14015 for lib, lib_consumers in consumers.iteritems():
14017 soname = soname_cache.get(lib)
14019 soname = linkmap.getSoname(lib)
14020 soname_cache[lib] = soname
14022 consumer_providers = []
14023 for lib_consumer in lib_consumers:
14024 providers = provider_cache.get(lib)
14025 if providers is None:
14026 providers = linkmap.findProviders(lib_consumer)
14027 provider_cache[lib_consumer] = providers
# NOTE(review): the cache is read with key `lib` but written with key
# `lib_consumer` two lines below -- looks inconsistent; confirm against
# the complete file before assuming it is a bug.
14028 if soname not in providers:
14029 # Why does this happen?
14031 consumer_providers.append(
14032 (lib_consumer, providers[soname]))
14034 consumers[lib] = consumer_providers
14036 consumer_map[pkg] = consumers
# Pass 2: map all involved files back to owning packages in one bulk
# lookup (cheaper than per-file queries).
14040 search_files = set()
14041 for consumers in consumer_map.itervalues():
14042 for lib, consumer_providers in consumers.iteritems():
14043 for lib_consumer, providers in consumer_providers:
14044 search_files.add(lib_consumer)
14045 search_files.update(providers)
14047 writemsg_level(">>> Assigning files to packages...\n")
14048 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
# Pass 3: drop libraries that still have an alternative provider
# outside the removal set, or whose only consumers are themselves
# being removed; what remains blocks removal.
14050 for pkg, consumers in consumer_map.items():
14051 for lib, consumer_providers in consumers.items():
14052 lib_consumers = set()
14054 for lib_consumer, providers in consumer_providers:
14055 owner_set = file_owners.get(lib_consumer)
14056 provider_dblinks = set()
14057 provider_pkgs = set()
14059 if len(providers) > 1:
14060 for provider in providers:
14061 provider_set = file_owners.get(provider)
14062 if provider_set is not None:
14063 provider_dblinks.update(provider_set)
14065 if len(provider_dblinks) > 1:
14066 for provider_dblink in provider_dblinks:
14067 pkg_key = ("installed", myroot,
14068 provider_dblink.mycpv, "nomerge")
14069 if pkg_key not in clean_set:
14070 provider_pkgs.add(vardb.get(pkg_key))
14075 if owner_set is not None:
14076 lib_consumers.update(owner_set)
14078 for consumer_dblink in list(lib_consumers):
14079 if ("installed", myroot, consumer_dblink.mycpv,
14080 "nomerge") in clean_set:
14081 lib_consumers.remove(consumer_dblink)
14085 consumers[lib] = lib_consumers
14089 del consumer_map[pkg]
14092 # TODO: Implement a package set for rebuilding consumer packages.
# Explain which packages are being kept to protect link-level deps.
14094 msg = "In order to avoid breakage of link level " + \
14095 "dependencies, one or more packages will not be removed. " + \
14096 "This can be solved by rebuilding " + \
14097 "the packages that pulled them in."
14099 prefix = bad(" * ")
14100 from textwrap import wrap
14101 writemsg_level("".join(prefix + "%s\n" % line for \
14102 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
14105 for pkg, consumers in consumer_map.iteritems():
14106 unique_consumers = set(chain(*consumers.values()))
14107 unique_consumers = sorted(consumer.mycpv \
14108 for consumer in unique_consumers)
14110 msg.append(" %s pulled in by:" % (pkg.cpv,))
14111 for consumer in unique_consumers:
14112 msg.append(" %s" % (consumer,))
14114 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
14115 level=logging.WARNING, noiselevel=-1)
14117 # Add lib providers to the graph as children of lib consumers,
14118 # and also add any dependencies pulled in by the provider.
14119 writemsg_level(">>> Adding lib providers to graph...\n")
14121 for pkg, consumers in consumer_map.iteritems():
14122 for consumer_dblink in set(chain(*consumers.values())):
14123 consumer_pkg = vardb.get(("installed", myroot,
14124 consumer_dblink.mycpv, "nomerge"))
14125 if not resolver._add_pkg(pkg,
14126 Dependency(parent=consumer_pkg,
14127 priority=UnmergeDepPriority(runtime=True),
14129 resolver.display_problems()
# Re-run graph completion now that protected providers were added,
# then recompute the clean list against the enlarged graph.
14132 writemsg_level("\nCalculating dependencies ")
14133 success = resolver._complete_graph()
14134 writemsg_level("\b\b... done!\n")
14135 resolver.display_problems()
14138 if unresolved_deps():
14141 graph = resolver.digraph.copy()
14142 required_pkgs_total = 0
14144 if isinstance(node, Package):
14145 required_pkgs_total += 1
14146 cleanlist = create_cleanlist()
14149 clean_set = set(cleanlist)
14151 # Use a topological sort to create an unmerge order such that
14152 # each package is unmerged before its dependencies. This is
14153 # necessary to avoid breaking things that may need to run
14154 # during pkg_prerm or pkg_postrm phases.
14156 # Create a new graph to account for dependencies between the
14157 # packages being unmerged.
14161 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
14162 runtime = UnmergeDepPriority(runtime=True)
14163 runtime_post = UnmergeDepPriority(runtime_post=True)
14164 buildtime = UnmergeDepPriority(buildtime=True)
14166 "RDEPEND": runtime,
14167 "PDEPEND": runtime_post,
14168 "DEPEND": buildtime,
# Edge-build: for every package being removed, parse its dep strings
# (non-strict, since it is going away anyway) and link it to other
# clean-set members it depends on.
14171 for node in clean_set:
14172 graph.add(node, None)
14174 node_use = node.metadata["USE"].split()
14175 for dep_type in dep_keys:
14176 depstr = node.metadata[dep_type]
14180 portage.dep._dep_check_strict = False
14181 success, atoms = portage.dep_check(depstr, None, settings,
14182 myuse=node_use, trees=resolver._graph_trees,
14185 portage.dep._dep_check_strict = True
14187 # Ignore invalid deps of packages that will
14188 # be uninstalled anyway.
14191 priority = priority_map[dep_type]
14193 if not isinstance(atom, portage.dep.Atom):
14194 # Ignore invalid atoms returned from dep_check().
14198 matches = vardb.match_pkgs(atom)
14201 for child_node in matches:
14202 if child_node in clean_set:
14203 graph.add(child_node, node, priority=priority)
14206 if len(graph.order) == len(graph.root_nodes()):
14207 # If there are no dependencies between packages
14208 # let unmerge() group them by cat/pn.
14210 cleanlist = [pkg.cpv for pkg in graph.order]
14212 # Order nodes from lowest to highest overall reference count for
14213 # optimal root node selection.
14214 node_refcounts = {}
14215 for node in graph.order:
14216 node_refcounts[node] = len(graph.parent_nodes(node))
14217 def cmp_reference_count(node1, node2):
14218 return node_refcounts[node1] - node_refcounts[node2]
14219 graph.order.sort(key=cmp_sort_key(cmp_reference_count))
# Peel root nodes off the graph repeatedly, progressively ignoring
# weaker dep priorities to break circular dependencies.
14221 ignore_priority_range = [None]
14222 ignore_priority_range.extend(
14223 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14224 while not graph.empty():
14225 for ignore_priority in ignore_priority_range:
14226 nodes = graph.root_nodes(ignore_priority=ignore_priority)
14230 raise AssertionError("no root nodes")
14231 if ignore_priority is not None:
14232 # Some deps have been dropped due to circular dependencies,
14233 # so only pop one node in order to minimize the number that
14238 cleanlist.append(node.cpv)
14240 unmerge(root_config, myopts, "unmerge", cleanlist,
14241 ldpath_mtimes, ordered=ordered)
14243 if action == "prune":
# Final summary of installed/world/system/required counts; wording of
# the last line depends on whether this was a --pretend run.
14246 if not cleanlist and "--quiet" in myopts:
14249 print "Packages installed: "+str(len(vardb.cpv_all()))
14250 print "Packages in world: " + \
14251 str(len(root_config.sets["world"].getAtoms()))
14252 print "Packages in system: " + \
14253 str(len(root_config.sets["system"].getAtoms()))
14254 print "Required packages: "+str(required_pkgs_total)
14255 if "--pretend" in myopts:
14256 print "Number to remove: "+str(len(cleanlist))
14258 print "Number removed: "+str(len(cleanlist))
14260 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
# Rebuild a depgraph from mtimedb["resume"]["mergelist"] for
# `emerge --resume`. When loadResumeCommand() raises
# UnsatisfiedResumeDep and skipping is enabled, unsatisfied parents
# (and their transitively endangered ancestors) are pruned from the
# mergelist and retried; dropped non-"nomerge" packages are reported
# back to the caller.
# NOTE(review): this listing elides some original lines (e.g. the
# definition of skip_masked and the retry loop header) -- confirm
# against the complete file.
14262 Construct a depgraph for the given resume list. This will raise
14263 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14265 @returns: (success, depgraph, dropped_tasks)
14268 skip_unsatisfied = True
14269 mergelist = mtimedb["resume"]["mergelist"]
14270 dropped_tasks = set()
14272 mydepgraph = depgraph(settings, trees,
14273 myopts, myparams, spinner)
14275 success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14276 skip_masked=skip_masked)
14277 except depgraph.UnsatisfiedResumeDep, e:
14278 if not skip_unsatisfied:
# e.value holds the unsatisfied Dependency objects; start from their
# parent packages and walk *up* the graph to find every ancestor whose
# own deps would break if these packages were dropped.
14281 graph = mydepgraph.digraph
14282 unsatisfied_parents = dict((dep.parent, dep.parent) \
14283 for dep in e.value)
14284 traversed_nodes = set()
14285 unsatisfied_stack = list(unsatisfied_parents)
14286 while unsatisfied_stack:
14287 pkg = unsatisfied_stack.pop()
14288 if pkg in traversed_nodes:
14290 traversed_nodes.add(pkg)
14292 # If this package was pulled in by a parent
14293 # package scheduled for merge, removing this
14294 # package may cause the parent package's
14295 # dependency to become unsatisfied.
14296 for parent_node in graph.parent_nodes(pkg):
14297 if not isinstance(parent_node, Package) \
14298 or parent_node.operation not in ("merge", "nomerge"):
# Soft deps are ignored when deciding whether the parent is affected.
14301 graph.child_nodes(parent_node,
14302 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14303 if pkg in unsatisfied:
14304 unsatisfied_parents[parent_node] = parent_node
14305 unsatisfied_stack.append(parent_node)
# Rebuild the mergelist without the affected entries (mergelist items
# are 4-element lists; compared as tuples against the parent set).
14307 pruned_mergelist = []
14308 for x in mergelist:
14309 if isinstance(x, list) and \
14310 tuple(x) not in unsatisfied_parents:
14311 pruned_mergelist.append(x)
14313 # If the mergelist doesn't shrink then this loop is infinite.
14314 if len(pruned_mergelist) == len(mergelist):
14315 # This happens if a package can't be dropped because
14316 # it's already installed, but it has unsatisfied PDEPEND.
14318 mergelist[:] = pruned_mergelist
14320 # Exclude installed packages that have been removed from the graph due
14321 # to failure to build/install runtime dependencies after the dependent
14322 # package has already been installed.
14323 dropped_tasks.update(pkg for pkg in \
14324 unsatisfied_parents if pkg.operation != "nomerge")
# break_refs() severs references so the dropped packages can be
# garbage-collected before the depgraph is rebuilt.
14325 mydepgraph.break_refs(unsatisfied_parents)
14327 del e, graph, traversed_nodes, \
14328 unsatisfied_parents, unsatisfied_stack
14332 return (success, mydepgraph, dropped_tasks)
14334 def action_build(settings, trees, mtimedb,
14335 myopts, myaction, myfiles, spinner):
14337 # validate the state of the resume data
14338 # so that we can make assumptions later.
14339 for k in ("resume", "resume_backup"):
14340 if k not in mtimedb:
14342 resume_data = mtimedb[k]
14343 if not isinstance(resume_data, dict):
14346 mergelist = resume_data.get("mergelist")
14347 if not isinstance(mergelist, list):
14350 for x in mergelist:
14351 if not (isinstance(x, list) and len(x) == 4):
14353 pkg_type, pkg_root, pkg_key, pkg_action = x
14354 if pkg_root not in trees:
14355 # Current $ROOT setting differs,
14356 # so the list must be stale.
14362 resume_opts = resume_data.get("myopts")
14363 if not isinstance(resume_opts, (dict, list)):
14366 favorites = resume_data.get("favorites")
14367 if not isinstance(favorites, list):
14372 if "--resume" in myopts and \
14373 ("resume" in mtimedb or
14374 "resume_backup" in mtimedb):
14376 if "resume" not in mtimedb:
14377 mtimedb["resume"] = mtimedb["resume_backup"]
14378 del mtimedb["resume_backup"]
14380 # "myopts" is a list for backward compatibility.
14381 resume_opts = mtimedb["resume"].get("myopts", [])
14382 if isinstance(resume_opts, list):
14383 resume_opts = dict((k,True) for k in resume_opts)
14384 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14385 resume_opts.pop(opt, None)
14386 myopts.update(resume_opts)
14388 if "--debug" in myopts:
14389 writemsg_level("myopts %s\n" % (myopts,))
14391 # Adjust config according to options of the command being resumed.
14392 for myroot in trees:
14393 mysettings = trees[myroot]["vartree"].settings
14394 mysettings.unlock()
14395 adjust_config(myopts, mysettings)
14397 del myroot, mysettings
14399 ldpath_mtimes = mtimedb["ldpath"]
14402 buildpkgonly = "--buildpkgonly" in myopts
14403 pretend = "--pretend" in myopts
14404 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14405 ask = "--ask" in myopts
14406 nodeps = "--nodeps" in myopts
14407 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14408 tree = "--tree" in myopts
14409 if nodeps and tree:
14411 del myopts["--tree"]
14412 portage.writemsg(colorize("WARN", " * ") + \
14413 "--tree is broken with --nodeps. Disabling...\n")
14414 debug = "--debug" in myopts
14415 verbose = "--verbose" in myopts
14416 quiet = "--quiet" in myopts
14417 if pretend or fetchonly:
14418 # make the mtimedb readonly
14419 mtimedb.filename = None
14420 if '--digest' in myopts or 'digest' in settings.features:
14421 if '--digest' in myopts:
14422 msg = "The --digest option"
14424 msg = "The FEATURES=digest setting"
14426 msg += " can prevent corruption from being" + \
14427 " noticed. The `repoman manifest` command is the preferred" + \
14428 " way to generate manifests and it is capable of doing an" + \
14429 " entire repository or category at once."
14430 prefix = bad(" * ")
14431 writemsg(prefix + "\n")
14432 from textwrap import wrap
14433 for line in wrap(msg, 72):
14434 writemsg("%s%s\n" % (prefix, line))
14435 writemsg(prefix + "\n")
14437 if "--quiet" not in myopts and \
14438 ("--pretend" in myopts or "--ask" in myopts or \
14439 "--tree" in myopts or "--verbose" in myopts):
14441 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14443 elif "--buildpkgonly" in myopts:
14447 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14449 print darkgreen("These are the packages that would be %s, in reverse order:") % action
14453 print darkgreen("These are the packages that would be %s, in order:") % action
14456 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14457 if not show_spinner:
14458 spinner.update = spinner.update_quiet
14461 favorites = mtimedb["resume"].get("favorites")
14462 if not isinstance(favorites, list):
14466 print "Calculating dependencies ",
14467 myparams = create_depgraph_params(myopts, myaction)
14469 resume_data = mtimedb["resume"]
14470 mergelist = resume_data["mergelist"]
14471 if mergelist and "--skipfirst" in myopts:
14472 for i, task in enumerate(mergelist):
14473 if isinstance(task, list) and \
14474 task and task[-1] == "merge":
14481 success, mydepgraph, dropped_tasks = resume_depgraph(
14482 settings, trees, mtimedb, myopts, myparams, spinner)
14483 except (portage.exception.PackageNotFound,
14484 depgraph.UnsatisfiedResumeDep), e:
14485 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14486 mydepgraph = e.depgraph
14489 from textwrap import wrap
14490 from portage.output import EOutput
14493 resume_data = mtimedb["resume"]
14494 mergelist = resume_data.get("mergelist")
14495 if not isinstance(mergelist, list):
14497 if mergelist and debug or (verbose and not quiet):
14498 out.eerror("Invalid resume list:")
14501 for task in mergelist:
14502 if isinstance(task, list):
14503 out.eerror(indent + str(tuple(task)))
14506 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14507 out.eerror("One or more packages are either masked or " + \
14508 "have missing dependencies:")
14511 for dep in e.value:
14512 if dep.atom is None:
14513 out.eerror(indent + "Masked package:")
14514 out.eerror(2 * indent + str(dep.parent))
14517 out.eerror(indent + str(dep.atom) + " pulled in by:")
14518 out.eerror(2 * indent + str(dep.parent))
14520 msg = "The resume list contains packages " + \
14521 "that are either masked or have " + \
14522 "unsatisfied dependencies. " + \
14523 "Please restart/continue " + \
14524 "the operation manually, or use --skipfirst " + \
14525 "to skip the first package in the list and " + \
14526 "any other packages that may be " + \
14527 "masked or have missing dependencies."
14528 for line in wrap(msg, 72):
14530 elif isinstance(e, portage.exception.PackageNotFound):
14531 out.eerror("An expected package is " + \
14532 "not available: %s" % str(e))
14534 msg = "The resume list contains one or more " + \
14535 "packages that are no longer " + \
14536 "available. Please restart/continue " + \
14537 "the operation manually."
14538 for line in wrap(msg, 72):
14542 print "\b\b... done!"
14546 portage.writemsg("!!! One or more packages have been " + \
14547 "dropped due to\n" + \
14548 "!!! masking or unsatisfied dependencies:\n\n",
14550 for task in dropped_tasks:
14551 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
14552 portage.writemsg("\n", noiselevel=-1)
14555 if mydepgraph is not None:
14556 mydepgraph.display_problems()
14557 if not (ask or pretend):
14558 # delete the current list and also the backup
14559 # since it's probably stale too.
14560 for k in ("resume", "resume_backup"):
14561 mtimedb.pop(k, None)
14566 if ("--resume" in myopts):
14567 print darkgreen("emerge: It seems we have nothing to resume...")
14570 myparams = create_depgraph_params(myopts, myaction)
14571 if "--quiet" not in myopts and "--nodeps" not in myopts:
14572 print "Calculating dependencies ",
14574 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14576 retval, favorites = mydepgraph.select_files(myfiles)
14577 except portage.exception.PackageNotFound, e:
14578 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14580 except portage.exception.PackageSetNotFound, e:
14581 root_config = trees[settings["ROOT"]]["root_config"]
14582 display_missing_pkg_set(root_config, e.value)
14585 print "\b\b... done!"
14587 mydepgraph.display_problems()
14590 if "--pretend" not in myopts and \
14591 ("--ask" in myopts or "--tree" in myopts or \
14592 "--verbose" in myopts) and \
14593 not ("--quiet" in myopts and "--ask" not in myopts):
14594 if "--resume" in myopts:
14595 mymergelist = mydepgraph.altlist()
14596 if len(mymergelist) == 0:
14597 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14599 favorites = mtimedb["resume"]["favorites"]
14600 retval = mydepgraph.display(
14601 mydepgraph.altlist(reversed=tree),
14602 favorites=favorites)
14603 mydepgraph.display_problems()
14604 if retval != os.EX_OK:
14606 prompt="Would you like to resume merging these packages?"
14608 retval = mydepgraph.display(
14609 mydepgraph.altlist(reversed=("--tree" in myopts)),
14610 favorites=favorites)
14611 mydepgraph.display_problems()
14612 if retval != os.EX_OK:
14615 for x in mydepgraph.altlist():
14616 if isinstance(x, Package) and x.operation == "merge":
14620 sets = trees[settings["ROOT"]]["root_config"].sets
14621 world_candidates = None
14622 if "--noreplace" in myopts and \
14623 not oneshot and favorites:
14624 # Sets that are not world candidates are filtered
14625 # out here since the favorites list needs to be
14626 # complete for depgraph.loadResumeCommand() to
14627 # operate correctly.
14628 world_candidates = [x for x in favorites \
14629 if not (x.startswith(SETPREFIX) and \
14630 not sets[x[1:]].world_candidate)]
14631 if "--noreplace" in myopts and \
14632 not oneshot and world_candidates:
14634 for x in world_candidates:
14635 print " %s %s" % (good("*"), x)
14636 prompt="Would you like to add these packages to your world favorites?"
14637 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14638 prompt="Nothing to merge; would you like to auto-clean packages?"
14641 print "Nothing to merge; quitting."
14644 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14645 prompt="Would you like to fetch the source files for these packages?"
14647 prompt="Would you like to merge these packages?"
14649 if "--ask" in myopts and userquery(prompt) == "No":
14654 # Don't ask again (e.g. when auto-cleaning packages after merge)
14655 myopts.pop("--ask", None)
14657 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14658 if ("--resume" in myopts):
14659 mymergelist = mydepgraph.altlist()
14660 if len(mymergelist) == 0:
14661 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14663 favorites = mtimedb["resume"]["favorites"]
14664 retval = mydepgraph.display(
14665 mydepgraph.altlist(reversed=tree),
14666 favorites=favorites)
14667 mydepgraph.display_problems()
14668 if retval != os.EX_OK:
14671 retval = mydepgraph.display(
14672 mydepgraph.altlist(reversed=("--tree" in myopts)),
14673 favorites=favorites)
14674 mydepgraph.display_problems()
14675 if retval != os.EX_OK:
14677 if "--buildpkgonly" in myopts:
14678 graph_copy = mydepgraph.digraph.clone()
14679 removed_nodes = set()
14680 for node in graph_copy:
14681 if not isinstance(node, Package) or \
14682 node.operation == "nomerge":
14683 removed_nodes.add(node)
14684 graph_copy.difference_update(removed_nodes)
14685 if not graph_copy.hasallzeros(ignore_priority = \
14686 DepPrioritySatisfiedRange.ignore_medium):
14687 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14688 print "!!! You have to merge the dependencies before you can build this package.\n"
14691 if "--buildpkgonly" in myopts:
14692 graph_copy = mydepgraph.digraph.clone()
14693 removed_nodes = set()
14694 for node in graph_copy:
14695 if not isinstance(node, Package) or \
14696 node.operation == "nomerge":
14697 removed_nodes.add(node)
14698 graph_copy.difference_update(removed_nodes)
14699 if not graph_copy.hasallzeros(ignore_priority = \
14700 DepPrioritySatisfiedRange.ignore_medium):
14701 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14702 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
14705 if ("--resume" in myopts):
14706 favorites=mtimedb["resume"]["favorites"]
14707 mymergelist = mydepgraph.altlist()
14708 mydepgraph.break_refs(mymergelist)
14709 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14710 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14711 del mydepgraph, mymergelist
14712 clear_caches(trees)
14714 retval = mergetask.merge()
14715 merge_count = mergetask.curval
14717 if "resume" in mtimedb and \
14718 "mergelist" in mtimedb["resume"] and \
14719 len(mtimedb["resume"]["mergelist"]) > 1:
14720 mtimedb["resume_backup"] = mtimedb["resume"]
14721 del mtimedb["resume"]
14723 mtimedb["resume"]={}
14724 # Stored as a dict starting with portage-2.1.6_rc1, and supported
14725 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14726 # a list type for options.
14727 mtimedb["resume"]["myopts"] = myopts.copy()
14729 # Convert Atom instances to plain str.
14730 mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14732 pkglist = mydepgraph.altlist()
14733 mydepgraph.saveNomergeFavorites()
14734 mydepgraph.break_refs(pkglist)
14735 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14736 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14737 del mydepgraph, pkglist
14738 clear_caches(trees)
14740 retval = mergetask.merge()
14741 merge_count = mergetask.curval
14743 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14744 if "yes" == settings.get("AUTOCLEAN"):
14745 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14746 unmerge(trees[settings["ROOT"]]["root_config"],
14747 myopts, "clean", [],
14748 ldpath_mtimes, autoclean=1)
14750 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14751 + " AUTOCLEAN is disabled. This can cause serious"
14752 + " problems due to overlapping packages.\n")
14753 trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
def multiple_actions(action1, action2):
	"""Report that two mutually exclusive emerge actions were requested."""
	notice_lines = (
		"\n!!! Multiple actions requested... Please choose one only.\n",
		"!!! '%s' or '%s'\n\n" % (action1, action2),
	)
	for notice in notice_lines:
		sys.stderr.write(notice)
def insert_optional_args(args):
	"""
	Parse optional arguments and insert a value if one has
	not been provided. This is done before feeding the args
	to the optparse parser since that parser does not support
	this feature natively.

	Currently this handles -j/--jobs, which may appear bare,
	fused (-j4), clustered with other short options, or with a
	separate count argument.
	"""
	jobs_opts = ("-j", "--jobs")
	# Work on a reversed copy so pop() yields args in original order.
	arg_stack = args[:]
	arg_stack.reverse()
	arg = arg_stack.pop()
	# A short-option cluster containing "j" (e.g. "-aj"), but not a
	# long option ("--...").
	short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
	if not (short_job_opt or arg in jobs_opts):
		# Not a jobs option: pass through unchanged.
		new_args.append(arg)
	# Insert an empty placeholder in order to
	# satisfy the requirements of optparse.
	new_args.append("--jobs")
	if short_job_opt and len(arg) > 2:
		if arg[:2] == "-j":
			# "-jN": the count is fused onto the option.
			job_count = int(arg[2:])
			saved_opts = arg[2:]
			# Clustered short options: strip "j" and re-emit the rest.
			saved_opts = arg[1:].replace("j", "")
	if job_count is None and arg_stack:
		# The count may follow as a separate argument ("-j 4").
		job_count = int(arg_stack[-1])
	# Discard the job count from the stack
	# since we're consuming it here.
	if job_count is None:
		# unlimited number of jobs
		new_args.append("True")
	new_args.append(str(job_count))
	if saved_opts is not None:
		# Re-insert short options that were clustered with "j".
		new_args.append("-" + saved_opts)
def parse_opts(tmpcmdline, silent=False):
	"""Parse an emerge command line into (myaction, myopts, myfiles).

	Builds an optparse parser from the module-level `actions`,
	`options` and `shortmapping` tables plus the local
	`argument_options` table of value-taking options, then validates
	--jobs and --load-average.
	NOTE(review): `silent` presumably suppresses warning output on
	invalid values -- confirm against the non-visible branches.
	"""
	global actions, options, shortmapping
	# Alternate long spellings accepted for some options.
	longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
	# Option name -> optparse add_option() keyword arguments.
	argument_options = {
		"help":"specify the location for portage configuration files",
		"help":"enable or disable color output",
		"choices":("y", "n")
		"help" : "Specifies the number of packages to build " + \
		"--load-average": {
			"help" :"Specifies that no new builds should be started " + \
				"if there are other builds running and the load average " + \
				"is at least LOAD (a floating-point number).",
		"help":"include unnecessary build time dependencies",
		"choices":("y", "n")
		"help":"specify conditions to trigger package reinstallation",
		"choices":["changed-use"]
		"help" : "specify the target root filesystem for merging packages",
	# emerge renders its own help text, so drop optparse's --help.
	from optparse import OptionParser
	parser = OptionParser()
	if parser.has_option("--help"):
		parser.remove_option("--help")
	# Boolean action flags (--sync, --depclean, ...).
	for action_opt in actions:
		parser.add_option("--" + action_opt, action="store_true",
			dest=action_opt.replace("-", "_"), default=False)
	# Boolean behavior options.
	for myopt in options:
		parser.add_option(myopt, action="store_true",
			dest=myopt.lstrip("--").replace("-", "_"), default=False)
	# Single-letter shortcuts mapped onto their long forms.
	for shortopt, longopt in shortmapping.iteritems():
		parser.add_option("-" + shortopt, action="store_true",
			dest=longopt.lstrip("--").replace("-", "_"), default=False)
	for myalias, myopt in longopt_aliases.iteritems():
		parser.add_option(myalias, action="store_true",
			dest=myopt.lstrip("--").replace("-", "_"), default=False)
	# Options that take an argument.
	for myopt, kwargs in argument_options.iteritems():
		parser.add_option(myopt,
			dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
	# Normalize optional-value options (-j/--jobs) before optparse sees them.
	tmpcmdline = insert_optional_args(tmpcmdline)
	myoptions, myargs = parser.parse_args(args=tmpcmdline)
	# "True" is the placeholder meaning "unlimited jobs".
	if myoptions.jobs == "True":
		jobs = int(myoptions.jobs)
	if jobs is not True and \
		writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
			(myoptions.jobs,), noiselevel=-1)
	myoptions.jobs = jobs
	if myoptions.load_average:
		load_average = float(myoptions.load_average)
		# Non-positive load averages are treated as "no limit".
		if load_average <= 0.0:
			load_average = None
			writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
				(myoptions.load_average,), noiselevel=-1)
	myoptions.load_average = load_average
	# Collect the boolean options that were actually set.
	for myopt in options:
		v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
		myopts[myopt] = True
	for myopt in argument_options:
		v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
	# --searchdesc implies the search action.
	if myoptions.searchdesc:
		myoptions.search = True
	# Exactly one action may be requested.
	for action_opt in actions:
		v = getattr(myoptions, action_opt.replace("-", "_"))
		multiple_actions(myaction, action_opt)
		myaction = action_opt
	return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
	"""Run settings validation on the vartree config of every root."""
	for root_trees in trees.values():
		root_trees["vartree"].settings.validate()
def clear_caches(trees):
	"""Drop cached metadata held by each tree's dbapi objects, then
	flush portage's global directory-listing cache."""
	for root_trees in trees.itervalues():
		port_db = root_trees["porttree"].dbapi
		port_db.melt()
		port_db._aux_cache.clear()
		bin_db = root_trees["bintree"].dbapi
		bin_db._aux_cache.clear()
		bin_db._clear_cache()
		root_trees["vartree"].dbapi.linkmap._clear_cache()
	portage.dircache.clear()
def load_emerge_config(trees=None):
	"""Build (settings, trees, mtimedb) from the environment.

	PORTAGE_CONFIGROOT and ROOT environment overrides are forwarded to
	portage.create_trees(); each root gets a RootConfig attached as
	trees[root]["root_config"].
	"""
	# Collect environment overrides for create_trees().
	for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
		v = os.environ.get(envvar, None)
		if v and v.strip():
	trees = portage.create_trees(trees=trees, **kwargs)
	for root, root_trees in trees.iteritems():
		settings = root_trees["vartree"].settings
		setconfig = load_default_config(settings, root_trees)
		root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
	settings = trees["/"]["vartree"].settings
	# NOTE(review): the loop below rebinds `settings` for each root, so
	# the last root iterated wins -- confirm intent against upstream.
	for myroot in trees:
		settings = trees[myroot]["vartree"].settings
	# The mtime database lives under the (host) portage cache path.
	mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
	mtimedb = portage.MtimeDB(mtimedbfile)
	return settings, trees, mtimedb
def adjust_config(myopts, settings):
	"""Make emerge specific adjustments to the config.

	Normalizes case-insensitive variables, strips FEATURES=noauto,
	validates the integer delay/debug variables, and propagates
	command-line flags (--quiet, --verbose, --noconfmem, --debug,
	--color) into the config, backing up each change.
	"""
	# To enhance usability, make some vars case insensitive by forcing them to
	for myvar in ("AUTOCLEAN", "NOCOLOR"):
		if myvar in settings:
			settings[myvar] = settings[myvar].lower()
			settings.backup_changes(myvar)
	# Kill noauto as it will break merges otherwise.
	if "noauto" in settings.features:
		settings.features.remove('noauto')
		settings['FEATURES'] = ' '.join(sorted(settings.features))
		settings.backup_changes("FEATURES")
	# Validate CLEAN_DELAY as an integer, falling back to the default.
	CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
	except ValueError, e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
			settings["CLEAN_DELAY"], noiselevel=-1)
	settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
	settings.backup_changes("CLEAN_DELAY")
	# Same treatment for EMERGE_WARNING_DELAY (default 10 seconds).
	EMERGE_WARNING_DELAY = 10
	EMERGE_WARNING_DELAY = int(settings.get(
		"EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
	except ValueError, e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
			settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
	settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
	settings.backup_changes("EMERGE_WARNING_DELAY")
	if "--quiet" in myopts:
		settings["PORTAGE_QUIET"]="1"
		settings.backup_changes("PORTAGE_QUIET")
	if "--verbose" in myopts:
		settings["PORTAGE_VERBOSE"] = "1"
		settings.backup_changes("PORTAGE_VERBOSE")
	# Set so that configs will be merged regardless of remembered status
	if ("--noconfmem" in myopts):
		settings["NOCONFMEM"]="1"
		settings.backup_changes("NOCONFMEM")
	# Set various debug markers... They should be merged somehow.
	# PORTAGE_DEBUG must parse as 0 or 1.
	PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
	if PORTAGE_DEBUG not in (0, 1):
		portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
			PORTAGE_DEBUG, noiselevel=-1)
		portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
	except ValueError, e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
			settings["PORTAGE_DEBUG"], noiselevel=-1)
	if "--debug" in myopts:
		settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
		settings.backup_changes("PORTAGE_DEBUG")
	# Color defaults on unless NOCOLOR says otherwise.
	if settings.get("NOCOLOR") not in ("yes","true"):
		portage.output.havecolor = 1
	"""The explicit --color < y | n > option overrides the NOCOLOR environment
	variable and stdout auto-detection."""
	if "--color" in myopts:
		if "y" == myopts["--color"]:
			portage.output.havecolor = 1
			settings["NOCOLOR"] = "false"
			portage.output.havecolor = 0
			settings["NOCOLOR"] = "true"
		settings.backup_changes("NOCOLOR")
	elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
		# Not a terminal: disable color unless explicitly forced on.
		portage.output.havecolor = 0
		settings["NOCOLOR"] = "true"
		settings.backup_changes("NOCOLOR")
def apply_priorities(settings):
	"""Apply process priority settings from the given config.

	NOTE(review): body not visible in this view; the nice()/ionice()
	helpers defined below appear to implement the actual work -- confirm.
	"""
def nice(settings):
	"""Renice the current process per PORTAGE_NICENESS (default 0)."""
	os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
	# NOTE(review): an `except` with no visible `try:` -- presumably the
	# os.nice() call above is meant to be wrapped; confirm against upstream.
	except (OSError, ValueError), e:
		out = portage.output.EOutput()
		out.eerror("Failed to change nice value to '%s'" % \
			settings["PORTAGE_NICENESS"])
		out.eerror("%s\n" % str(e))
def ionice(settings):
	"""Run PORTAGE_IONICE_COMMAND (with ${PID} expanded) for this process."""
	ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
	ionice_cmd = shlex.split(ionice_cmd)
	# Substitute ${PID} with the current process id before spawning.
	from portage.util import varexpand
	variables = {"PID" : str(os.getpid())}
	cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
	rval = portage.process.spawn(cmd, env=os.environ)
	except portage.exception.CommandNotFound:
		# The OS kernel probably doesn't support ionice,
		# so return silently.
	# A non-zero exit status from the user-configured command is reported.
	if rval != os.EX_OK:
		out = portage.output.EOutput()
		out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
		out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
def display_missing_pkg_set(root_config, set_name):
	"""Log an error naming the unknown set and listing the sets that exist."""
	msg.append(("emerge: There are no sets to satisfy '%s'. " + \
		"The following sets exist:") % \
		colorize("INFORM", set_name))
	# Enumerate the configured sets so the user can pick a valid one.
	for s in sorted(root_config.sets):
		msg.append("    %s" % s)
	writemsg_level("".join("%s\n" % l for l in msg),
		level=logging.ERROR, noiselevel=-1)
def expand_set_arguments(myfiles, myaction, root_config):
	"""Expand @set arguments in myfiles into package atoms.

	Handles set parameters (@set/name(arg,...)), simple left-to-right
	set expressions built from the */@ (intersection), -@ (difference)
	and +@ (union) operators, and plain @set references.  Returns
	(newargs, retval).
	"""
	setconfig = root_config.setconfig
	sets = setconfig.getSets()
	# In order to know exactly which atoms/sets should be added to the
	# world file, the depgraph performs set expansion later. It will get
	# confused about where the atoms came from if it's not allowed to
	# expand them itself.
	do_not_expand = (None, )
	# Accept bare "system"/"world" as set references.
	if a in ("system", "world"):
		newargs.append(SETPREFIX+a)
	# separators for set arguments
	# WARNING: all operators must be of equal length
	DIFF_OPERATOR = "-@"
	UNION_OPERATOR = "+@"
	# First pass: extract per-set parameters and update the set config.
	for i in range(0, len(myfiles)):
		if myfiles[i].startswith(SETPREFIX):
			x = myfiles[i][len(SETPREFIX):]
			start = x.find(ARG_START)
			end = x.find(ARG_END)
			if start > 0 and start < end:
				namepart = x[:start]
				argpart = x[start+1:end]
				# TODO: implement proper quoting
				args = argpart.split(",")
				k, v = a.split("=", 1)
				# Bare argument names become boolean flags.
				options[a] = "True"
				setconfig.update(namepart, options)
				newset += (x[:start-len(namepart)]+namepart)
				x = x[end+len(ARG_END):]
			myfiles[i] = SETPREFIX+newset
	# Reload sets after any parameter updates above.
	sets = setconfig.getSets()
	# display errors that occured while loading the SetConfig instance
	for e in setconfig.errors:
		print colorize("BAD", "Error during set creation: %s" % e)
	# emerge relies on the existance of sets with names "world" and "system"
	required_sets = ("world", "system")
	for s in required_sets:
		missing_sets.append(s)
	# Build a human-readable list of the missing required sets.
	if len(missing_sets) > 2:
		missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
		missing_sets_str += ', and "%s"' % missing_sets[-1]
	elif len(missing_sets) == 2:
		missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
	missing_sets_str = '"%s"' % missing_sets[-1]
	msg = ["emerge: incomplete set configuration, " + \
		"missing set(s): %s" % missing_sets_str]
	msg.append("  sets defined: %s" % ", ".join(sets))
	msg.append("  This usually means that '%s'" % \
		(os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
	msg.append("  is missing or corrupt.")
	writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
	unmerge_actions = ("unmerge", "prune", "clean", "depclean")
	# Second pass: expand each @set argument into atoms.
	if a.startswith(SETPREFIX):
		# support simple set operations (intersection, difference and union)
		# on the commandline. Expressions are evaluated strictly left-to-right
		if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
			expression = a[len(SETPREFIX):]
			# Peel operators off from the right; operands stack up left-to-right.
			while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
				is_pos = expression.rfind(IS_OPERATOR)
				diff_pos = expression.rfind(DIFF_OPERATOR)
				union_pos = expression.rfind(UNION_OPERATOR)
				op_pos = max(is_pos, diff_pos, union_pos)
				s1 = expression[:op_pos]
				s2 = expression[op_pos+len(IS_OPERATOR):]
				op = expression[op_pos:op_pos+len(IS_OPERATOR)]
				display_missing_pkg_set(root_config, s2)
				expr_sets.insert(0, s2)
				expr_ops.insert(0, op)
			if not expression in sets:
				display_missing_pkg_set(root_config, expression)
			expr_sets.insert(0, expression)
			# Fold the operator list over the operand atoms, left to right.
			result = set(setconfig.getSetAtoms(expression))
			for i in range(0, len(expr_ops)):
				s2 = setconfig.getSetAtoms(expr_sets[i+1])
				if expr_ops[i] == IS_OPERATOR:
					result.intersection_update(s2)
				elif expr_ops[i] == DIFF_OPERATOR:
					result.difference_update(s2)
				elif expr_ops[i] == UNION_OPERATOR:
					raise NotImplementedError("unknown set operator %s" % expr_ops[i])
			newargs.extend(result)
		# Plain @set reference (no operators).
		s = a[len(SETPREFIX):]
		display_missing_pkg_set(root_config, s)
		setconfig.active.append(s)
		set_atoms = setconfig.getSetAtoms(s)
		except portage.exception.PackageSetNotFound, e:
			writemsg_level(("emerge: the given set '%s' " + \
				"contains a non-existent set named '%s'.\n") % \
				(s, e), level=logging.ERROR, noiselevel=-1)
		# Unmerge-style actions require the set to support unmerging.
		if myaction in unmerge_actions and \
			not sets[s].supportsOperation("unmerge"):
			sys.stderr.write("emerge: the given set '%s' does " % s + \
				"not support unmerge operations\n")
		elif not set_atoms:
			print "emerge: '%s' is an empty set" % s
		elif myaction not in do_not_expand:
			newargs.extend(set_atoms)
			# Leave the set reference intact for the depgraph to expand.
			newargs.append(SETPREFIX+s)
	for e in sets[s].errors:
	return (newargs, retval)
def repo_name_check(trees):
	"""Warn about repositories lacking profiles/repo_name entries.

	Returns True when at least one configured repository is missing a
	repo_name file.
	"""
	missing_repo_names = set()
	for root, root_trees in trees.iteritems():
		if "porttree" in root_trees:
			portdb = root_trees["porttree"].dbapi
			# Start from all trees, then discard those with a known name.
			missing_repo_names.update(portdb.porttrees)
			repos = portdb.getRepositories()
			missing_repo_names.discard(portdb.getRepositoryPath(r))
			if portdb.porttree_root in missing_repo_names and \
				not os.path.exists(os.path.join(
				portdb.porttree_root, "profiles")):
				# This is normal if $PORTDIR happens to be empty,
				# so don't warn about it.
				missing_repo_names.remove(portdb.porttree_root)
	if missing_repo_names:
		msg.append("WARNING: One or more repositories " + \
			"have missing repo_name entries:")
		for p in missing_repo_names:
			msg.append("\t%s/profiles/repo_name" % (p,))
		msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
			"should be a plain text file containing a unique " + \
			"name for the repository on the first line.", 70))
		writemsg_level("".join("%s\n" % l for l in msg),
			level=logging.WARNING, noiselevel=-1)
	return bool(missing_repo_names)
def config_protect_check(trees):
	"""Warn for any root whose config has an empty CONFIG_PROTECT."""
	for root, root_trees in trees.iteritems():
		if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
			msg = "!!! CONFIG_PROTECT is empty"
			# Qualify the warning with the root it applies to.
			msg += " for '%s'" % root
			writemsg_level(msg, level=logging.WARN, noiselevel=-1)
def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
	"""Tell the user that a short package name matched several packages.

	In --quiet mode just prints the fully-qualified candidates;
	otherwise runs a search to show richer output for each candidate.
	"""
	if "--quiet" in myopts:
		print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
		print "!!! one of the following fully-qualified ebuild names instead:\n"
		# Deduplicate the matching category/package names.
		for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
			print "    " + colorize("INFORM", cp)
	# Verbose path: use the search machinery to describe each candidate.
	s = search(root_config, spinner, "--searchdesc" in myopts,
		"--quiet" not in myopts, "--usepkg" in myopts,
		"--usepkgonly" in myopts)
	null_cp = portage.dep_getkey(insert_category_into_atom(
	cat, atom_pn = portage.catsplit(null_cp)
	s.searchkey = atom_pn
	for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
		print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
		print "!!! one of the above fully-qualified ebuild names instead.\n"
def profile_check(trees, myaction, myopts):
	"""Verify every root has a valid profile before acting.

	Actions that work without a profile (info, sync, --version,
	--help) are exempted.  NOTE(review): the return statements are not
	visible in this view -- presumably os.EX_OK on success and a
	non-zero code on failure; confirm against upstream.
	"""
	if myaction in ("info", "sync"):
	elif "--version" in myopts or "--help" in myopts:
	for root, root_trees in trees.iteritems():
		if root_trees["root_config"].settings.profiles:
		# generate some profile related warning messages
		validate_ebuild_environment(trees)
		msg = "If you have just changed your profile configuration, you " + \
			"should revert back to the previous configuration. Due to " + \
			"your current profile being invalid, allowed actions are " + \
			"limited to --help, --info, --sync, and --version."
		writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
			level=logging.ERROR, noiselevel=-1)
15379 global portage # NFC why this is necessary now - genone
15380 portage._disable_legacy_globals()
15381 # Disable color until we're sure that it should be enabled (after
15382 # EMERGE_DEFAULT_OPTS has been parsed).
15383 portage.output.havecolor = 0
15384 # This first pass is just for options that need to be known as early as
15385 # possible, such as --config-root. They will be parsed again later,
15386 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15387 # the value of --config-root).
15388 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15389 if "--debug" in myopts:
15390 os.environ["PORTAGE_DEBUG"] = "1"
15391 if "--config-root" in myopts:
15392 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15393 if "--root" in myopts:
15394 os.environ["ROOT"] = myopts["--root"]
15396 # Portage needs to ensure a sane umask for the files it creates.
15398 settings, trees, mtimedb = load_emerge_config()
15399 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15400 rval = profile_check(trees, myaction, myopts)
15401 if rval != os.EX_OK:
15404 if portage._global_updates(trees, mtimedb["updates"]):
15406 # Reload the whole config from scratch.
15407 settings, trees, mtimedb = load_emerge_config(trees=trees)
15408 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15410 xterm_titles = "notitles" not in settings.features
15413 if "--ignore-default-opts" not in myopts:
15414 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15415 tmpcmdline.extend(sys.argv[1:])
15416 myaction, myopts, myfiles = parse_opts(tmpcmdline)
15418 if "--digest" in myopts:
15419 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15420 # Reload the whole config from scratch so that the portdbapi internal
15421 # config is updated with new FEATURES.
15422 settings, trees, mtimedb = load_emerge_config(trees=trees)
15423 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15425 for myroot in trees:
15426 mysettings = trees[myroot]["vartree"].settings
15427 mysettings.unlock()
15428 adjust_config(myopts, mysettings)
15429 if '--pretend' not in myopts and myaction in \
15430 (None, 'clean', 'depclean', 'prune', 'unmerge'):
15431 mysettings["PORTAGE_COUNTER_HASH"] = \
15432 trees[myroot]["vartree"].dbapi._counter_hash()
15433 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15435 del myroot, mysettings
15437 apply_priorities(settings)
15439 spinner = stdout_spinner()
15440 if "candy" in settings.features:
15441 spinner.update = spinner.update_scroll
15443 if "--quiet" not in myopts:
15444 portage.deprecated_profile_check(settings=settings)
15445 repo_name_check(trees)
15446 config_protect_check(trees)
15448 eclasses_overridden = {}
15449 for mytrees in trees.itervalues():
15450 mydb = mytrees["porttree"].dbapi
15451 # Freeze the portdbapi for performance (memoize all xmatch results).
15453 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
15456 if eclasses_overridden and \
15457 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
15458 prefix = bad(" * ")
15459 if len(eclasses_overridden) == 1:
15460 writemsg(prefix + "Overlay eclass overrides " + \
15461 "eclass from PORTDIR:\n", noiselevel=-1)
15463 writemsg(prefix + "Overlay eclasses override " + \
15464 "eclasses from PORTDIR:\n", noiselevel=-1)
15465 writemsg(prefix + "\n", noiselevel=-1)
15466 for eclass_name in sorted(eclasses_overridden):
15467 writemsg(prefix + " '%s/%s.eclass'\n" % \
15468 (eclasses_overridden[eclass_name], eclass_name),
15470 writemsg(prefix + "\n", noiselevel=-1)
15471 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
15472 "because it will trigger invalidation of cached ebuild metadata " + \
15473 "that is distributed with the portage tree. If you must " + \
15474 "override eclasses from PORTDIR then you are advised to add " + \
15475 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
15476 "`emerge --regen` after each time that you run `emerge --sync`. " + \
15477 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
15478 "you would like to disable this warning."
15479 from textwrap import wrap
15480 for line in wrap(msg, 72):
15481 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
15483 if "moo" in myfiles:
15486 Larry loves Gentoo (""" + platform.system() + """)
15488 _______________________
15489 < Have you mooed today? >
15490 -----------------------
15500 ext = os.path.splitext(x)[1]
15501 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15502 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15505 root_config = trees[settings["ROOT"]]["root_config"]
15506 if myaction == "list-sets":
15507 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15511 # only expand sets for actions taking package arguments
15512 oldargs = myfiles[:]
15513 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15514 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15515 if retval != os.EX_OK:
15518 # Need to handle empty sets specially, otherwise emerge will react
15519 # with the help message for empty argument lists
15520 if oldargs and not myfiles:
15521 print "emerge: no targets left after set expansion"
15524 if ("--tree" in myopts) and ("--columns" in myopts):
15525 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15528 if ("--quiet" in myopts):
15529 spinner.update = spinner.update_quiet
15530 portage.util.noiselimit = -1
15532 # Always create packages if FEATURES=buildpkg
15533 # Imply --buildpkg if --buildpkgonly
15534 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15535 if "--buildpkg" not in myopts:
15536 myopts["--buildpkg"] = True
15538 # Always try and fetch binary packages if FEATURES=getbinpkg
15539 if ("getbinpkg" in settings.features):
15540 myopts["--getbinpkg"] = True
15542 if "--buildpkgonly" in myopts:
15543 # --buildpkgonly will not merge anything, so
15544 # it cancels all binary package options.
15545 for opt in ("--getbinpkg", "--getbinpkgonly",
15546 "--usepkg", "--usepkgonly"):
15547 myopts.pop(opt, None)
15549 if "--fetch-all-uri" in myopts:
15550 myopts["--fetchonly"] = True
15552 if "--skipfirst" in myopts and "--resume" not in myopts:
15553 myopts["--resume"] = True
15555 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15556 myopts["--usepkgonly"] = True
15558 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15559 myopts["--getbinpkg"] = True
15561 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15562 myopts["--usepkg"] = True
15564 # Also allow -K to apply --usepkg/-k
15565 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15566 myopts["--usepkg"] = True
15568 # Allow -p to remove --ask
15569 if ("--pretend" in myopts) and ("--ask" in myopts):
15570 print ">>> --pretend disables --ask... removing --ask from options."
15571 del myopts["--ask"]
15573 # forbid --ask when not in a terminal
15574 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15575 if ("--ask" in myopts) and (not sys.stdin.isatty()):
15576 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15580 if settings.get("PORTAGE_DEBUG", "") == "1":
15581 spinner.update = spinner.update_quiet
15583 if "python-trace" in settings.features:
15584 import portage.debug
15585 portage.debug.set_trace(True)
15587 if not ("--quiet" in myopts):
15588 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15589 spinner.update = spinner.update_basic
15591 if myaction == 'version':
15592 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15593 settings.profile_path, settings["CHOST"],
15594 trees[settings["ROOT"]]["vartree"].dbapi)
15596 elif "--help" in myopts:
15597 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15600 if "--debug" in myopts:
15601 print "myaction", myaction
15602 print "myopts", myopts
15604 if not myaction and not myfiles and "--resume" not in myopts:
15605 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15608 pretend = "--pretend" in myopts
15609 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15610 buildpkgonly = "--buildpkgonly" in myopts
15612 # check if root user is the current user for the actions where emerge needs this
15613 if portage.secpass < 2:
15614 # We've already allowed "--version" and "--help" above.
15615 if "--pretend" not in myopts and myaction not in ("search","info"):
15616 need_superuser = not \
15618 (buildpkgonly and secpass >= 1) or \
15619 myaction in ("metadata", "regen") or \
15620 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15621 if portage.secpass < 1 or \
15624 access_desc = "superuser"
15626 access_desc = "portage group"
15627 # Always show portage_group_warning() when only portage group
15628 # access is required but the user is not in the portage group.
15629 from portage.data import portage_group_warning
15630 if "--ask" in myopts:
15631 myopts["--pretend"] = True
15632 del myopts["--ask"]
15633 print ("%s access is required... " + \
15634 "adding --pretend to options.\n") % access_desc
15635 if portage.secpass < 1 and not need_superuser:
15636 portage_group_warning()
15638 sys.stderr.write(("emerge: %s access is " + \
15639 "required.\n\n") % access_desc)
15640 if portage.secpass < 1 and not need_superuser:
15641 portage_group_warning()
15644 disable_emergelog = False
15645 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15647 disable_emergelog = True
15649 if myaction in ("search", "info"):
15650 disable_emergelog = True
15651 if disable_emergelog:
15652 """ Disable emergelog for everything except build or unmerge
15653 operations. This helps minimize parallel emerge.log entries that can
15654 confuse log parsers. We especially want it disabled during
15655 parallel-fetch, which uses --resume --fetchonly."""
15657 def emergelog(*pargs, **kargs):
15660 if not "--pretend" in myopts:
15661 emergelog(xterm_titles, "Started emerge on: "+\
15662 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15665 myelogstr=" ".join(myopts)
15667 myelogstr+=" "+myaction
15669 myelogstr += " " + " ".join(oldargs)
15670 emergelog(xterm_titles, " *** emerge " + myelogstr)
15673 def emergeexitsig(signum, frame):
15674 signal.signal(signal.SIGINT, signal.SIG_IGN)
15675 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15676 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15677 sys.exit(100+signum)
15678 signal.signal(signal.SIGINT, emergeexitsig)
15679 signal.signal(signal.SIGTERM, emergeexitsig)
# NOTE(review): the "def emergeexit():" header (original lines 15680-15681)
# is elided from this excerpt; below are its docstring/body plus the
# registration that runs it at normal interpreter shutdown.
15682 """This gets out final log message in before we quit."""
15683 if "--pretend" not in myopts:
15684 emergelog(xterm_titles, " *** terminating.")
# Reset the xterm title unless titles are disabled (action line elided).
15685 if "notitles" not in settings.features:
15687 portage.atexit_register(emergeexit)
# NOTE(review): elided action dispatch of emerge's main().  Embedded
# "15xxx" numbers and numbering gaps are extraction artifacts; loop
# headers, try/else lines and exit calls are missing between the visible
# statements, so comments only describe what is shown.
# Maintenance actions never support --pretend, so reject that combo first.
15689 if myaction in ("config", "metadata", "regen", "sync"):
15690 if "--pretend" in myopts:
15691 sys.stderr.write(("emerge: The '%s' action does " + \
15692 "not support '--pretend'.\n") % myaction)
15695 if "sync" == myaction:
15696 return action_sync(settings, trees, mtimedb, myopts, myaction)
15697 elif "metadata" == myaction:
15698 action_metadata(settings, portdb, myopts)
15699 elif myaction=="regen":
15700 validate_ebuild_environment(trees)
15701 return action_regen(settings, portdb, myopts.get("--jobs"),
15702 myopts.get("--load-average"))
15704 elif "config"==myaction:
15705 validate_ebuild_environment(trees)
15706 action_config(settings, trees, myopts, myfiles)
15709 elif "search"==myaction:
15710 validate_ebuild_environment(trees)
15711 action_search(trees[settings["ROOT"]]["root_config"],
15712 myopts, myfiles, spinner)
# Unmerge-style actions: validate every atom up front so we fail with a
# clear message rather than partway through an unmerge.
15713 elif myaction in ("clean", "unmerge") or \
15714 (myaction == "prune" and "--nodeps" in myopts):
15715 validate_ebuild_environment(trees)
15717 # Ensure atoms are valid before calling unmerge().
15718 # For backward compat, leading '=' is not required.
15720 if is_valid_package_atom(x) or \
15721 is_valid_package_atom("=" + x):
15724 msg.append("'%s' is not a valid package atom." % (x,))
15725 msg.append("Please check ebuild(5) for full details.")
15726 writemsg_level("".join("!!! %s\n" % line for line in msg),
15727 level=logging.ERROR, noiselevel=-1)
15730 # When given a list of atoms, unmerge
15731 # them in the order given.
15732 ordered = myaction == "unmerge"
15733 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15734 mtimedb["ldpath"], ordered=ordered):
15735 if not (buildpkgonly or fetchonly or pretend):
15736 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
# depclean/info/prune: expand short names against the installed-package
# db, listing the fully-qualified candidates when a name is ambiguous.
15738 elif myaction in ("depclean", "info", "prune"):
15740 # Ensure atoms are valid before calling unmerge().
15741 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15744 if is_valid_package_atom(x):
15746 valid_atoms.append(
15747 portage.dep_expand(x, mydb=vardb, settings=settings))
15748 except portage.exception.AmbiguousPackageName, e:
15749 msg = "The short ebuild name \"" + x + \
15750 "\" is ambiguous. Please specify " + \
15751 "one of the following " + \
15752 "fully-qualified ebuild names instead:"
15753 for line in textwrap.wrap(msg, 70):
15754 writemsg_level("!!! %s\n" % (line,),
15755 level=logging.ERROR, noiselevel=-1)
15757 writemsg_level(" %s\n" % colorize("INFORM", i),
15758 level=logging.ERROR, noiselevel=-1)
15759 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15763 msg.append("'%s' is not a valid package atom." % (x,))
15764 msg.append("Please check ebuild(5) for full details.")
15765 writemsg_level("".join("!!! %s\n" % line for line in msg),
15766 level=logging.ERROR, noiselevel=-1)
15769 if myaction == "info":
15770 return action_info(settings, trees, myopts, valid_atoms)
15772 validate_ebuild_environment(trees)
15773 action_depclean(settings, trees, mtimedb["ldpath"],
15774 myopts, myaction, valid_atoms, spinner)
15775 if not (buildpkgonly or fetchonly or pretend):
15776 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15777 # "update", "system", or just process files:
# Default build/update path.  Arguments may be sets (SETPREFIX prefix),
# package atoms, or filesystem paths (leading os.sep).
15779 validate_ebuild_environment(trees)
15782 if x.startswith(SETPREFIX) or \
15783 is_valid_package_atom(x):
15785 if x[:1] == os.sep:
15793 msg.append("'%s' is not a valid package atom." % (x,))
15794 msg.append("Please check ebuild(5) for full details.")
15795 writemsg_level("".join("!!! %s\n" % line for line in msg),
15796 level=logging.ERROR, noiselevel=-1)
# Show pending news before a real (non-pretend) run, then build and do
# post-emerge bookkeeping with the build's exit status.
15799 if "--pretend" not in myopts:
15800 display_news_notification(root_config, myopts)
15801 retval = action_build(settings, trees, mtimedb,
15802 myopts, myaction, myfiles, spinner)
15803 root_config = trees[settings["ROOT"]]["root_config"]
15804 post_emerge(root_config, myopts, mtimedb, retval)