2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
8 from collections import deque
28 from os import path as osp
29 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
32 from portage import digraph
33 from portage.const import NEWS_LIB_PATH
36 import portage.xpak, commands, errno, re, socket, time
37 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
38 nc_len, red, teal, turquoise, xtermTitle, \
39 xtermTitleReset, yellow
40 from portage.output import create_color_func
41 good = create_color_func("GOOD")
42 bad = create_color_func("BAD")
43 # white looks bad on terminals with white background
44 from portage.output import bold as white
48 portage.dep._dep_check_strict = True
51 import portage.exception
52 from portage.cache.cache_errors import CacheError
53 from portage.data import secpass
54 from portage.elog.messages import eerror
55 from portage.util import normalize_path as normpath
56 from portage.util import cmp_sort_key, writemsg, writemsg_level
57 from portage.sets import load_default_config, SETPREFIX
58 from portage.sets.base import InternalPackageSet
60 from itertools import chain, izip
63 import cPickle as pickle
68 from cStringIO import StringIO
70 from StringIO import StringIO
72 class stdout_spinner(object):
# NOTE(review): this listing embeds original line numbers and is missing
# interior lines (the numbering is non-contiguous); code kept verbatim.
# Idle messages scrolled across the tty; the opening "scroll_msgs = ["
# line is not visible in this view.
74 "Gentoo Rocks ("+platform.system()+")",
75 "Thank you for using Gentoo. :)",
76 "Are you actually trying to read this?",
77 "How many times have you stared at this?",
78 "We are generating the cache right now",
79 "You are paying too much attention.",
80 "A theory is better than its explanation.",
81 "Phasers locked on target, Captain.",
82 "Thrashing is just virtual crashing.",
83 "To be is to program.",
84 "Real Users hate Real Programmers.",
85 "When all else fails, read the instructions.",
86 "Functionality breeds Contempt.",
87 "The future lies ahead.",
88 "3.1415926535897932384626433832795028841971694",
89 "Sometimes insanity is the only alternative.",
90 "Inaccuracy saves a world of explanation.",
# Character cycle used by update_twirl() (backslashes are escaped).
93 twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
# __init__ fragment: default spinner style is the twirl, and the scroll
# message is picked pseudo-randomly from the current clock value.
97 self.update = self.update_twirl
98 self.scroll_sequence = self.scroll_msgs[
99 int(time.time() * 100) % len(self.scroll_msgs)]
# Minimum interval (seconds) between tty writes; see _return_early().
101 self.min_display_latency = 0.05
# Rate limiter: suppresses output when called again within
# min_display_latency seconds (the return lines are missing here).
103 def _return_early(self):
105 Flushing output to the tty too frequently wastes cpu time. Therefore,
106 each update* method should return without doing any output when this
109 cur_time = time.time()
110 if cur_time - self.last_update < self.min_display_latency:
112 self.last_update = cur_time
# Prints a dot every 100th position, with ". " at a full wrap of 500.
115 def update_basic(self):
116 self.spinpos = (self.spinpos + 1) % 500
117 if self._return_early():
119 if (self.spinpos % 100) == 0:
120 if self.spinpos == 0:
121 sys.stdout.write(". ")
123 sys.stdout.write(".")
# Scrolls the chosen message forward then backward (period is twice the
# message length).
126 def update_scroll(self):
127 if self._return_early():
129 if(self.spinpos >= len(self.scroll_sequence)):
130 sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
131 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
133 sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
135 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
# Rotates through twirl_sequence, overwriting the previous character.
137 def update_twirl(self):
138 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
139 if self._return_early():
141 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
# No-op spinner variant; its body is not visible in this listing.
144 def update_quiet(self):
147 def userquery(prompt, responses=None, colours=None):
148 """Displays a prompt and a set of responses, then waits for a response
149 which is checked against the responses and the first to match is
150 returned. An empty response will match the first value in responses. The
151 input buffer is *not* cleared prior to the prompt!
154 responses: a List of Strings.
155 colours: a List of Functions taking and returning a String, used to
156 process the responses for display. Typically these will be functions
157 like red() but could be e.g. lambda x: "DisplayString".
158 If responses is omitted, defaults to ["Yes", "No"], [green, red].
159 If only colours is omitted, defaults to [bold, ...].
161 Returns a member of the List responses. (If called without optional
162 arguments, returns "Yes" or "No".)
163 KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
# Default prompt is Yes/No with PROMPT_CHOICE_* colors (the "colours = ["
# opener between these lines is not visible in this listing).
165 if responses is None:
166 responses = ["Yes", "No"]
168 create_color_func("PROMPT_CHOICE_DEFAULT"),
169 create_color_func("PROMPT_CHOICE_OTHER")
171 elif colours is None:
# Pad/trim the colour list so it is exactly as long as responses.
173 colours=(colours*len(responses))[:len(responses)]
# Prompt loop (surrounding while/try lines missing from this view);
# raw_input is the Python 2 builtin.
177 response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
178 for key in responses:
179 # An empty response will match the first value in responses.
180 if response.upper()==key[:len(response)].upper():
182 print "Sorry, response '%s' not understood." % response,
183 except (EOFError, KeyboardInterrupt):
# Valid emerge action names (the closing bracket of this frozenset is
# not visible in this listing).
187 actions = frozenset([
188 "clean", "config", "depclean",
189 "info", "list-sets", "metadata",
190 "prune", "regen", "search",
191 "sync", "unmerge", "version",
# Long command-line options accepted by emerge; the container's
# opening/closing lines are missing from this view.
194 "--ask", "--alphabetical",
195 "--buildpkg", "--buildpkgonly",
196 "--changelog", "--columns",
201 "--fetchonly", "--fetch-all-uri",
202 "--getbinpkg", "--getbinpkgonly",
203 "--help", "--ignore-default-opts",
207 "--nodeps", "--noreplace",
208 "--nospinner", "--oneshot",
209 "--onlydeps", "--pretend",
210 "--quiet", "--resume",
211 "--searchdesc", "--selective",
215 "--usepkg", "--usepkgonly",
# Single-letter short option -> long option mapping (the dict header
# line is missing from this listing).
222 "b":"--buildpkg", "B":"--buildpkgonly",
223 "c":"--clean", "C":"--unmerge",
224 "d":"--debug", "D":"--deep",
226 "f":"--fetchonly", "F":"--fetch-all-uri",
227 "g":"--getbinpkg", "G":"--getbinpkgonly",
229 "k":"--usepkg", "K":"--usepkgonly",
231 "n":"--noreplace", "N":"--newuse",
232 "o":"--onlydeps", "O":"--nodeps",
233 "p":"--pretend", "P":"--prune",
235 "s":"--search", "S":"--searchdesc",
238 "v":"--verbose", "V":"--version"
# Appends a timestamped entry to /var/log/emerge.log and optionally sets
# the xterm title to short_msg. Failures are reported, not raised.
241 def emergelog(xterm_titles, mystr, short_msg=None):
242 if xterm_titles and short_msg:
243 if "HOSTNAME" in os.environ:
# Prefix the title with the local hostname when available.
244 short_msg = os.environ["HOSTNAME"]+": "+short_msg
245 xtermTitle(short_msg)
# The enclosing try/finally lines are missing from this listing.
247 file_path = "/var/log/emerge.log"
248 mylogfile = open(file_path, "a")
# Keep the log owned by the portage user/group regardless of caller.
249 portage.util.apply_secpass_permissions(file_path,
250 uid=portage.portage_uid, gid=portage.portage_gid,
254 mylock = portage.locks.lockfile(mylogfile)
255 # seek because we may have gotten held up by the lock.
256 # if so, we may not be positioned at the end of the file.
# Timestamp truncated to whole seconds (first 10 chars of time.time()).
258 mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
262 portage.locks.unlockfile(mylock)
# Best-effort logging: errors go to stderr (Python 2 except/print syntax).
264 except (IOError,OSError,portage.exception.PortageException), e:
266 print >> sys.stderr, "emergelog():",e
# Prints a countdown before a destructive action; the loop over the
# remaining seconds is only partially visible in this listing.
268 def countdown(secs=5, doing="Starting"):
270 print ">>> Waiting",secs,"seconds before starting..."
271 print ">>> (Control-C to abort)...\n"+doing+" in: ",
275 sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
280 # formats a size given in bytes nicely
# Returns a human-readable "N kB"-style string; the lines handling a
# string input and the thousands-separator loop are partly missing here.
281 def format_size(mysize):
282 if isinstance(mysize, basestring):
284 if 0 != mysize % 1024:
285 # Always round up to the next kB so that it doesn't show 0 kB when
286 # some small file still needs to be fetched.
287 mysize += 1024 - mysize % 1024
288 mystr=str(mysize/1024)
292 mystr=mystr[:mycount]+","+mystr[mycount:]
296 def getgccversion(chost):
# Returns "gcc-<version>" for the active compiler, trying gcc-config,
# then ${CHOST}-gcc, then plain gcc; "[unavailable]" if all fail.
299 return: the current in-use gcc version
302 gcc_ver_command = 'gcc -dumpversion'
303 gcc_ver_prefix = 'gcc-'
305 gcc_not_found_error = red(
306 "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
307 "!!! to update the environment of this terminal and possibly\n" +
308 "!!! other terminals also.\n"
# Preferred source: gcc-config reports the CHOST-prefixed profile name.
311 mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
312 if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
313 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
# Fallback: ask the CHOST-prefixed compiler directly for its version.
315 mystatus, myoutput = commands.getstatusoutput(
316 chost + "-" + gcc_ver_command)
317 if mystatus == os.EX_OK:
318 return gcc_ver_prefix + myoutput
# Last resort: unprefixed gcc on PATH.
320 mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
321 if mystatus == os.EX_OK:
322 return gcc_ver_prefix + myoutput
324 portage.writemsg(gcc_not_found_error, noiselevel=-1)
325 return "[unavailable]"
# Builds the "Portage <ver> (<profile>, <gcc>, <libc>, <kernel arch>)"
# banner string used by emerge --version / --info.
327 def getportageversion(portdir, target_root, profile, chost, vardb):
328 profilever = "unavailable"
330 realpath = os.path.realpath(profile)
331 basepath = os.path.realpath(os.path.join(portdir, "profiles"))
332 if realpath.startswith(basepath):
# Profile inside $PORTDIR/profiles: show the relative profile path.
333 profilever = realpath[1 + len(basepath):]
336 profilever = "!" + os.readlink(profile)
339 del realpath, basepath
# Determine the installed libc version via the virtuals (the enclosing
# try/for lines are missing from this listing).
342 libclist = vardb.match("virtual/libc")
343 libclist += vardb.match("virtual/glibc")
344 libclist = portage.util.unique_array(libclist)
346 xs=portage.catpkgsplit(x)
348 libcver+=","+"-".join(xs[1:])
350 libcver="-".join(xs[1:])
352 libcver="unavailable"
354 gccver = getgccversion(chost)
355 unameout=platform.release()+" "+platform.machine()
357 return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
# Translates emerge options/action into the depgraph parameter set (the
# "--deep" handling body and the return line are missing from this view).
359 def create_depgraph_params(myopts, myaction):
360 #configure emerge engine parameters
362 # self: include _this_ package regardless of if it is merged.
363 # selective: exclude the package if it is merged
364 # recurse: go into the dependencies
365 # deep: go into the dependencies of already merged packages
366 # empty: pretend nothing is merged
367 # complete: completely account for all known dependencies
368 # remove: build graph for use in removing packages
369 myparams = set(["recurse"])
371 if myaction == "remove":
372 myparams.add("remove")
373 myparams.add("complete")
376 if "--update" in myopts or \
377 "--newuse" in myopts or \
378 "--reinstall" in myopts or \
379 "--noreplace" in myopts:
380 myparams.add("selective")
381 if "--emptytree" in myopts:
382 myparams.add("empty")
# --emptytree overrides selective merging.
383 myparams.discard("selective")
384 if "--nodeps" in myopts:
385 myparams.discard("recurse")
386 if "--deep" in myopts:
388 if "--complete-graph" in myopts:
389 myparams.add("complete")
392 # search functionality
393 class search(object):
# Constructor caches the db handles so repeated searches are fast.
404 def __init__(self, root_config, spinner, searchdesc,
405 verbose, usepkg, usepkgonly):
406 """Searches the available and installed packages for the supplied search key.
407 The list of available and installed packages is created at object instantiation.
408 This makes successive searches faster."""
409 self.settings = root_config.settings
410 self.vartree = root_config.trees["vartree"]
411 self.spinner = spinner
412 self.verbose = verbose
413 self.searchdesc = searchdesc
414 self.root_config = root_config
415 self.setconfig = root_config.setconfig
416 self.matches = {"pkg" : []}
# fake_portdb aggregates the port/bin/var dbs behind a portdb-like API;
# its construction lines are missing from this listing.
421 self.portdb = fake_portdb
422 for attrib in ("aux_get", "cp_all",
423 "xmatch", "findname", "getFetchMap"):
424 setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
428 portdb = root_config.trees["porttree"].dbapi
429 bindb = root_config.trees["bintree"].dbapi
430 vardb = root_config.trees["vartree"].dbapi
# Choose which databases are searched, honoring --usepkg/--usepkgonly.
432 if not usepkgonly and portdb._have_root_eclass_dir:
433 self._dbs.append(portdb)
435 if (usepkg or usepkgonly) and bindb.cp_all():
436 self._dbs.append(bindb)
438 self._dbs.append(vardb)
439 self._portdb = portdb
# _cp_all fragment (its def line is missing): union of cp_all() over
# every searched database, returned sorted.
444 cp_all.update(db.cp_all())
445 return list(sorted(cp_all))
# Delegating helpers behind the fake portdb: each tries the searched
# databases in order (several interior lines are missing from this view).
447 def _aux_get(self, *args, **kwargs):
450 return db.aux_get(*args, **kwargs)
455 def _findname(self, *args, **kwargs):
457 if db is not self._portdb:
458 # We don't want findname to return anything
459 # unless it's an ebuild in a portage tree.
460 # Otherwise, it's already built and we don't
463 func = getattr(db, "findname", None)
465 value = func(*args, **kwargs)
# getFetchMap is looked up dynamically since not all dbs provide it.
470 def _getFetchMap(self, *args, **kwargs):
472 func = getattr(db, "getFetchMap", None)
474 value = func(*args, **kwargs)
# Visibility check for one cpv in one db: installed packages and binary
# packages count as "built"; delegates to the global visible() helper.
479 def _visible(self, db, cpv, metadata):
480 installed = db is self.vartree.dbapi
481 built = installed or db is not self._portdb
484 pkg_type = "installed"
487 return visible(self.settings,
488 Package(type_name=pkg_type, root_config=self.root_config,
489 cpv=cpv, built=built, installed=installed, metadata=metadata))
491 def _xmatch(self, level, atom):
493 This method does not expand old-style virtuals because it
494 is restricted to returning matches for a single ${CATEGORY}/${PN}
495 and old-style virtual matches unreliable for that when querying
496 multiple package databases. If necessary, old-style virtuals
497 can be performed on atoms prior to calling this method.
499 cp = portage.dep_getkey(atom)
500 if level == "match-all":
# Union of matches from every searched db; dbs without xmatch (the
# vardb) fall back to plain match().
503 if hasattr(db, "xmatch"):
504 matches.update(db.xmatch(level, atom))
506 matches.update(db.match(atom))
507 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
508 db._cpv_sort_ascending(result)
509 elif level == "match-visible":
512 if hasattr(db, "xmatch"):
513 matches.update(db.xmatch(level, atom))
# Without xmatch, visibility must be computed per-cpv from metadata.
515 db_keys = list(db._aux_cache_keys)
516 for cpv in db.match(atom):
517 metadata = izip(db_keys,
518 db.aux_get(cpv, db_keys))
519 if not self._visible(db, cpv, metadata):
522 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
523 db._cpv_sort_ascending(result)
524 elif level == "bestmatch-visible":
# Best visible match across all dbs, compared via portage.best().
527 if hasattr(db, "xmatch"):
528 cpv = db.xmatch("bestmatch-visible", atom)
529 if not cpv or portage.cpv_getkey(cpv) != cp:
531 if not result or cpv == portage.best([cpv, result]):
534 db_keys = Package.metadata_keys
535 # break out of this loop with highest visible
536 # match, checked in descending order
537 for cpv in reversed(db.match(atom)):
538 if portage.cpv_getkey(cpv) != cp:
540 metadata = izip(db_keys,
541 db.aux_get(cpv, db_keys))
542 if not self._visible(db, cpv, metadata):
544 if not result or cpv == portage.best([cpv, result]):
# Any other match level is a programming error.
548 raise NotImplementedError(level)
551 def execute(self,searchkey):
552 """Performs the search for the supplied search key"""
554 self.searchkey=searchkey
555 self.packagematches = []
# Description matching is only attempted when --searchdesc is active.
558 self.matches = {"pkg":[], "desc":[], "set":[]}
561 self.matches = {"pkg":[], "set":[]}
562 print "Searching... ",
# A leading '%' selects regex search and a leading '@' restricts the
# match to full category/name (the flag assignments are not visible
# in this listing).
565 if self.searchkey.startswith('%'):
567 self.searchkey = self.searchkey[1:]
568 if self.searchkey.startswith('@'):
570 self.searchkey = self.searchkey[1:]
572 self.searchre=re.compile(self.searchkey,re.I)
574 self.searchre=re.compile(re.escape(self.searchkey), re.I)
575 for package in self.portdb.cp_all():
576 self.spinner.update()
579 match_string = package[:]
581 match_string = package.split("/")[-1]
584 if self.searchre.search(match_string):
# No visible version at all -> record the package as masked.
585 if not self.portdb.xmatch("match-visible", package):
587 self.matches["pkg"].append([package,masked])
588 elif self.searchdesc: # DESCRIPTION searching
589 full_package = self.portdb.xmatch("bestmatch-visible", package)
591 #no match found; we don't want to query description
592 full_package = portage.best(
593 self.portdb.xmatch("match-all", package))
599 full_desc = self.portdb.aux_get(
600 full_package, ["DESCRIPTION"])[0]
602 print "emerge: search: aux_get() failed, skipping"
604 if self.searchre.search(full_desc):
605 self.matches["desc"].append([full_package,masked])
# Package sets are matched by name and, with --searchdesc, by their
# DESCRIPTION metadata.
607 self.sdict = self.setconfig.getSets()
608 for setname in self.sdict:
609 self.spinner.update()
611 match_string = setname
613 match_string = setname.split("/")[-1]
615 if self.searchre.search(match_string):
616 self.matches["set"].append([setname, False])
617 elif self.searchdesc:
618 if self.searchre.search(
619 self.sdict[setname].getMetadata("DESCRIPTION")):
620 self.matches["set"].append([setname, False])
# Sort each match category and total the result count.
623 for mtype in self.matches:
624 self.matches[mtype].sort()
625 self.mlen += len(self.matches[mtype])
# addCP fragment (its def line is missing): records an explicit
# category/package if it exists at all, flagged masked when no
# visible version remains.
628 if not self.portdb.xmatch("match-all", cp):
631 if not self.portdb.xmatch("bestmatch-visible", cp):
633 self.matches["pkg"].append([cp, masked])
# output(): pretty-prints the collected matches. NOTE(review): the def
# line itself is absent from this listing.
637 """Outputs the results of the search."""
638 print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
639 print "[ Applications found : "+white(str(self.mlen))+" ]"
641 vardb = self.vartree.dbapi
642 for mtype in self.matches:
643 for match,masked in self.matches[mtype]:
647 full_package = self.portdb.xmatch(
648 "bestmatch-visible", match)
650 #no match found; we don't want to query description
652 full_package = portage.best(
653 self.portdb.xmatch("match-all",match))
654 elif mtype == "desc":
656 match = portage.cpv_getkey(match)
# Set matches print only their name and description.
658 print green("*")+" "+white(match)
659 print " ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION")
663 desc, homepage, license = self.portdb.aux_get(
664 full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
666 print "emerge: search: aux_get() failed, skipping"
669 print green("*")+" "+white(match)+" "+red("[ Masked ]")
671 print green("*")+" "+white(match)
672 myversion = self.getVersion(full_package, search.VERSION_RELEASE)
676 mycat = match.split("/")[0]
677 mypkg = match.split("/")[1]
678 mycpv = match + "-" + myversion
679 myebuild = self.portdb.findname(mycpv)
# With an ebuild available, download size comes from the Manifest's
# distfile digests.
681 pkgdir = os.path.dirname(myebuild)
682 from portage import manifest
683 mf = manifest.Manifest(
684 pkgdir, self.settings["DISTDIR"])
686 uri_map = self.portdb.getFetchMap(mycpv)
687 except portage.exception.InvalidDependString, e:
688 file_size_str = "Unknown (%s)" % (e,)
692 mysum[0] = mf.getDistfilesSize(uri_map)
694 file_size_str = "Unknown (missing " + \
695 "digest for %s)" % (e,)
# Binary packages fall back to the size of the package file itself.
700 if db is not vardb and \
701 db.cpv_exists(mycpv):
703 if not myebuild and hasattr(db, "bintree"):
704 myebuild = db.bintree.getname(mycpv)
706 mysum[0] = os.stat(myebuild).st_size
# Format the byte count as a "N,NNN kB" string (the separator loop is
# partly missing from this listing).
711 if myebuild and file_size_str is None:
712 mystr = str(mysum[0] / 1024)
716 mystr = mystr[:mycount] + "," + mystr[mycount:]
717 file_size_str = mystr + " kB"
721 print " ", darkgreen("Latest version available:"),myversion
722 print " ", self.getInstallationStatus(mycat+'/'+mypkg)
725 (darkgreen("Size of files:"), file_size_str)
726 print " ", darkgreen("Homepage:")+" ",homepage
727 print " ", darkgreen("Description:")+" ",desc
728 print " ", darkgreen("License:")+" ",license
# Reports whether (and which version of) a package is installed.
733 def getInstallationStatus(self,package):
734 installed_package = self.vartree.dep_bestmatch(package)
736 version = self.getVersion(installed_package,search.VERSION_RELEASE)
738 result = darkgreen("Latest version installed:")+" "+version
740 result = darkgreen("Latest version installed:")+" [ Not Installed ]"
# Extracts the version (optionally with -rN revision) from a full cpv;
# the else/return lines are missing from this listing.
743 def getVersion(self,full_package,detail):
744 if len(full_package) > 1:
745 package_parts = portage.catpkgsplit(full_package)
# 'r0' revisions are suppressed from the displayed version.
746 if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
747 result = package_parts[2]+ "-" + package_parts[3]
749 result = package_parts[2]
754 class RootConfig(object):
755 """This is used internally by depgraph to track information about a
# Maps Package.type_name values to the tree name that holds them.
759 "ebuild" : "porttree",
760 "binary" : "bintree",
761 "installed" : "vartree"
# Reverse mapping (tree name -> package type), built from pkg_tree_map.
765 for k, v in pkg_tree_map.iteritems():
768 def __init__(self, settings, trees, setconfig):
770 self.settings = settings
# Implicit IUSE is precomputed once since it is consulted frequently.
771 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
772 self.root = self.settings["ROOT"]
773 self.setconfig = setconfig
774 if setconfig is None:
777 self.sets = self.setconfig.getSets()
778 self.visible_pkgs = PackageVirtualDbapi(self.settings)
780 def create_world_atom(pkg, args_set, root_config):
781 """Create a new atom for the world file if one does not exist. If the
782 argument atom is precise enough to identify a specific slot then a slot
783 atom will be returned. Atoms that are in the system set may also be stored
784 in world since system atoms can only match one slot while world atoms can
785 be greedy with respect to slots. Unslotted system packages will not be
788 arg_atom = args_set.findAtomForPackage(pkg)
791 cp = portage.dep_getkey(arg_atom)
793 sets = root_config.sets
794 portdb = root_config.trees["porttree"].dbapi
795 vardb = root_config.trees["vartree"].dbapi
# A package counts as "slotted" when more than one SLOT exists, or the
# single SLOT is not the default "0".
796 available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
797 for cpv in portdb.match(cp))
798 slotted = len(available_slots) > 1 or \
799 (len(available_slots) == 1 and "0" not in available_slots)
801 # check the vdb in case this is multislot
802 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
803 for cpv in vardb.match(cp))
804 slotted = len(available_slots) > 1 or \
805 (len(available_slots) == 1 and "0" not in available_slots)
806 if slotted and arg_atom != cp:
807 # If the user gave a specific atom, store it as a
808 # slot atom in the world file.
809 slot_atom = pkg.slot_atom
811 # For USE=multislot, there are a couple of cases to
814 # 1) SLOT="0", but the real SLOT spontaneously changed to some
815 # unknown value, so just record an unslotted atom.
817 # 2) SLOT comes from an installed package and there is no
818 # matching SLOT in the portage tree.
820 # Make sure that the slot atom is available in either the
821 # portdb or the vardb, since otherwise the user certainly
822 # doesn't want the SLOT atom recorded in the world file
823 # (case 1 above). If it's only available in the vardb,
824 # the user may be trying to prevent a USE=multislot
825 # package from being removed by --depclean (case 2 above).
828 if not portdb.match(slot_atom):
829 # SLOT seems to come from an installed multislot package
831 # If there is no installed package matching the SLOT atom,
832 # it probably changed SLOT spontaneously due to USE=multislot,
833 # so just record an unslotted atom.
834 if vardb.match(slot_atom):
835 # Now verify that the argument is precise
836 # enough to identify a specific slot.
837 matches = mydb.match(arg_atom)
838 matched_slots = set()
840 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
841 if len(matched_slots) == 1:
842 new_world_atom = slot_atom
844 if new_world_atom == sets["world"].findAtomForPackage(pkg):
845 # Both atoms would be identical, so there's nothing to add.
848 # Unlike world atoms, system atoms are not greedy for slots, so they
849 # can't be safely excluded from world if they are slotted.
850 system_atom = sets["system"].findAtomForPackage(pkg)
852 if not portage.dep_getkey(system_atom).startswith("virtual/"):
854 # System virtuals aren't safe to exclude from world since they can
855 # match multiple old-style virtuals but only one of them will be
856 # pulled in by update or depclean.
857 providers = portdb.mysettings.getvirtuals().get(
858 portage.dep_getkey(system_atom))
859 if providers and len(providers) == 1 and providers[0] == cp:
861 return new_world_atom
# Strips IUSE default markers ("+"/"-") from each flag; the loop header
# between these lines is missing from this listing.
863 def filter_iuse_defaults(iuse):
865 if flag.startswith("+") or flag.startswith("-"):
870 class SlotObject(object):
871 __slots__ = ("__weakref__",)
# Generic keyword-argument constructor: walks the class hierarchy
# collecting every __slots__ entry and assigns the matching kwarg
# (defaulting to None). Several loop lines are missing from this view.
873 def __init__(self, **kwargs):
874 classes = [self.__class__]
879 classes.extend(c.__bases__)
880 slots = getattr(c, "__slots__", None)
884 myvalue = kwargs.get(myattr, None)
885 setattr(self, myattr, myvalue)
# copy() fragment: duplicates the instance slot-by-slot.
889 Create a new instance and copy all attributes
890 defined from __slots__ (including those from
893 obj = self.__class__()
895 classes = [self.__class__]
900 classes.extend(c.__bases__)
901 slots = getattr(c, "__slots__", None)
905 setattr(obj, myattr, getattr(self, myattr))
# Base class for dependency priorities: subclasses supply __int__ and
# instances compare through that integer encoding (see methods below).
909 class AbstractDepPriority(SlotObject):
910 __slots__ = ("buildtime", "runtime", "runtime_post")
def __lt__(self, other):
	"""True when this priority's integer encoding compares below other."""
	own_value = self.__int__()
	return own_value < other
def __le__(self, other):
	"""True when this priority's integer encoding compares at or below other."""
	own_value = self.__int__()
	return own_value <= other
def __eq__(self, other):
	"""Equality is defined on the integer encoding, not identity."""
	own_value = self.__int__()
	return own_value == other
def __ne__(self, other):
	"""Inverse of __eq__, again via the integer encoding."""
	own_value = self.__int__()
	return own_value != other
def __gt__(self, other):
	"""True when this priority's integer encoding compares above other."""
	own_value = self.__int__()
	return own_value > other
def __ge__(self, other):
	"""True when this priority's integer encoding compares at or above other."""
	own_value = self.__int__()
	return own_value >= other
# Tail of AbstractDepPriority.copy() (the def line is absent from this
# listing): returns a shallow copy of the priority instance.
932 return copy.copy(self)
934 class DepPriority(AbstractDepPriority):
936 __slots__ = ("satisfied", "optional", "rebuild")
940 Note: These priorities are only used for measuring hardness
941 in the circular dependency display via digraph.debug_print(),
942 and nothing more. For actual merge order calculations, the
943 measures defined by the DepPriorityNormalRange and
944 DepPrioritySatisfiedRange classes are used.
# Integer encoding table (docstring fragment):
948 not satisfied and buildtime 8
949 not satisfied and runtime 7
950 not satisfied and runtime_post 6
951 satisfied and buildtime and rebuild 5
952 satisfied and buildtime 4
953 satisfied and runtime 3
954 satisfied and runtime_post 2
956 (none of the above) 0
# __int__ fragment: unsatisfied dependencies rank highest (several
# return lines are missing from this listing).
959 if not self.satisfied:
964 if self.runtime_post:
972 if self.runtime_post:
# __str__ fragment mapping the flags back to a label.
985 if self.runtime_post:
986 return "runtime_post"
# Priority used for blocker edges; its method bodies are not visible in
# this listing. A shared singleton instance is attached just below.
989 class BlockerDepPriority(DepPriority):
997 BlockerDepPriority.instance = BlockerDepPriority()
999 class UnmergeDepPriority(AbstractDepPriority):
1000 __slots__ = ("optional", "satisfied",)
# Priority table (docstring fragment): runtime deps are HARD, the rest
# SOFT, on a negative integer scale.
1002 Combination of properties Priority Category
1005 runtime_post -1 HARD
1007 (none of the above) -2 SOFT
# __int__ fragment (the other branches are missing from this listing).
1017 if self.runtime_post:
# __str__ fragment: label chosen relative to the SOFT threshold.
1024 myvalue = self.__int__()
1025 if myvalue > self.SOFT:
1029 class DepPriorityNormalRange(object):
# Maps DepPriority instances onto a small index range used when looking
# for an "ignorable" dependency edge during merge ordering.
1031 DepPriority properties Index Category
1035 runtime_post 2 MEDIUM_SOFT
1037 (none of the above) 0 NONE
# Each _ignore_* classmethod answers: may an edge of this priority be
# ignored at this hardness level? The early-return lines for the
# non-DepPriority case are missing from this listing.
1045 def _ignore_optional(cls, priority):
1046 if priority.__class__ is not DepPriority:
1048 return bool(priority.optional)
1051 def _ignore_runtime_post(cls, priority):
1052 if priority.__class__ is not DepPriority:
1054 return bool(priority.optional or priority.runtime_post)
1057 def _ignore_runtime(cls, priority):
1058 if priority.__class__ is not DepPriority:
1060 return not priority.buildtime
# Aliases binding each hardness level to its predicate.
1062 ignore_medium = _ignore_runtime
1063 ignore_medium_soft = _ignore_runtime_post
1064 ignore_soft = _ignore_optional
# Ordered predicate tuple indexed by hardness level (the leading entry
# line between 1066 and 1068 is missing from this listing).
1066 DepPriorityNormalRange.ignore_priority = (
1068 DepPriorityNormalRange._ignore_optional,
1069 DepPriorityNormalRange._ignore_runtime_post,
1070 DepPriorityNormalRange._ignore_runtime
1073 class DepPrioritySatisfiedRange(object):
# Like DepPriorityNormalRange, but with extra levels distinguishing
# already-satisfied dependencies (which are softer).
1075 DepPriority Index Category
1077 not satisfied and buildtime HARD
1078 not satisfied and runtime 7 MEDIUM
1079 not satisfied and runtime_post 6 MEDIUM_SOFT
1080 satisfied and buildtime and rebuild 5 SOFT
1081 satisfied and buildtime 4 SOFT
1082 satisfied and runtime 3 SOFT
1083 satisfied and runtime_post 2 SOFT
1085 (none of the above) 0 NONE
# Predicates for successive ignore levels; the early-return lines for
# the non-DepPriority case are missing from this listing.
1093 def _ignore_optional(cls, priority):
1094 if priority.__class__ is not DepPriority:
1096 return bool(priority.optional)
1099 def _ignore_satisfied_runtime_post(cls, priority):
1100 if priority.__class__ is not DepPriority:
1102 if priority.optional:
1104 if not priority.satisfied:
1106 return bool(priority.runtime_post)
1109 def _ignore_satisfied_runtime(cls, priority):
1110 if priority.__class__ is not DepPriority:
1112 if priority.optional:
1114 if not priority.satisfied:
1116 return not priority.buildtime
1119 def _ignore_satisfied_buildtime(cls, priority):
1120 if priority.__class__ is not DepPriority:
1122 if priority.optional:
1124 if not priority.satisfied:
1126 if priority.buildtime:
1127 return not priority.rebuild
1131 def _ignore_satisfied_buildtime_rebuild(cls, priority):
1132 if priority.__class__ is not DepPriority:
1134 if priority.optional:
1136 return bool(priority.satisfied)
1139 def _ignore_runtime_post(cls, priority):
1140 if priority.__class__ is not DepPriority:
1142 return bool(priority.optional or \
1143 priority.satisfied or \
1144 priority.runtime_post)
1147 def _ignore_runtime(cls, priority):
1148 if priority.__class__ is not DepPriority:
1150 return bool(priority.satisfied or \
1151 not priority.buildtime)
# Hardness-level aliases.
1153 ignore_medium = _ignore_runtime
1154 ignore_medium_soft = _ignore_runtime_post
1155 ignore_soft = _ignore_satisfied_buildtime_rebuild
# Ordered predicate tuple indexed by level (the leading entry line
# between 1157 and 1159 is missing from this listing).
1157 DepPrioritySatisfiedRange.ignore_priority = (
1159 DepPrioritySatisfiedRange._ignore_optional,
1160 DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1161 DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1162 DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1163 DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1164 DepPrioritySatisfiedRange._ignore_runtime_post,
1165 DepPrioritySatisfiedRange._ignore_runtime
# Walk of the dependency graph collecting system-set packages reachable
# through runtime edges (the stack-initialization lines are missing
# from this listing).
1168 def _find_deep_system_runtime_deps(graph):
1169 deep_system_deps = set()
# Seed: system-set, non-uninstall Package nodes.
1172 if not isinstance(node, Package) or \
1173 node.operation == 'uninstall':
1175 if node.root_config.sets['system'].findAtomForPackage(node):
1176 node_stack.append(node)
1178 def ignore_priority(priority):
1180 Ignore non-runtime priorities.
1182 if isinstance(priority, DepPriority) and \
1183 (priority.runtime or priority.runtime_post):
# Iterative DFS; already-collected nodes are skipped.
1188 node = node_stack.pop()
1189 if node in deep_system_deps:
1191 deep_system_deps.add(node)
1192 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1193 if not isinstance(child, Package) or \
1194 child.operation == 'uninstall':
1196 node_stack.append(child)
1198 return deep_system_deps
1200 class FakeVartree(portage.vartree):
1201 """This implements an in-memory copy of a vartree instance that provides
1202 all the interfaces required for use by the depgraph. The vardb is locked
1203 during the constructor call just long enough to read a copy of the
1204 installed package information. This allows the depgraph to do its
1205 dependency calculations without holding a lock on the vardb. It also
1206 allows things like vardb global updates to be done in memory so that the
1207 user doesn't necessarily need write access to the vardb in cases where
1208 global updates are necessary (updates are performed when necessary if there
1209 is not a matching ebuild in the tree)."""
1210 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1211 self._root_config = root_config
1212 if pkg_cache is None:
1214 real_vartree = root_config.trees["vartree"]
1215 portdb = root_config.trees["porttree"].dbapi
1216 self.root = real_vartree.root
1217 self.settings = real_vartree.settings
1218 mykeys = list(real_vartree.dbapi._aux_cache_keys)
# _mtime_ is needed later by sync() for timestamp validation.
1219 if "_mtime_" not in mykeys:
1220 mykeys.append("_mtime_")
1221 self._db_keys = mykeys
1222 self._pkg_cache = pkg_cache
1223 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1224 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1226 # At least the parent needs to exist for the lock file.
1227 portage.util.ensure_dirs(vdb_path)
1228 except portage.exception.PortageException:
# Lock the vdb only when writable; read-only users proceed unlocked.
1232 if acquire_lock and os.access(vdb_path, os.W_OK):
1233 vdb_lock = portage.locks.lockdir(vdb_path)
1234 real_dbapi = real_vartree.dbapi
# Snapshot every installed package into the in-memory dbapi, reusing
# cached Package instances when available.
1236 for cpv in real_dbapi.cpv_all():
1237 cache_key = ("installed", self.root, cpv, "nomerge")
1238 pkg = self._pkg_cache.get(cache_key)
1240 metadata = pkg.metadata
1242 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1243 myslot = metadata["SLOT"]
1244 mycp = portage.dep_getkey(cpv)
1245 myslot_atom = "%s:%s" % (mycp, myslot)
1247 mycounter = long(metadata["COUNTER"])
1250 metadata["COUNTER"] = str(mycounter)
# Track the highest COUNTER seen per slot atom (duplicate-slot guard;
# some of its handling lines are missing from this listing).
1251 other_counter = slot_counters.get(myslot_atom, None)
1252 if other_counter is not None:
1253 if other_counter > mycounter:
1255 slot_counters[myslot_atom] = mycounter
1257 pkg = Package(built=True, cpv=cpv,
1258 installed=True, metadata=metadata,
1259 root_config=root_config, type_name="installed")
1260 self._pkg_cache[pkg] = pkg
1261 self.dbapi.cpv_inject(pkg)
1262 real_dbapi.flush_cache()
1265 portage.locks.unlockdir(vdb_lock)
1266 # Populate the old-style virtuals using the cached values.
1267 if not self.settings.treeVirtuals:
1268 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1269 portage.getCPFromCPV, self.get_all_provides())
1271 # Initialize variables needed for lazy cache pulls of the live ebuild
1272 # metadata. This ensures that the vardb lock is released ASAP, without
1273 # being delayed in case cache generation is triggered.
1274 self._aux_get = self.dbapi.aux_get
1275 self.dbapi.aux_get = self._aux_get_wrapper
1276 self._match = self.dbapi.match
1277 self.dbapi.match = self._match_wrapper
1278 self._aux_get_history = set()
1279 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1280 self._portdb = portdb
1281 self._global_updates = None
# Wrapper installed over dbapi.match(): refreshes metadata for any
# newly-returned cpv before handing the matches back.
1283 def _match_wrapper(self, cpv, use_cache=1):
1285 Make sure the metadata in Package instances gets updated for any
1286 cpv that is returned from a match() call, since the metadata can
1287 be accessed directly from the Package instance instead of via
1290 matches = self._match(cpv, use_cache=use_cache)
1292 if cpv in self._aux_get_history:
# An empty "wants" list still triggers the live-metadata refresh.
1294 self._aux_get_wrapper(cpv, [])
1297 def _aux_get_wrapper(self, pkg, wants):
1298 if pkg in self._aux_get_history:
1299 return self._aux_get(pkg, wants)
1300 self._aux_get_history.add(pkg)
1302 # Use the live ebuild metadata if possible.
1303 live_metadata = dict(izip(self._portdb_keys,
1304 self._portdb.aux_get(pkg, self._portdb_keys)))
1305 if not portage.eapi_is_supported(live_metadata["EAPI"]):
1307 self.dbapi.aux_update(pkg, live_metadata)
1308 except (KeyError, portage.exception.PortageException):
1309 if self._global_updates is None:
1310 self._global_updates = \
1311 grab_global_updates(self._portdb.porttree_root)
1312 perform_global_updates(
1313 pkg, self.dbapi, self._global_updates)
1314 return self._aux_get(pkg, wants)
1316 def sync(self, acquire_lock=1):
1318 Call this method to synchronize state with the real vardb
1319 after one or more packages may have been installed or
1322 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1324 # At least the parent needs to exist for the lock file.
1325 portage.util.ensure_dirs(vdb_path)
1326 except portage.exception.PortageException:
1330 if acquire_lock and os.access(vdb_path, os.W_OK):
1331 vdb_lock = portage.locks.lockdir(vdb_path)
1335 portage.locks.unlockdir(vdb_lock)
1339 real_vardb = self._root_config.trees["vartree"].dbapi
1340 current_cpv_set = frozenset(real_vardb.cpv_all())
1341 pkg_vardb = self.dbapi
1342 aux_get_history = self._aux_get_history
1344 # Remove any packages that have been uninstalled.
1345 for pkg in list(pkg_vardb):
1346 if pkg.cpv not in current_cpv_set:
1347 pkg_vardb.cpv_remove(pkg)
1348 aux_get_history.discard(pkg.cpv)
1350 # Validate counters and timestamps.
1353 validation_keys = ["COUNTER", "_mtime_"]
1354 for cpv in current_cpv_set:
1356 pkg_hash_key = ("installed", root, cpv, "nomerge")
1357 pkg = pkg_vardb.get(pkg_hash_key)
1359 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1361 counter = long(counter)
1365 if counter != pkg.counter or \
1367 pkg_vardb.cpv_remove(pkg)
1368 aux_get_history.discard(pkg.cpv)
1372 pkg = self._pkg(cpv)
1374 other_counter = slot_counters.get(pkg.slot_atom)
1375 if other_counter is not None:
1376 if other_counter > pkg.counter:
1379 slot_counters[pkg.slot_atom] = pkg.counter
1380 pkg_vardb.cpv_inject(pkg)
1382 real_vardb.flush_cache()
1384 def _pkg(self, cpv):
1385 root_config = self._root_config
1386 real_vardb = root_config.trees["vartree"].dbapi
1387 pkg = Package(cpv=cpv, installed=True,
1388 metadata=izip(self._db_keys,
1389 real_vardb.aux_get(cpv, self._db_keys)),
1390 root_config=root_config,
1391 type_name="installed")
1394 mycounter = long(pkg.metadata["COUNTER"])
1397 pkg.metadata["COUNTER"] = str(mycounter)
1401 def grab_global_updates(portdir):
1402 from portage.update import grab_updates, parse_updates
1403 updpath = os.path.join(portdir, "profiles", "updates")
1405 rawupdates = grab_updates(updpath)
1406 except portage.exception.DirectoryNotFound:
1409 for mykey, mystat, mycontent in rawupdates:
1410 commands, errors = parse_updates(mycontent)
1411 upd_commands.extend(commands)
1414 def perform_global_updates(mycpv, mydb, mycommands):
1415 from portage.update import update_dbentries
1416 aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1417 aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1418 updates = update_dbentries(mycommands, aux_dict)
1420 mydb.aux_update(mycpv, updates)
1422 def visible(pkgsettings, pkg):
1424 Check if a package is visible. This can raise an InvalidDependString
1425 exception if LICENSE is invalid.
1426 TODO: optionally generate a list of masking reasons
1428 @returns: True if the package is visible, False otherwise.
1430 if not pkg.metadata["SLOT"]:
1432 if not pkg.installed:
1433 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1435 eapi = pkg.metadata["EAPI"]
1436 if not portage.eapi_is_supported(eapi):
1438 if not pkg.installed:
1439 if portage._eapi_is_deprecated(eapi):
1441 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1443 if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1445 if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1448 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1450 except portage.exception.InvalidDependString:
1454 def get_masking_status(pkg, pkgsettings, root_config):
1456 mreasons = portage.getmaskingstatus(
1457 pkg, settings=pkgsettings,
1458 portdb=root_config.trees["porttree"].dbapi)
1460 if not pkg.installed:
1461 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1462 mreasons.append("CHOST: %s" % \
1463 pkg.metadata["CHOST"])
1465 if not pkg.metadata["SLOT"]:
1466 mreasons.append("invalid: SLOT is undefined")
1470 def get_mask_info(root_config, cpv, pkgsettings,
1471 db, pkg_type, built, installed, db_keys):
1474 metadata = dict(izip(db_keys,
1475 db.aux_get(cpv, db_keys)))
1478 if metadata and not built:
1479 pkgsettings.setcpv(cpv, mydb=metadata)
1480 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1481 metadata['CHOST'] = pkgsettings.get('CHOST', '')
1482 if metadata is None:
1483 mreasons = ["corruption"]
1485 eapi = metadata['EAPI']
1488 if not portage.eapi_is_supported(eapi):
1489 mreasons = ['EAPI %s' % eapi]
1491 pkg = Package(type_name=pkg_type, root_config=root_config,
1492 cpv=cpv, built=built, installed=installed, metadata=metadata)
1493 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1494 return metadata, mreasons
1496 def show_masked_packages(masked_packages):
1497 shown_licenses = set()
1498 shown_comments = set()
1499 # Maybe there is both an ebuild and a binary. Only
1500 # show one of them to avoid redundant appearance.
1502 have_eapi_mask = False
1503 for (root_config, pkgsettings, cpv,
1504 metadata, mreasons) in masked_packages:
1505 if cpv in shown_cpvs:
1508 comment, filename = None, None
1509 if "package.mask" in mreasons:
1510 comment, filename = \
1511 portage.getmaskingreason(
1512 cpv, metadata=metadata,
1513 settings=pkgsettings,
1514 portdb=root_config.trees["porttree"].dbapi,
1515 return_location=True)
1516 missing_licenses = []
1518 if not portage.eapi_is_supported(metadata["EAPI"]):
1519 have_eapi_mask = True
1521 missing_licenses = \
1522 pkgsettings._getMissingLicenses(
1524 except portage.exception.InvalidDependString:
1525 # This will have already been reported
1526 # above via mreasons.
1529 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1530 if comment and comment not in shown_comments:
1533 shown_comments.add(comment)
1534 portdb = root_config.trees["porttree"].dbapi
1535 for l in missing_licenses:
1536 l_path = portdb.findLicensePath(l)
1537 if l in shown_licenses:
1539 msg = ("A copy of the '%s' license" + \
1540 " is located at '%s'.") % (l, l_path)
1543 shown_licenses.add(l)
1544 return have_eapi_mask
1546 class Task(SlotObject):
1547 __slots__ = ("_hash_key", "_hash_value")
1549 def _get_hash_key(self):
1550 hash_key = getattr(self, "_hash_key", None)
1551 if hash_key is None:
1552 raise NotImplementedError(self)
1555 def __eq__(self, other):
1556 return self._get_hash_key() == other
1558 def __ne__(self, other):
1559 return self._get_hash_key() != other
1562 hash_value = getattr(self, "_hash_value", None)
1563 if hash_value is None:
1564 self._hash_value = hash(self._get_hash_key())
1565 return self._hash_value
1568 return len(self._get_hash_key())
1570 def __getitem__(self, key):
1571 return self._get_hash_key()[key]
1574 return iter(self._get_hash_key())
1576 def __contains__(self, key):
1577 return key in self._get_hash_key()
1580 return str(self._get_hash_key())
1582 class Blocker(Task):
1584 __hash__ = Task.__hash__
1585 __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1587 def __init__(self, **kwargs):
1588 Task.__init__(self, **kwargs)
1589 self.cp = portage.dep_getkey(self.atom)
1591 def _get_hash_key(self):
1592 hash_key = getattr(self, "_hash_key", None)
1593 if hash_key is None:
1595 ("blocks", self.root, self.atom, self.eapi)
1596 return self._hash_key
1598 class Package(Task):
1600 __hash__ = Task.__hash__
1601 __slots__ = ("built", "cpv", "depth",
1602 "installed", "metadata", "onlydeps", "operation",
1603 "root_config", "type_name",
1604 "category", "counter", "cp", "cpv_split",
1605 "inherited", "iuse", "mtime",
1606 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1609 "CHOST", "COUNTER", "DEPEND", "EAPI",
1610 "INHERITED", "IUSE", "KEYWORDS",
1611 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1612 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1614 def __init__(self, **kwargs):
1615 Task.__init__(self, **kwargs)
1616 self.root = self.root_config.root
1617 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1618 self.cp = portage.cpv_getkey(self.cpv)
1621 # Avoid an InvalidAtom exception when creating slot_atom.
1622 # This package instance will be masked due to empty SLOT.
1624 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
1625 self.category, self.pf = portage.catsplit(self.cpv)
1626 self.cpv_split = portage.catpkgsplit(self.cpv)
1627 self.pv_split = self.cpv_split[1:]
1631 __slots__ = ("__weakref__", "enabled")
1633 def __init__(self, use):
1634 self.enabled = frozenset(use)
1636 class _iuse(object):
1638 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1640 def __init__(self, tokens, iuse_implicit):
1641 self.tokens = tuple(tokens)
1642 self.iuse_implicit = iuse_implicit
1649 enabled.append(x[1:])
1651 disabled.append(x[1:])
1654 self.enabled = frozenset(enabled)
1655 self.disabled = frozenset(disabled)
1656 self.all = frozenset(chain(enabled, disabled, other))
1658 def __getattribute__(self, name):
1661 return object.__getattribute__(self, "regex")
1662 except AttributeError:
1663 all = object.__getattribute__(self, "all")
1664 iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1665 # Escape anything except ".*" which is supposed
1666 # to pass through from _get_implicit_iuse()
1667 regex = (re.escape(x) for x in chain(all, iuse_implicit))
1668 regex = "^(%s)$" % "|".join(regex)
1669 regex = regex.replace("\\.\\*", ".*")
1670 self.regex = re.compile(regex)
1671 return object.__getattribute__(self, name)
1673 def _get_hash_key(self):
1674 hash_key = getattr(self, "_hash_key", None)
1675 if hash_key is None:
1676 if self.operation is None:
1677 self.operation = "merge"
1678 if self.onlydeps or self.installed:
1679 self.operation = "nomerge"
1681 (self.type_name, self.root, self.cpv, self.operation)
1682 return self._hash_key
1684 def __lt__(self, other):
1685 if other.cp != self.cp:
1687 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1691 def __le__(self, other):
1692 if other.cp != self.cp:
1694 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1698 def __gt__(self, other):
1699 if other.cp != self.cp:
1701 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1705 def __ge__(self, other):
1706 if other.cp != self.cp:
1708 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1712 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1713 if not x.startswith("UNUSED_"))
1714 _all_metadata_keys.discard("CDEPEND")
1715 _all_metadata_keys.update(Package.metadata_keys)
1717 from portage.cache.mappings import slot_dict_class
1718 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1720 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1722 Detect metadata updates and synchronize Package attributes.
1725 __slots__ = ("_pkg",)
1726 _wrapped_keys = frozenset(
1727 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1729 def __init__(self, pkg, metadata):
1730 _PackageMetadataWrapperBase.__init__(self)
1732 self.update(metadata)
1734 def __setitem__(self, k, v):
1735 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1736 if k in self._wrapped_keys:
1737 getattr(self, "_set_" + k.lower())(k, v)
1739 def _set_inherited(self, k, v):
1740 if isinstance(v, basestring):
1741 v = frozenset(v.split())
1742 self._pkg.inherited = v
1744 def _set_iuse(self, k, v):
1745 self._pkg.iuse = self._pkg._iuse(
1746 v.split(), self._pkg.root_config.iuse_implicit)
1748 def _set_slot(self, k, v):
1751 def _set_use(self, k, v):
1752 self._pkg.use = self._pkg._use(v.split())
1754 def _set_counter(self, k, v):
1755 if isinstance(v, basestring):
1760 self._pkg.counter = v
1762 def _set__mtime_(self, k, v):
1763 if isinstance(v, basestring):
1770 class EbuildFetchonly(SlotObject):
1772 __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1775 settings = self.settings
1777 portdb = pkg.root_config.trees["porttree"].dbapi
1778 ebuild_path = portdb.findname(pkg.cpv)
1779 settings.setcpv(pkg)
1780 debug = settings.get("PORTAGE_DEBUG") == "1"
1781 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
1784 rval = self._execute_with_builddir()
1786 rval = portage.doebuild(ebuild_path, "fetch",
1787 settings["ROOT"], settings, debug=debug,
1788 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1789 mydbapi=portdb, tree="porttree")
1791 if rval != os.EX_OK:
1792 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1793 eerror(msg, phase="unpack", key=pkg.cpv)
1797 def _execute_with_builddir(self):
1798 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1799 # ensuring sane $PWD (bug #239560) and storing elog
1800 # messages. Use a private temp directory, in order
1801 # to avoid locking the main one.
1802 settings = self.settings
1803 global_tmpdir = settings["PORTAGE_TMPDIR"]
1804 from tempfile import mkdtemp
1806 private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1808 if e.errno != portage.exception.PermissionDenied.errno:
1810 raise portage.exception.PermissionDenied(global_tmpdir)
1811 settings["PORTAGE_TMPDIR"] = private_tmpdir
1812 settings.backup_changes("PORTAGE_TMPDIR")
1814 retval = self._execute()
1816 settings["PORTAGE_TMPDIR"] = global_tmpdir
1817 settings.backup_changes("PORTAGE_TMPDIR")
1818 shutil.rmtree(private_tmpdir)
1822 settings = self.settings
1824 root_config = pkg.root_config
1825 portdb = root_config.trees["porttree"].dbapi
1826 ebuild_path = portdb.findname(pkg.cpv)
1827 debug = settings.get("PORTAGE_DEBUG") == "1"
1828 retval = portage.doebuild(ebuild_path, "fetch",
1829 self.settings["ROOT"], self.settings, debug=debug,
1830 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1831 mydbapi=portdb, tree="porttree")
1833 if retval != os.EX_OK:
1834 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1835 eerror(msg, phase="unpack", key=pkg.cpv)
1837 portage.elog.elog_process(self.pkg.cpv, self.settings)
1840 class PollConstants(object):
1843 Provides POLL* constants that are equivalent to those from the
1844 select module, for use by PollSelectAdapter.
1847 names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1850 locals()[k] = getattr(select, k, v)
1854 class AsynchronousTask(SlotObject):
1856 Subclasses override _wait() and _poll() so that calls
1857 to public methods can be wrapped for implementing
1858 hooks such as exit listener notification.
1860 Sublasses should call self.wait() to notify exit listeners after
1861 the task is complete and self.returncode has been set.
1864 __slots__ = ("background", "cancelled", "returncode") + \
1865 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1869 Start an asynchronous task and then return as soon as possible.
1875 raise NotImplementedError(self)
1878 return self.returncode is None
1885 return self.returncode
1888 if self.returncode is None:
1891 return self.returncode
1894 return self.returncode
1897 self.cancelled = True
1900 def addStartListener(self, f):
1902 The function will be called with one argument, a reference to self.
1904 if self._start_listeners is None:
1905 self._start_listeners = []
1906 self._start_listeners.append(f)
1908 def removeStartListener(self, f):
1909 if self._start_listeners is None:
1911 self._start_listeners.remove(f)
1913 def _start_hook(self):
1914 if self._start_listeners is not None:
1915 start_listeners = self._start_listeners
1916 self._start_listeners = None
1918 for f in start_listeners:
1921 def addExitListener(self, f):
1923 The function will be called with one argument, a reference to self.
1925 if self._exit_listeners is None:
1926 self._exit_listeners = []
1927 self._exit_listeners.append(f)
1929 def removeExitListener(self, f):
1930 if self._exit_listeners is None:
1931 if self._exit_listener_stack is not None:
1932 self._exit_listener_stack.remove(f)
1934 self._exit_listeners.remove(f)
1936 def _wait_hook(self):
1938 Call this method after the task completes, just before returning
1939 the returncode from wait() or poll(). This hook is
1940 used to trigger exit listeners when the returncode first
1943 if self.returncode is not None and \
1944 self._exit_listeners is not None:
1946 # This prevents recursion, in case one of the
1947 # exit handlers triggers this method again by
1948 # calling wait(). Use a stack that gives
1949 # removeExitListener() an opportunity to consume
1950 # listeners from the stack, before they can get
1951 # called below. This is necessary because a call
1952 # to one exit listener may result in a call to
1953 # removeExitListener() for another listener on
1954 # the stack. That listener needs to be removed
1955 # from the stack since it would be inconsistent
1956 # to call it after it has been been passed into
1957 # removeExitListener().
1958 self._exit_listener_stack = self._exit_listeners
1959 self._exit_listeners = None
1961 self._exit_listener_stack.reverse()
1962 while self._exit_listener_stack:
1963 self._exit_listener_stack.pop()(self)
1965 class AbstractPollTask(AsynchronousTask):
1967 __slots__ = ("scheduler",) + \
1971 _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1972 _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1975 def _unregister(self):
1976 raise NotImplementedError(self)
1978 def _unregister_if_appropriate(self, event):
1979 if self._registered:
1980 if event & self._exceptional_events:
1983 elif event & PollConstants.POLLHUP:
1987 class PipeReader(AbstractPollTask):
1990 Reads output from one or more files and saves it in memory,
1991 for retrieval via the getvalue() method. This is driven by
1992 the scheduler's poll() loop, so it runs entirely within the
1996 __slots__ = ("input_files",) + \
1997 ("_read_data", "_reg_ids")
2000 self._reg_ids = set()
2001 self._read_data = []
2002 for k, f in self.input_files.iteritems():
2003 fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
2004 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
2005 self._reg_ids.add(self.scheduler.register(f.fileno(),
2006 self._registered_events, self._output_handler))
2007 self._registered = True
2010 return self._registered
2013 if self.returncode is None:
2015 self.cancelled = True
2019 if self.returncode is not None:
2020 return self.returncode
2022 if self._registered:
2023 self.scheduler.schedule(self._reg_ids)
2026 self.returncode = os.EX_OK
2027 return self.returncode
2030 """Retrieve the entire contents"""
2031 if sys.hexversion >= 0x3000000:
2032 return bytes().join(self._read_data)
2033 return "".join(self._read_data)
2036 """Free the memory buffer."""
2037 self._read_data = None
2039 def _output_handler(self, fd, event):
2041 if event & PollConstants.POLLIN:
2043 for f in self.input_files.itervalues():
2044 if fd == f.fileno():
2047 buf = array.array('B')
2049 buf.fromfile(f, self._bufsize)
2054 self._read_data.append(buf.tostring())
2059 self._unregister_if_appropriate(event)
2060 return self._registered
2062 def _unregister(self):
2064 Unregister from the scheduler and close open files.
2067 self._registered = False
2069 if self._reg_ids is not None:
2070 for reg_id in self._reg_ids:
2071 self.scheduler.unregister(reg_id)
2072 self._reg_ids = None
2074 if self.input_files is not None:
2075 for f in self.input_files.itervalues():
2077 self.input_files = None
2079 class CompositeTask(AsynchronousTask):
2081 __slots__ = ("scheduler",) + ("_current_task",)
2084 return self._current_task is not None
2087 self.cancelled = True
2088 if self._current_task is not None:
2089 self._current_task.cancel()
2093 This does a loop calling self._current_task.poll()
2094 repeatedly as long as the value of self._current_task
2095 keeps changing. It calls poll() a maximum of one time
2096 for a given self._current_task instance. This is useful
2097 since calling poll() on a task can trigger advance to
2098 the next task could eventually lead to the returncode
2099 being set in cases when polling only a single task would
2100 not have the same effect.
2105 task = self._current_task
2106 if task is None or task is prev:
2107 # don't poll the same task more than once
2112 return self.returncode
2118 task = self._current_task
2120 # don't wait for the same task more than once
2123 # Before the task.wait() method returned, an exit
2124 # listener should have set self._current_task to either
2125 # a different task or None. Something is wrong.
2126 raise AssertionError("self._current_task has not " + \
2127 "changed since calling wait", self, task)
2131 return self.returncode
2133 def _assert_current(self, task):
2135 Raises an AssertionError if the given task is not the
2136 same one as self._current_task. This can be useful
2139 if task is not self._current_task:
2140 raise AssertionError("Unrecognized task: %s" % (task,))
2142 def _default_exit(self, task):
2144 Calls _assert_current() on the given task and then sets the
2145 composite returncode attribute if task.returncode != os.EX_OK.
2146 If the task failed then self._current_task will be set to None.
2147 Subclasses can use this as a generic task exit callback.
2150 @returns: The task.returncode attribute.
2152 self._assert_current(task)
2153 if task.returncode != os.EX_OK:
2154 self.returncode = task.returncode
2155 self._current_task = None
2156 return task.returncode
2158 def _final_exit(self, task):
2160 Assumes that task is the final task of this composite task.
2161 Calls _default_exit() and sets self.returncode to the task's
2162 returncode and sets self._current_task to None.
2164 self._default_exit(task)
2165 self._current_task = None
2166 self.returncode = task.returncode
2167 return self.returncode
2169 def _default_final_exit(self, task):
2171 This calls _final_exit() and then wait().
2173 Subclasses can use this as a generic final task exit callback.
2176 self._final_exit(task)
2179 def _start_task(self, task, exit_handler):
2181 Register exit handler for the given task, set it
2182 as self._current_task, and call task.start().
2184 Subclasses can use this as a generic way to start
2188 task.addExitListener(exit_handler)
2189 self._current_task = task
2192 class TaskSequence(CompositeTask):
2194 A collection of tasks that executes sequentially. Each task
2195 must have a addExitListener() method that can be used as
2196 a means to trigger movement from one task to the next.
2199 __slots__ = ("_task_queue",)
2201 def __init__(self, **kwargs):
2202 AsynchronousTask.__init__(self, **kwargs)
2203 self._task_queue = deque()
2205 def add(self, task):
2206 self._task_queue.append(task)
2209 self._start_next_task()
2212 self._task_queue.clear()
2213 CompositeTask.cancel(self)
2215 def _start_next_task(self):
2216 self._start_task(self._task_queue.popleft(),
2217 self._task_exit_handler)
2219 def _task_exit_handler(self, task):
2220 if self._default_exit(task) != os.EX_OK:
2222 elif self._task_queue:
2223 self._start_next_task()
2225 self._final_exit(task)
2228 class SubProcess(AbstractPollTask):
2230 __slots__ = ("pid",) + \
2231 ("_files", "_reg_id")
2233 # A file descriptor is required for the scheduler to monitor changes from
2234 # inside a poll() loop. When logging is not enabled, create a pipe just to
2235 # serve this purpose alone.
2239 if self.returncode is not None:
2240 return self.returncode
2241 if self.pid is None:
2242 return self.returncode
2243 if self._registered:
2244 return self.returncode
2247 retval = os.waitpid(self.pid, os.WNOHANG)
2249 if e.errno != errno.ECHILD:
2252 retval = (self.pid, 1)
2254 if retval == (0, 0):
2256 self._set_returncode(retval)
2257 return self.returncode
2262 os.kill(self.pid, signal.SIGTERM)
2264 if e.errno != errno.ESRCH:
2268 self.cancelled = True
2269 if self.pid is not None:
2271 return self.returncode
2274 return self.pid is not None and \
2275 self.returncode is None
2279 if self.returncode is not None:
2280 return self.returncode
2282 if self._registered:
2283 self.scheduler.schedule(self._reg_id)
2285 if self.returncode is not None:
2286 return self.returncode
2289 wait_retval = os.waitpid(self.pid, 0)
2291 if e.errno != errno.ECHILD:
2294 self._set_returncode((self.pid, 1))
2296 self._set_returncode(wait_retval)
2298 return self.returncode
2300 def _unregister(self):
2302 Unregister from the scheduler and close open files.
2305 self._registered = False
2307 if self._reg_id is not None:
2308 self.scheduler.unregister(self._reg_id)
2311 if self._files is not None:
2312 for f in self._files.itervalues():
2316 def _set_returncode(self, wait_retval):
2318 retval = wait_retval[1]
2320 if retval != os.EX_OK:
2322 retval = (retval & 0xff) << 8
2324 retval = retval >> 8
2326 self.returncode = retval
2328 class SpawnProcess(SubProcess):
2331 Constructor keyword args are passed into portage.process.spawn().
2332 The required "args" keyword argument will be passed as the first
2336 _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2337 "uid", "gid", "groups", "umask", "logfile",
2338 "path_lookup", "pre_exec")
2340 __slots__ = ("args",) + \
2343 _file_names = ("log", "process", "stdout")
2344 _files_dict = slot_dict_class(_file_names, prefix="")
2351 if self.fd_pipes is None:
2353 fd_pipes = self.fd_pipes
2354 fd_pipes.setdefault(0, sys.stdin.fileno())
2355 fd_pipes.setdefault(1, sys.stdout.fileno())
2356 fd_pipes.setdefault(2, sys.stderr.fileno())
2358 # flush any pending output
2359 for fd in fd_pipes.itervalues():
2360 if fd == sys.stdout.fileno():
2362 if fd == sys.stderr.fileno():
2365 logfile = self.logfile
2366 self._files = self._files_dict()
2369 master_fd, slave_fd = self._pipe(fd_pipes)
2370 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2371 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2374 fd_pipes_orig = fd_pipes.copy()
2376 # TODO: Use job control functions like tcsetpgrp() to control
2377 # access to stdin. Until then, use /dev/null so that any
2378 # attempts to read from stdin will immediately return EOF
2379 # instead of blocking indefinitely.
2380 null_input = open('/dev/null', 'rb')
2381 fd_pipes[0] = null_input.fileno()
2383 fd_pipes[0] = fd_pipes_orig[0]
2385 files.process = os.fdopen(master_fd, 'rb')
2386 if logfile is not None:
2388 fd_pipes[1] = slave_fd
2389 fd_pipes[2] = slave_fd
2391 files.log = open(logfile, mode='ab')
2392 portage.util.apply_secpass_permissions(logfile,
2393 uid=portage.portage_uid, gid=portage.portage_gid,
2396 if not self.background:
2397 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2399 output_handler = self._output_handler
2403 # Create a dummy pipe so the scheduler can monitor
2404 # the process from inside a poll() loop.
2405 fd_pipes[self._dummy_pipe_fd] = slave_fd
2407 fd_pipes[1] = slave_fd
2408 fd_pipes[2] = slave_fd
2409 output_handler = self._dummy_handler
2412 for k in self._spawn_kwarg_names:
2413 v = getattr(self, k)
2417 kwargs["fd_pipes"] = fd_pipes
2418 kwargs["returnpid"] = True
2419 kwargs.pop("logfile", None)
2421 self._reg_id = self.scheduler.register(files.process.fileno(),
2422 self._registered_events, output_handler)
2423 self._registered = True
2425 retval = self._spawn(self.args, **kwargs)
2428 if null_input is not None:
2431 if isinstance(retval, int):
2434 self.returncode = retval
2438 self.pid = retval[0]
2439 portage.process.spawned_pids.remove(self.pid)
2441 def _pipe(self, fd_pipes):
2443 @type fd_pipes: dict
2444 @param fd_pipes: pipes from which to copy terminal size if desired.
2448 def _spawn(self, args, **kwargs):
2449 return portage.process.spawn(args, **kwargs)
2451 def _output_handler(self, fd, event):
2453 if event & PollConstants.POLLIN:
2456 buf = array.array('B')
2458 buf.fromfile(files.process, self._bufsize)
2463 if not self.background:
2464 write_successful = False
2468 if not write_successful:
2469 buf.tofile(files.stdout)
2470 write_successful = True
2471 files.stdout.flush()
2474 if e.errno != errno.EAGAIN:
2479 # Avoid a potentially infinite loop. In
2480 # most cases, the failure count is zero
2481 # and it's unlikely to exceed 1.
2484 # This means that a subprocess has put an inherited
2485 # stdio file descriptor (typically stdin) into
2486 # O_NONBLOCK mode. This is not acceptable (see bug
2487 # #264435), so revert it. We need to use a loop
2488 # here since there's a race condition due to
2489 # parallel processes being able to change the
2490 # flags on the inherited file descriptor.
2491 # TODO: When possible, avoid having child processes
2492 # inherit stdio file descriptors from portage
2493 # (maybe it can't be avoided with
2494 # PROPERTIES=interactive).
2495 fcntl.fcntl(files.stdout.fileno(), fcntl.F_SETFL,
2496 fcntl.fcntl(files.stdout.fileno(),
2497 fcntl.F_GETFL) ^ os.O_NONBLOCK)
2499 buf.tofile(files.log)
2505 self._unregister_if_appropriate(event)
2506 return self._registered
2508 def _dummy_handler(self, fd, event):
2510 This method is mainly interested in detecting EOF, since
2511 the only purpose of the pipe is to allow the scheduler to
2512 monitor the process from inside a poll() loop.
2515 if event & PollConstants.POLLIN:
2517 buf = array.array('B')
2519 buf.fromfile(self._files.process, self._bufsize)
2529 self._unregister_if_appropriate(event)
2530 return self._registered
2532 class MiscFunctionsProcess(SpawnProcess):
2534 Spawns misc-functions.sh with an existing ebuild environment.
2537 __slots__ = ("commands", "phase", "pkg", "settings")
2540 settings = self.settings
2541 settings.pop("EBUILD_PHASE", None)
2542 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2543 misc_sh_binary = os.path.join(portage_bin_path,
2544 os.path.basename(portage.const.MISC_SH_BINARY))
2546 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2547 self.logfile = settings.get("PORTAGE_LOG_FILE")
2549 portage._doebuild_exit_status_unlink(
2550 settings.get("EBUILD_EXIT_STATUS_FILE"))
2552 SpawnProcess._start(self)
2554 def _spawn(self, args, **kwargs):
2555 settings = self.settings
2556 debug = settings.get("PORTAGE_DEBUG") == "1"
2557 return portage.spawn(" ".join(args), settings,
2558 debug=debug, **kwargs)
2560 def _set_returncode(self, wait_retval):
2561 SpawnProcess._set_returncode(self, wait_retval)
2562 self.returncode = portage._doebuild_exit_status_check_and_log(
2563 self.settings, self.phase, self.returncode)
class EbuildFetcher(SpawnProcess):
    # Runs `ebuild <file> fetch` in a subprocess to download SRC_URI
    # distfiles.  Also used in "prefetch" mode by the parallel-fetch
    # scheduler (logging then goes to emerge-fetch.log).

    __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \

    # NOTE(review): fragment of _start() — the "def" line is not visible in
    # this chunk.  Borrows a config instance from config_pool and targets it
    # at the package being fetched.
    root_config = self.pkg.root_config
    portdb = root_config.trees["porttree"].dbapi
    ebuild_path = portdb.findname(self.pkg.cpv)
    settings = self.config_pool.allocate()
    settings.setcpv(self.pkg)

    # In prefetch mode, logging goes to emerge-fetch.log and the builddir
    # should not be touched since otherwise it could interfere with
    # another instance of the same cpv concurrently being built for a
    # different $ROOT (currently, builds only cooperate with prefetchers
    # that are spawned for the same $ROOT).
    if not self.prefetch:
        self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
        self._build_dir.lock()
        # Discard any stale log before the new fetch appends to it.
        self._build_dir.clean_log()
        portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
        if self.logfile is None:
            self.logfile = settings.get("PORTAGE_LOG_FILE")

    # If any incremental variables have been overridden
    # via the environment, those values need to be passed
    # along here so that they are correctly considered by
    # the config instance in the subprocess.
    fetch_env = os.environ.copy()

    nocolor = settings.get("NOCOLOR")
    if nocolor is not None:
        fetch_env["NOCOLOR"] = nocolor

    # Run the child at default priority; any niceness is already applied
    # to this (parent) process.
    fetch_env["PORTAGE_NICENESS"] = "0"
    # Signals the ebuild script that this is a cooperative parallel fetch.
    fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"

    ebuild_binary = os.path.join(
        settings["PORTAGE_BIN_PATH"], "ebuild")

    fetch_args = [ebuild_binary, ebuild_path, phase]
    debug = settings.get("PORTAGE_DEBUG") == "1"
    fetch_args.append("--debug")

    self.args = fetch_args
    self.env = fetch_env
    SpawnProcess._start(self)

    def _pipe(self, fd_pipes):
        """When appropriate, use a pty so that fetcher progress bars,
        like wget has, will work properly."""
        if self.background or not sys.stdout.isatty():
            # When the output only goes to a log file,
            # there's no point in creating a pty.
        stdout_pipe = fd_pipes.get(1)
        got_pty, master_fd, slave_fd = \
            portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
        return (master_fd, slave_fd)

    def _set_returncode(self, wait_retval):
        # Post-exit handling: report fetch failures via elog and release
        # the build dir lock / pooled config instance.
        SpawnProcess._set_returncode(self, wait_retval)
        # Collect elog messages that might have been
        # created by the pkg_nofetch phase.
        if self._build_dir is not None:
            # Skip elog messages for prefetch, in order to avoid duplicates.
            if not self.prefetch and self.returncode != os.EX_OK:
                if self.logfile is not None:
                    elog_out = open(self.logfile, 'a')
                msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
                if self.logfile is not None:
                    msg += ", Log file:"
                eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
                if self.logfile is not None:
                    eerror(" '%s'" % (self.logfile,),
                        phase="unpack", key=self.pkg.cpv, out=elog_out)
                if elog_out is not None:
            if not self.prefetch:
                portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
            features = self._build_dir.settings.features
            if self.returncode == os.EX_OK:
                # Successful fetch: the log carries no useful information.
                self._build_dir.clean_log()
            self._build_dir.unlock()
            # Return the borrowed config instance to the pool.
            self.config_pool.deallocate(self._build_dir.settings)
            self._build_dir = None
class EbuildBuildDir(SlotObject):
    # Manages locking of a package's PORTAGE_BUILDDIR (and transient
    # locking/creation of its parent category dir).

    __slots__ = ("dir_path", "pkg", "settings",
        "locked", "_catdir", "_lock_obj")

    def __init__(self, **kwargs):
        SlotObject.__init__(self, **kwargs)

        # NOTE(review): fragment of lock() — the "def" line and docstring
        # opening are not visible in this chunk.
        This raises an AlreadyLocked exception if lock() is called
        while a lock is already held. In order to avoid this, call
        unlock() or check whether the "locked" attribute is True
        or False before calling lock().

        if self._lock_obj is not None:
            raise self.AlreadyLocked((self._lock_obj,))

        dir_path = self.dir_path
        if dir_path is None:
            # No explicit dir_path: derive PORTAGE_BUILDDIR by running
            # doebuild_environment() for the "setup" phase.
            root_config = self.pkg.root_config
            portdb = root_config.trees["porttree"].dbapi
            ebuild_path = portdb.findname(self.pkg.cpv)
            settings = self.settings
            settings.setcpv(self.pkg)
            debug = settings.get("PORTAGE_DEBUG") == "1"
            use_cache = 1 # always true
            portage.doebuild_environment(ebuild_path, "setup", root_config.root,
                self.settings, debug, use_cache, portdb)
            dir_path = self.settings["PORTAGE_BUILDDIR"]

        catdir = os.path.dirname(dir_path)
        self._catdir = catdir

        # Ensure the grandparent dir exists with the portage group before
        # taking the category-dir lock.
        portage.util.ensure_dirs(os.path.dirname(catdir),
            gid=portage.portage_gid,
        catdir_lock = portage.locks.lockdir(catdir)
        portage.util.ensure_dirs(catdir,
            gid=portage.portage_gid,
        self._lock_obj = portage.locks.lockdir(dir_path)
        self.locked = self._lock_obj is not None
        if catdir_lock is not None:
            # The category lock was only needed while creating dir_path.
            portage.locks.unlockdir(catdir_lock)

    def clean_log(self):
        """Discard existing log."""
        settings = self.settings

        for x in ('.logid', 'temp/build.log'):
            os.unlink(os.path.join(settings["PORTAGE_BUILDDIR"], x))

        # NOTE(review): fragment of unlock() — releases the builddir lock
        # and attempts a best-effort removal of the (possibly now empty)
        # category dir, tolerating ENOENT/ENOTEMPTY/EEXIST.
        if self._lock_obj is None:
        portage.locks.unlockdir(self._lock_obj)
        self._lock_obj = None
        catdir = self._catdir
        catdir_lock = portage.locks.lockdir(catdir)
        if e.errno not in (errno.ENOENT,
            errno.ENOTEMPTY, errno.EEXIST):
        portage.locks.unlockdir(catdir_lock)

    class AlreadyLocked(portage.exception.PortageException):
class EbuildBuild(CompositeTask):
    # Composite task that drives one ebuild through prefetch-wait, fetch,
    # build, optional binary packaging, and merge.

    __slots__ = ("args_set", "config_pool", "find_blockers",
        "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
        "prefetcher", "settings", "world_atom") + \
        ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")

        # NOTE(review): fragment of _start() — "def" line not visible here.
        logger = self.logger
        settings = self.settings
        world_atom = self.world_atom
        root_config = pkg.root_config
        portdb = root_config.trees[tree].dbapi
        settings.setcpv(pkg)
        # Record how this package entered the merge (ebuild vs binary).
        settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
        ebuild_path = portdb.findname(self.pkg.cpv)
        self._ebuild_path = ebuild_path

        # If a background prefetcher for this cpv is still running, wait
        # for it instead of racing it for the distfile locks.
        prefetcher = self.prefetcher
        if prefetcher is None:
        elif not prefetcher.isAlive():
        elif prefetcher.poll() is None:
            waiting_msg = "Fetching files " + \
                "in the background. " + \
                "To view fetch progress, run `tail -f " + \
                "/var/log/emerge-fetch.log` in another " + \
            msg_prefix = colorize("GOOD", " * ")
            from textwrap import wrap
            waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
                for line in wrap(waiting_msg, 65))
            if not self.background:
                writemsg(waiting_msg, noiselevel=-1)

            self._current_task = prefetcher
            prefetcher.addExitListener(self._prefetch_exit)

        self._prefetch_exit(prefetcher)

    def _prefetch_exit(self, prefetcher):
        # Prefetch done (or skipped): start the real fetch.  In pretend /
        # fetchonly-style modes a synchronous EbuildFetchonly is used;
        # otherwise an asynchronous EbuildFetcher task is started.
        settings = self.settings

        fetcher = EbuildFetchonly(
            fetch_all=opts.fetch_all_uri,
            pkg=pkg, pretend=opts.pretend,
        retval = fetcher.execute()
        self.returncode = retval

        fetcher = EbuildFetcher(config_pool=self.config_pool,
            fetchall=opts.fetch_all_uri,
            fetchonly=opts.fetchonly,
            background=self.background,
            pkg=pkg, scheduler=self.scheduler)

        self._start_task(fetcher, self._fetch_exit)

    def _fetch_exit(self, fetcher):
        # Evaluate fetch result, then (unless fetch failed or fetchonly)
        # log the Clean/Compile banners and start the EbuildExecuter.

        fetch_failed = False
        fetch_failed = self._final_exit(fetcher) != os.EX_OK
        fetch_failed = self._default_exit(fetcher) != os.EX_OK

        if fetch_failed and fetcher.logfile is not None and \
            os.path.exists(fetcher.logfile):
            # Preserve the failing fetch log for later display.
            self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile

        if not fetch_failed and fetcher.logfile is not None:
            # Fetch was successful, so remove the fetch log.
            os.unlink(fetcher.logfile)

        if fetch_failed or opts.fetchonly:

        logger = self.logger
        pkg_count = self.pkg_count
        scheduler = self.scheduler
        settings = self.settings
        features = settings.features
        ebuild_path = self._ebuild_path
        system_set = pkg.root_config.sets["system"]

        self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
        self._build_dir.lock()

        # Cleaning is triggered before the setup
        # phase, in portage.doebuild().
        msg = " === (%s of %s) Cleaning (%s::%s)" % \
            (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
        short_msg = "emerge: (%s of %s) %s Clean" % \
            (pkg_count.curval, pkg_count.maxval, pkg.cpv)
        logger.log(msg, short_msg=short_msg)

        #buildsyspkg: Check if we need to _force_ binary package creation
        self._issyspkg = "buildsyspkg" in features and \
            system_set.findAtomForPackage(pkg) and \

        if opts.buildpkg or self._issyspkg:

            self._buildpkg = True

            msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
                (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
            short_msg = "emerge: (%s of %s) %s Compile" % \
                (pkg_count.curval, pkg_count.maxval, pkg.cpv)
            logger.log(msg, short_msg=short_msg)

            msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
                (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
            short_msg = "emerge: (%s of %s) %s Compile" % \
                (pkg_count.curval, pkg_count.maxval, pkg.cpv)
            logger.log(msg, short_msg=short_msg)

        build = EbuildExecuter(background=self.background, pkg=pkg,
            scheduler=scheduler, settings=settings)
        self._start_task(build, self._build_exit)

    def _unlock_builddir(self):
        # Flush pending elog messages before dropping the builddir lock.
        portage.elog.elog_process(self.pkg.cpv, self.settings)
        self._build_dir.unlock()

    def _build_exit(self, build):
        # Build finished: either bail out on failure, or proceed to binary
        # packaging (buildpkg / forced system-package tarball) if requested.
        if self._default_exit(build) != os.EX_OK:
            self._unlock_builddir()

        buildpkg = self._buildpkg

        self._final_exit(build)

        msg = ">>> This is a system package, " + \
            "let's pack a rescue tarball.\n"

        log_path = self.settings.get("PORTAGE_LOG_FILE")
        if log_path is not None:
            log_file = open(log_path, 'a')

        if not self.background:
            portage.writemsg_stdout(msg, noiselevel=-1)

        packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
            scheduler=self.scheduler, settings=self.settings)

        self._start_task(packager, self._buildpkg_exit)

    def _buildpkg_exit(self, packager):
        """
        Released build dir lock when there is a failure or
        when in buildpkgonly mode. Otherwise, the lock will
        be released when merge() is called.
        """

        if self._default_exit(packager) != os.EX_OK:
            self._unlock_builddir()

        if self.opts.buildpkgonly:
            # Need to call "clean" phase for buildpkgonly mode
            portage.elog.elog_process(self.pkg.cpv, self.settings)

            clean_phase = EbuildPhase(background=self.background,
                pkg=self.pkg, phase=phase,
                scheduler=self.scheduler, settings=self.settings,
            self._start_task(clean_phase, self._clean_exit)

        # Continue holding the builddir lock until
        # after the package has been installed.
        self._current_task = None
        self.returncode = packager.returncode

    def _clean_exit(self, clean_phase):
        # Final step of buildpkgonly mode (or failure path): drop the lock.
        if self._final_exit(clean_phase) != os.EX_OK or \
            self.opts.buildpkgonly:
            self._unlock_builddir()

        # NOTE(review): fragment of install() — "def" line and docstring
        # opening are not visible in this chunk.
        Install the package and then clean up and release locks.
        Only call this after the build has completed successfully
        and neither fetchonly nor buildpkgonly mode are enabled.

        find_blockers = self.find_blockers
        ldpath_mtimes = self.ldpath_mtimes
        logger = self.logger
        pkg_count = self.pkg_count
        settings = self.settings
        world_atom = self.world_atom
        ebuild_path = self._ebuild_path

        merge = EbuildMerge(find_blockers=self.find_blockers,
            ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
            pkg_count=pkg_count, pkg_path=ebuild_path,
            scheduler=self.scheduler,
            settings=settings, tree=tree, world_atom=world_atom)

        msg = " === (%s of %s) Merging (%s::%s)" % \
            (pkg_count.curval, pkg_count.maxval,
            pkg.cpv, ebuild_path)
        short_msg = "emerge: (%s of %s) %s Merge" % \
            (pkg_count.curval, pkg_count.maxval, pkg.cpv)
        logger.log(msg, short_msg=short_msg)

        rval = merge.execute()
        self._unlock_builddir()
class EbuildExecuter(CompositeTask):
    # Runs the build-phase pipeline for one ebuild:
    # clean -> setup -> unpack -> (prepare, configure,) compile, test, install.

    __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)

    _phases = ("prepare", "configure", "compile", "test", "install")

    # Eclasses whose $DISTDIR access must be serialized (see _setup_exit).
    _live_eclasses = frozenset([

        # NOTE(review): fragment of _start() — "def" line not visible here.
        self._tree = "porttree"
        clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
            scheduler=self.scheduler, settings=self.settings, tree=self._tree)
        self._start_task(clean_phase, self._clean_phase_exit)

    def _clean_phase_exit(self, clean_phase):
        # After clean: prepare build dirs and schedule the "setup" phase
        # through the scheduler's setup queue.
        if self._default_exit(clean_phase) != os.EX_OK:

        scheduler = self.scheduler
        settings = self.settings

        # This initializes PORTAGE_LOG_FILE.
        portage.prepare_build_dirs(pkg.root, settings, cleanup)

        setup_phase = EbuildPhase(background=self.background,
            pkg=pkg, phase="setup", scheduler=scheduler,
            settings=settings, tree=self._tree)

        setup_phase.addExitListener(self._setup_exit)
        self._current_task = setup_phase
        self.scheduler.scheduleSetup(setup_phase)

    def _setup_exit(self, setup_phase):
        # After setup: run "unpack", serialized for live ebuilds.
        if self._default_exit(setup_phase) != os.EX_OK:

        unpack_phase = EbuildPhase(background=self.background,
            pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
            settings=self.settings, tree=self._tree)

        if self._live_eclasses.intersection(self.pkg.inherited):
            # Serialize $DISTDIR access for live ebuilds since
            # otherwise they can interfere with each other.

            unpack_phase.addExitListener(self._unpack_exit)
            self._current_task = unpack_phase
            self.scheduler.scheduleUnpack(unpack_phase)

        self._start_task(unpack_phase, self._unpack_exit)

    def _unpack_exit(self, unpack_phase):
        # After unpack: queue the remaining phases as a TaskSequence.
        if self._default_exit(unpack_phase) != os.EX_OK:

        ebuild_phases = TaskSequence(scheduler=self.scheduler)

        phases = self._phases
        eapi = pkg.metadata["EAPI"]
        if eapi in ("0", "1"):
            # skip src_prepare and src_configure

        for phase in phases:
            ebuild_phases.add(EbuildPhase(background=self.background,
                pkg=self.pkg, phase=phase, scheduler=self.scheduler,
                settings=self.settings, tree=self._tree))

        self._start_task(ebuild_phases, self._default_final_exit)
class EbuildMetadataPhase(SubProcess):

    """
    Asynchronous interface for the ebuild "depend" phase which is
    used to extract metadata from the ebuild.
    """

    __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
        "ebuild_mtime", "metadata", "portdb", "repo_path", "settings") + \

    _file_names = ("ebuild",)
    _files_dict = slot_dict_class(_file_names, prefix="")

        # NOTE(review): fragment of _start() — "def" line not visible here.
        settings = self.settings
        settings.setcpv(self.cpv)
        ebuild_path = self.ebuild_path

        # Try to determine EAPI without sourcing the ebuild: first from a
        # GLEP 55 style file name, then from the ebuild's head comment.
        if 'parse-eapi-glep-55' in settings.features:
            pf, eapi = portage._split_ebuild_name_glep55(
                os.path.basename(ebuild_path))
        if eapi is None and \
            'parse-eapi-ebuild-head' in settings.features:
            eapi = portage._parse_eapi_ebuild_head(codecs.open(ebuild_path,
                mode='r', encoding='utf_8', errors='replace'))

        if eapi is not None:
            if not portage.eapi_is_supported(eapi):
                # Unsupported EAPI: report it via the callback and finish
                # without spawning the depend phase at all.
                self.metadata_callback(self.cpv, self.ebuild_path,
                    self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
                self.returncode = os.EX_OK

            settings.configdict['pkg']['EAPI'] = eapi

        debug = settings.get("PORTAGE_DEBUG") == "1"

        if self.fd_pipes is not None:
            fd_pipes = self.fd_pipes.copy()

        fd_pipes.setdefault(0, sys.stdin.fileno())
        fd_pipes.setdefault(1, sys.stdout.fileno())
        fd_pipes.setdefault(2, sys.stderr.fileno())

        # flush any pending output
        for fd in fd_pipes.itervalues():
            if fd == sys.stdout.fileno():
            if fd == sys.stderr.fileno():

        fd_pipes_orig = fd_pipes.copy()
        self._files = self._files_dict()

        # Non-blocking pipe on which the child writes the metadata lines.
        master_fd, slave_fd = os.pipe()
        fcntl.fcntl(master_fd, fcntl.F_SETFL,
            fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

        fd_pipes[self._metadata_fd] = slave_fd

        self._raw_metadata = []
        files.ebuild = os.fdopen(master_fd, 'r')
        # Register with the scheduler's poll loop to collect output.
        self._reg_id = self.scheduler.register(files.ebuild.fileno(),
            self._registered_events, self._output_handler)
        self._registered = True

        retval = portage.doebuild(ebuild_path, "depend",
            settings["ROOT"], settings, debug,
            mydbapi=self.portdb, tree="porttree",
            fd_pipes=fd_pipes, returnpid=True)

        if isinstance(retval, int):
            # doebuild failed before spawning
            self.returncode = retval

        self.pid = retval[0]
        # This class manages the child's lifetime itself.
        portage.process.spawned_pids.remove(self.pid)

    def _output_handler(self, fd, event):
        # Poll-loop callback: accumulate metadata output; empty read = EOF.
        if event & PollConstants.POLLIN:
            self._raw_metadata.append(self._files.ebuild.read())
            if not self._raw_metadata[-1]:

        self._unregister_if_appropriate(event)
        return self._registered

    def _set_returncode(self, wait_retval):
        SubProcess._set_returncode(self, wait_retval)
        if self.returncode == os.EX_OK:
            metadata_lines = "".join(self._raw_metadata).splitlines()
            if len(portage.auxdbkeys) != len(metadata_lines):
                # Don't trust bash's returncode if the
                # number of lines is incorrect.
            metadata = izip(portage.auxdbkeys, metadata_lines)
            self.metadata = self.metadata_callback(self.cpv,
                self.ebuild_path, self.repo_path, metadata,
class EbuildProcess(SpawnProcess):
    # Spawns a single ebuild phase via portage.doebuild().

    __slots__ = ("phase", "pkg", "settings", "tree")

        # NOTE(review): fragment of _start() — "def" line not visible here.
        # Don't open the log file during the clean phase since the
        # open file can result in an nfs lock on $T/build.log which
        # prevents the clean phase from removing $T.
        if self.phase not in ("clean", "cleanrm"):
            self.logfile = self.settings.get("PORTAGE_LOG_FILE")
        SpawnProcess._start(self)

    def _pipe(self, fd_pipes):
        # Use a pty (when available) so interactive-looking output such as
        # progress bars renders correctly; falls back to a plain pipe.
        stdout_pipe = fd_pipes.get(1)
        got_pty, master_fd, slave_fd = \
            portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
        return (master_fd, slave_fd)

    def _spawn(self, args, **kwargs):
        # Delegate the actual phase execution to portage.doebuild().
        root_config = self.pkg.root_config
        mydbapi = root_config.trees[tree].dbapi
        settings = self.settings
        ebuild_path = settings["EBUILD"]
        debug = settings.get("PORTAGE_DEBUG") == "1"

        rval = portage.doebuild(ebuild_path, self.phase,
            root_config.root, settings, debug,
            mydbapi=mydbapi, tree=tree, **kwargs)

    def _set_returncode(self, wait_retval):
        SpawnProcess._set_returncode(self, wait_retval)

        # Clean phases have no exit-status file to check.
        if self.phase not in ("clean", "cleanrm"):
            self.returncode = portage._doebuild_exit_status_check_and_log(
                self.settings, self.phase, self.returncode)

        # FEATURES=test-fail-continue: a failed test phase is non-fatal.
        if self.phase == "test" and self.returncode != os.EX_OK and \
            "test-fail-continue" in self.settings.features:
            self.returncode = os.EX_OK

        portage._post_phase_userpriv_perms(self.settings)
class EbuildPhase(CompositeTask):
    # Wraps an EbuildProcess for one phase and runs any registered
    # post-phase misc-functions commands afterwards.

    __slots__ = ("background", "pkg", "phase",
        "scheduler", "settings", "tree")

    _post_phase_cmds = portage._post_phase_cmds

        # NOTE(review): fragment of _start() — "def" line not visible here.
        ebuild_process = EbuildProcess(background=self.background,
            pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
            settings=self.settings, tree=self.tree)

        self._start_task(ebuild_process, self._ebuild_exit)

    def _ebuild_exit(self, ebuild_process):
        # Phase finished: run QA checks on the build log after "install",
        # apply post-install fixups, then run post-phase commands if any.
        if self.phase == "install":
            log_path = self.settings.get("PORTAGE_LOG_FILE")
            if self.background and log_path is not None:
                log_file = open(log_path, 'a')
            portage._check_build_log(self.settings, out=out)
            if log_file is not None:

        if self._default_exit(ebuild_process) != os.EX_OK:

        settings = self.settings

        if self.phase == "install":
            portage._post_src_install_chost_fix(settings)
            portage._post_src_install_uid_fix(settings)

        post_phase_cmds = self._post_phase_cmds.get(self.phase)
        if post_phase_cmds is not None:
            post_phase = MiscFunctionsProcess(background=self.background,
                commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
                scheduler=self.scheduler, settings=settings)
            self._start_task(post_phase, self._post_phase_exit)

        self.returncode = ebuild_process.returncode
        self._current_task = None

    def _post_phase_exit(self, post_phase):
        if self._final_exit(post_phase) != os.EX_OK:
            writemsg("!!! post %s failed; exiting.\n" % self.phase,
        self._current_task = None
class EbuildBinpkg(EbuildProcess):
    """
    Runs the "package" phase to produce a .tbz2 binary package.
    This assumes that src_install() has successfully completed.
    """
    __slots__ = ("_binpkg_tmpfile",)

        # NOTE(review): fragment of _start() — "def" line not visible here.
        self.phase = "package"
        self.tree = "porttree"
        root_config = pkg.root_config
        portdb = root_config.trees["porttree"].dbapi
        bintree = root_config.trees["bintree"]
        ebuild_path = portdb.findname(self.pkg.cpv)
        settings = self.settings
        debug = settings.get("PORTAGE_DEBUG") == "1"

        # Make sure no existing (possibly remote) package is clobbered.
        bintree.prevent_collision(pkg.cpv)
        # Build into a pid-suffixed temp file; injected on success.
        binpkg_tmpfile = os.path.join(bintree.pkgdir,
            pkg.cpv + ".tbz2." + str(os.getpid()))
        self._binpkg_tmpfile = binpkg_tmpfile
        settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
        settings.backup_changes("PORTAGE_BINPKG_TMPFILE")

        EbuildProcess._start(self)

        settings.pop("PORTAGE_BINPKG_TMPFILE", None)

    def _set_returncode(self, wait_retval):
        EbuildProcess._set_returncode(self, wait_retval)

        pkg = self.pkg
        bintree = pkg.root_config.trees["bintree"]
        binpkg_tmpfile = self._binpkg_tmpfile
        if self.returncode == os.EX_OK:
            # Move the temp file into the bintree and index it.
            bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
class EbuildMerge(SlotObject):
    # Synchronous wrapper around portage.merge() for a freshly built image.

    __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
        "pkg", "pkg_count", "pkg_path", "pretend",
        "scheduler", "settings", "tree", "world_atom")

        # NOTE(review): fragment of execute() — "def" line not visible here.
        root_config = self.pkg.root_config
        settings = self.settings
        retval = portage.merge(settings["CATEGORY"],
            settings["PF"], settings["D"],
            os.path.join(settings["PORTAGE_BUILDDIR"],
            "build-info"), root_config.root, settings,
            myebuild=settings["EBUILD"],
            mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
            vartree=root_config.trees["vartree"],
            prev_mtimes=self.ldpath_mtimes,
            scheduler=self.scheduler,
            blockers=self.find_blockers)

        if retval == os.EX_OK:
            # Update the world file / package sets via the provided hook.
            self.world_atom(self.pkg)

    def _log_success(self):
        # Emit "Clean Post" (unless FEATURES=noclean) and "completed
        # emerge" messages to the emerge log.
        pkg_count = self.pkg_count
        pkg_path = self.pkg_path
        logger = self.logger
        if "noclean" not in self.settings.features:
            short_msg = "emerge: (%s of %s) %s Clean Post" % \
                (pkg_count.curval, pkg_count.maxval, pkg.cpv)
            logger.log((" === (%s of %s) " + \
                "Post-Build Cleaning (%s::%s)") % \
                (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
                short_msg=short_msg)
        logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
            (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
class PackageUninstall(AsynchronousTask):
    # Synchronously unmerges one installed package via unmerge().

    __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")

            # NOTE(review): fragment of _start() — "def" and "try:" lines
            # are not visible here.
            unmerge(self.pkg.root_config, self.opts, "unmerge",
                [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
                clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
                writemsg_level=self._writemsg_level)
        except UninstallFailure, e:
            self.returncode = e.status
            self.returncode = os.EX_OK

    def _writemsg_level(self, msg, level=0, noiselevel=0):
        # Route unmerge output to the console and/or the log file,
        # suppressing low-priority console output in background mode.
        log_path = self.settings.get("PORTAGE_LOG_FILE")
        background = self.background

        if log_path is None:
            if not (background and level < logging.WARNING):
                portage.util.writemsg_level(msg,
                    level=level, noiselevel=noiselevel)
            portage.util.writemsg_level(msg,
                level=level, noiselevel=noiselevel)
            f = open(log_path, 'a')
class Binpkg(CompositeTask):
    # Composite task that installs a binary package: prefetch-wait, fetch,
    # verify, clean, info/image extraction, setup, and merge.

    __slots__ = ("find_blockers",
        "ldpath_mtimes", "logger", "opts",
        "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
        ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
        "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")

    def _writemsg_level(self, msg, level=0, noiselevel=0):
        # Mirror output to the console (foreground only) and the log file.

        if not self.background:
            portage.util.writemsg_level(msg,
                level=level, noiselevel=noiselevel)

        log_path = self.settings.get("PORTAGE_LOG_FILE")
        if log_path is not None:
            f = open(log_path, 'a')

        # NOTE(review): fragment of _start() — "def" line not visible here.
        settings = self.settings
        settings.setcpv(pkg)
        self._tree = "bintree"
        self._bintree = self.pkg.root_config.trees[self._tree]
        self._verify = not self.opts.pretend

        # Build-dir layout used for extracting the package's metadata and
        # image prior to merging.
        dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
            "portage", pkg.category, pkg.pf)
        self._build_dir = EbuildBuildDir(dir_path=dir_path,
            pkg=pkg, settings=settings)
        self._image_dir = os.path.join(dir_path, "image")
        self._infloc = os.path.join(dir_path, "build-info")
        self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
        settings["EBUILD"] = self._ebuild_path
        debug = settings.get("PORTAGE_DEBUG") == "1"
        portage.doebuild_environment(self._ebuild_path, "setup",
            settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
        settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name

        # The prefetcher has already completed or it
        # could be running now. If it's running now,
        # wait for it to complete since it holds
        # a lock on the file being fetched. The
        # portage.locks functions are only designed
        # to work between separate processes. Since
        # the lock is held by the current process,
        # use the scheduler and fetcher methods to
        # synchronize with the fetcher.
        prefetcher = self.prefetcher
        if prefetcher is None:
        elif not prefetcher.isAlive():
        elif prefetcher.poll() is None:

            waiting_msg = ("Fetching '%s' " + \
                "in the background. " + \
                "To view fetch progress, run `tail -f " + \
                "/var/log/emerge-fetch.log` in another " + \
                "terminal.") % prefetcher.pkg_path
            msg_prefix = colorize("GOOD", " * ")
            from textwrap import wrap
            waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
                for line in wrap(waiting_msg, 65))
            if not self.background:
                writemsg(waiting_msg, noiselevel=-1)

            self._current_task = prefetcher
            prefetcher.addExitListener(self._prefetch_exit)

        self._prefetch_exit(prefetcher)

    def _prefetch_exit(self, prefetcher):
        # Prefetch done (or skipped): lock the build dir (unless in
        # pretend/fetchonly mode) and start the binary package fetcher.

        pkg_count = self.pkg_count
        if not (self.opts.pretend or self.opts.fetchonly):
            self._build_dir.lock()
            # If necessary, discard old log so that we don't
            self._build_dir.clean_log()
            # Initialize PORTAGE_LOG_FILE.
            portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
        fetcher = BinpkgFetcher(background=self.background,
            logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
            pretend=self.opts.pretend, scheduler=self.scheduler)
        pkg_path = fetcher.pkg_path
        self._pkg_path = pkg_path

        if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):

            msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
                (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
            short_msg = "emerge: (%s of %s) %s Fetch" % \
                (pkg_count.curval, pkg_count.maxval, pkg.cpv)
            self.logger.log(msg, short_msg=short_msg)
            self._start_task(fetcher, self._fetcher_exit)

        self._fetcher_exit(fetcher)

    def _fetcher_exit(self, fetcher):

        # The fetcher only has a returncode when
        # --getbinpkg is enabled.
        if fetcher.returncode is not None:
            self._fetched_pkg = True
            if self._default_exit(fetcher) != os.EX_OK:
                self._unlock_builddir()

        if self.opts.pretend:
            self._current_task = None
            self.returncode = os.EX_OK

        # Verify the (possibly freshly fetched) package before use.
        logfile = self.settings.get("PORTAGE_LOG_FILE")
        verifier = BinpkgVerifier(background=self.background,
            logfile=logfile, pkg=self.pkg)
        self._start_task(verifier, self._verifier_exit)

        self._verifier_exit(verifier)

    def _verifier_exit(self, verifier):
        # Verification done: inject a newly fetched package into the
        # bintree, then log the merge banner and run the "clean" phase.
        if verifier is not None and \
            self._default_exit(verifier) != os.EX_OK:
            self._unlock_builddir()

        logger = self.logger
        pkg_count = self.pkg_count
        pkg_path = self._pkg_path

        if self._fetched_pkg:
            self._bintree.inject(pkg.cpv, filename=pkg_path)

        if self.opts.fetchonly:
            self._current_task = None
            self.returncode = os.EX_OK

        msg = " === (%s of %s) Merging Binary (%s::%s)" % \
            (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
        short_msg = "emerge: (%s of %s) %s Merge Binary" % \
            (pkg_count.curval, pkg_count.maxval, pkg.cpv)
        logger.log(msg, short_msg=short_msg)

        settings = self.settings
        ebuild_phase = EbuildPhase(background=self.background,
            pkg=pkg, phase=phase, scheduler=self.scheduler,
            settings=settings, tree=self._tree)

        self._start_task(ebuild_phase, self._clean_exit)

    def _clean_exit(self, clean_phase):
        # Clean done: create build dirs, unpack xpak metadata into
        # build-info, record the md5sum, then schedule the "setup" phase.
        if self._default_exit(clean_phase) != os.EX_OK:
            self._unlock_builddir()

        dir_path = self._build_dir.dir_path

        infloc = self._infloc
        pkg_path = self._pkg_path

        for mydir in (dir_path, self._image_dir, infloc):
            portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
                gid=portage.data.portage_gid, mode=dir_mode)

        # This initializes PORTAGE_LOG_FILE.
        portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
        self._writemsg_level(">>> Extracting info\n")

        pkg_xpak = portage.xpak.tbz2(self._pkg_path)
        check_missing_metadata = ("CATEGORY", "PF")
        missing_metadata = set()
        for k in check_missing_metadata:
            v = pkg_xpak.getfile(k)
            missing_metadata.add(k)

        pkg_xpak.unpackinfo(infloc)
        for k in missing_metadata:
            # Fall back to values derivable from the cpv for any metadata
            # keys absent from the package's xpak segment.
            f = open(os.path.join(infloc, k), 'wb')

        # Store the md5sum in the vdb.
        f = open(os.path.join(infloc, "BINPKGMD5"), "w")

        f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")

        # This gives bashrc users an opportunity to do various things
        # such as remove binary packages after they're installed.
        settings = self.settings
        settings.setcpv(self.pkg)
        settings["PORTAGE_BINPKG_FILE"] = pkg_path
        settings.backup_changes("PORTAGE_BINPKG_FILE")

        setup_phase = EbuildPhase(background=self.background,
            pkg=self.pkg, phase=phase, scheduler=self.scheduler,
            settings=settings, tree=self._tree)

        setup_phase.addExitListener(self._setup_exit)
        self._current_task = setup_phase
        self.scheduler.scheduleSetup(setup_phase)

    def _setup_exit(self, setup_phase):
        if self._default_exit(setup_phase) != os.EX_OK:
            self._unlock_builddir()

        # Extract the package image asynchronously.
        extractor = BinpkgExtractorAsync(background=self.background,
            image_dir=self._image_dir,
            pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
        self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
        self._start_task(extractor, self._extractor_exit)

    def _extractor_exit(self, extractor):
        if self._final_exit(extractor) != os.EX_OK:
            self._unlock_builddir()
            writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,

    def _unlock_builddir(self):
        # No lock was taken in pretend/fetchonly mode (see _prefetch_exit).
        if self.opts.pretend or self.opts.fetchonly:
        portage.elog.elog_process(self.pkg.cpv, self.settings)
        self._build_dir.unlock()

        # NOTE(review): fragment of install() — "def" line not visible here.
        # This gives bashrc users an opportunity to do various things
        # such as remove binary packages after they're installed.
        settings = self.settings
        settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
        settings.backup_changes("PORTAGE_BINPKG_FILE")

        merge = EbuildMerge(find_blockers=self.find_blockers,
            ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
            pkg=self.pkg, pkg_count=self.pkg_count,
            pkg_path=self._pkg_path, scheduler=self.scheduler,
            settings=settings, tree=self._tree, world_atom=self.world_atom)

        retval = merge.execute()
        settings.pop("PORTAGE_BINPKG_FILE", None)
        self._unlock_builddir()
class BinpkgFetcher(SpawnProcess):
    # Downloads a single .tbz2 from PORTAGE_BINHOST using the configured
    # FETCHCOMMAND/RESUMECOMMAND, with optional file locking.

    __slots__ = ("pkg", "pretend",
        "locked", "pkg_path", "_lock_obj")

    def __init__(self, **kwargs):
        SpawnProcess.__init__(self, **kwargs)
        # Local destination path inside the bintree's package dir.
        self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)

        # NOTE(review): fragment of _start() — "def" line not visible here.
        pretend = self.pretend
        bintree = pkg.root_config.trees["bintree"]
        settings = bintree.settings
        use_locks = "distlocks" in settings.features
        pkg_path = self.pkg_path

        portage.util.ensure_dirs(os.path.dirname(pkg_path))

        # Resume a partial download only if the file is known-invalid
        # (i.e. incomplete); otherwise start fresh.
        exists = os.path.exists(pkg_path)
        resume = exists and os.path.basename(pkg_path) in bintree.invalids
        if not (pretend or resume):
            # Remove existing file or broken symlink.

        # urljoin doesn't work correctly with
        # unrecognized protocols like sftp
        if bintree._remote_has_index:
            rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
            rel_uri = pkg.cpv + ".tbz2"
            uri = bintree._remote_base_uri.rstrip("/") + \
                "/" + rel_uri.lstrip("/")
            uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
                "/" + pkg.pf + ".tbz2"

        # Pretend mode: just print the URI that would be fetched.
        portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
        self.returncode = os.EX_OK

        protocol = urlparse.urlparse(uri)[0]
        fcmd_prefix = "FETCHCOMMAND"
        fcmd_prefix = "RESUMECOMMAND"
        # Prefer a protocol-specific command, e.g. FETCHCOMMAND_HTTP.
        fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
        fcmd = settings.get(fcmd_prefix)

        "DISTDIR" : os.path.dirname(pkg_path),
        "FILE" : os.path.basename(pkg_path)

        fetch_env = dict(settings.iteritems())
        fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
            for x in shlex.split(fcmd)]

        if self.fd_pipes is None:
        fd_pipes = self.fd_pipes
        # Redirect all output to stdout since some fetchers like
        # wget pollute stderr (if portage detects a problem then it
        # can send its own message to stderr).
        fd_pipes.setdefault(0, sys.stdin.fileno())
        fd_pipes.setdefault(1, sys.stdout.fileno())
        fd_pipes.setdefault(2, sys.stdout.fileno())

        self.args = fetch_args
        self.env = fetch_env
        SpawnProcess._start(self)

    def _set_returncode(self, wait_retval):
        SpawnProcess._set_returncode(self, wait_retval)
        if self.returncode == os.EX_OK:
            # If possible, update the mtime to match the remote package if
            # the fetcher didn't already do it automatically.
            bintree = self.pkg.root_config.trees["bintree"]
            if bintree._remote_has_index:
                remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
                if remote_mtime is not None:
                    remote_mtime = long(remote_mtime)
                    local_mtime = long(os.stat(self.pkg_path).st_mtime)
                    if remote_mtime != local_mtime:
                        os.utime(self.pkg_path,
                            (remote_mtime, remote_mtime))

        # NOTE(review): fragment of lock() — "def" line and docstring
        # opening are not visible in this chunk.
        This raises an AlreadyLocked exception if lock() is called
        while a lock is already held. In order to avoid this, call
        unlock() or check whether the "locked" attribute is True
        or False before calling lock().

        if self._lock_obj is not None:
            raise self.AlreadyLocked((self._lock_obj,))

        self._lock_obj = portage.locks.lockfile(
            self.pkg_path, wantnewlockfile=1)

    class AlreadyLocked(portage.exception.PortageException):

        # NOTE(review): fragment of unlock() — "def" line not visible here.
        if self._lock_obj is None:
        portage.locks.unlockfile(self._lock_obj)
        self._lock_obj = None
class BinpkgVerifier(AsynchronousTask):
    # Verifies the digests of a fetched binary package, optionally
    # redirecting output to a log file when running in the background.
    # NOTE(review): several lines appear to be elided from this listing.
    __slots__ = ("logfile", "pkg",)

    # NOTE(review): the '_start' method header appears to be elided here.
        """
        Note: Unlike a normal AsynchronousTask.start() method,
        this one does all of its work synchronously. The returncode
        attribute will be set before it returns.
        """

        root_config = pkg.root_config
        bintree = root_config.trees["bintree"]
        # Save the real streams so they can be restored afterwards.
        stdout_orig = sys.stdout
        stderr_orig = sys.stderr
        if self.background and self.logfile is not None:
            log_file = open(self.logfile, 'a')
            if log_file is not None:
                # Route digest-check output into the log file.
                sys.stdout = log_file
                sys.stderr = log_file
                bintree.digestCheck(pkg)
            except portage.exception.FileNotFound:
                writemsg("!!! Fetching Binary failed " + \
                    "for '%s'\n" % pkg.cpv, noiselevel=-1)
            except portage.exception.DigestException, e:
                # e.value holds (message, reason, got, expected).
                writemsg("\n!!! Digest verification failed:\n",
                writemsg("!!! %s\n" % e.value[0],
                writemsg("!!! Reason: %s\n" % e.value[1],
                writemsg("!!! Got: %s\n" % e.value[2],
                writemsg("!!! Expected: %s\n" % e.value[3],

            if rval != os.EX_OK:
                # Move the corrupt file aside so a fresh fetch can occur.
                pkg_path = bintree.getname(pkg.cpv)
                head, tail = os.path.split(pkg_path)
                temp_filename = portage._checksum_failure_temp_file(head, tail)
                writemsg("File renamed to '%s'\n" % (temp_filename,),
            # Restore the original streams regardless of outcome.
            sys.stdout = stdout_orig
            sys.stderr = stderr_orig
            if log_file is not None:

        self.returncode = rval
class BinpkgPrefetcher(CompositeTask):
    # Composite task: fetch a binary package, verify its digests, then
    # inject it into the binary tree. NOTE(review): some lines appear
    # to be elided from this listing (e.g. the '_start' header).

    __slots__ = ("pkg",) + \
        ("pkg_path", "_bintree",)

    # NOTE(review): the '_start' method header appears to be elided here.
        self._bintree = self.pkg.root_config.trees["bintree"]
        fetcher = BinpkgFetcher(background=self.background,
            logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
            scheduler=self.scheduler)
        self.pkg_path = fetcher.pkg_path
        self._start_task(fetcher, self._fetcher_exit)

    def _fetcher_exit(self, fetcher):
        # Abort the composite if the fetch step failed.
        if self._default_exit(fetcher) != os.EX_OK:
        verifier = BinpkgVerifier(background=self.background,
            logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
        self._start_task(verifier, self._verifier_exit)

    def _verifier_exit(self, verifier):
        if self._default_exit(verifier) != os.EX_OK:
        # Verified: make the package visible in the binary tree.
        self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)

        self._current_task = None
        self.returncode = os.EX_OK
class BinpkgExtractorAsync(SpawnProcess):
    # Extracts a .tbz2 binary package into image_dir by piping
    # bzip2 decompression into tar inside a bash -c command line.

    __slots__ = ("image_dir", "pkg", "pkg_path")

    _shell_binary = portage.const.BASH_BINARY

    # NOTE(review): the '_start' method header appears to be elided here.
        # Both paths are shell-quoted to survive the bash -c invocation.
        self.args = [self._shell_binary, "-c",
            "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
            (portage._shell_quote(self.pkg_path),
            portage._shell_quote(self.image_dir))]

        self.env = self.pkg.root_config.settings.environ()
        SpawnProcess._start(self)
class MergeListItem(CompositeTask):
    """
    One entry of the merge list: builds/fetches an ebuild or binary
    package and later merges it via merge().

    TODO: For parallel scheduling, everything here needs asynchronous
    execution support (start, poll, and wait methods).

    NOTE(review): several lines appear to be elided from this listing
    (method headers, some assignments and guard bodies).
    """

    # NOTE(review): the __slots__ continuation line is elided below.
    __slots__ = ("args_set",
        "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
        "find_blockers", "logger", "mtimedb", "pkg",
        "pkg_count", "pkg_to_replace", "prefetcher",
        "settings", "statusMessage", "world_atom") + \

        build_opts = self.build_opts

            # uninstall,  executed by self.merge()
            self.returncode = os.EX_OK

        args_set = self.args_set
        find_blockers = self.find_blockers
        logger = self.logger
        mtimedb = self.mtimedb
        pkg_count = self.pkg_count
        scheduler = self.scheduler
        settings = self.settings
        world_atom = self.world_atom
        ldpath_mtimes = mtimedb["ldpath"]

        action_desc = "Emerging"
        if pkg.type_name == "binary":
            action_desc += " binary"

        if build_opts.fetchonly:
            action_desc = "Fetching"

        msg = "%s (%s of %s) %s" % \
            colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
            colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
            colorize("GOOD", pkg.cpv))

        # Mention the repository when it differs from the main PORTDIR repo.
        portdb = pkg.root_config.trees["porttree"].dbapi
        portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
        if portdir_repo_name:
            pkg_repo_name = pkg.metadata.get("repository")
            if pkg_repo_name != portdir_repo_name:
                if not pkg_repo_name:
                    pkg_repo_name = "unknown repo"
                msg += " from %s" % pkg_repo_name

            msg += " %s %s" % (preposition, pkg.root)

        if not build_opts.pretend:
            self.statusMessage(msg)
            logger.log(" >>> emerge (%s of %s) %s to %s" % \
                (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))

        if pkg.type_name == "ebuild":

            build = EbuildBuild(args_set=args_set,
                background=self.background,
                config_pool=self.config_pool,
                find_blockers=find_blockers,
                ldpath_mtimes=ldpath_mtimes, logger=logger,
                opts=build_opts, pkg=pkg, pkg_count=pkg_count,
                prefetcher=self.prefetcher, scheduler=scheduler,
                settings=settings, world_atom=world_atom)

            self._install_task = build
            self._start_task(build, self._default_final_exit)

        elif pkg.type_name == "binary":

            binpkg = Binpkg(background=self.background,
                find_blockers=find_blockers,
                ldpath_mtimes=ldpath_mtimes, logger=logger,
                opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
                prefetcher=self.prefetcher, settings=settings,
                scheduler=scheduler, world_atom=world_atom)

            self._install_task = binpkg
            self._start_task(binpkg, self._default_final_exit)

    # NOTE(review): delegation method headers (poll/wait) appear to be
    # elided around the following lines.
        self._install_task.poll()
        return self.returncode

        self._install_task.wait()
        return self.returncode

    # NOTE(review): the 'merge' method header appears to be elided here.
        build_opts = self.build_opts
        find_blockers = self.find_blockers
        logger = self.logger
        mtimedb = self.mtimedb
        pkg_count = self.pkg_count
        prefetcher = self.prefetcher
        scheduler = self.scheduler
        settings = self.settings
        world_atom = self.world_atom
        ldpath_mtimes = mtimedb["ldpath"]

            # Uninstallation is skipped for pure build/fetch/pretend runs.
            if not (build_opts.buildpkgonly or \
                build_opts.fetchonly or build_opts.pretend):

                uninstall = PackageUninstall(background=self.background,
                    ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
                    pkg=pkg, scheduler=scheduler, settings=settings)

                retval = uninstall.wait()
                if retval != os.EX_OK:

        if build_opts.fetchonly or \
            build_opts.buildpkgonly:
            return self.returncode

        retval = self._install_task.install()
class PackageMerge(AsynchronousTask):
    """
    Performs the actual merge (or unmerge) step for one package and
    prints a progress message first.

    TODO: Implement asynchronous merge so that the scheduler can
    run while a merge is executing.

    NOTE(review): several lines appear to be elided from this listing
    (e.g. the '_start' header and parts of the if/else branches).
    """

    __slots__ = ("merge",)

    # NOTE(review): the '_start' method header appears to be elided here.
        pkg = self.merge.pkg
        pkg_count = self.merge.pkg_count

            # Installed packages get removed rather than installed.
            action_desc = "Uninstalling"
            preposition = "from"

            action_desc = "Installing"

            counter_str = "(%s of %s) " % \
                (colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
                colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)))

            colorize("GOOD", pkg.cpv))

            msg += " %s %s" % (preposition, pkg.root)

        # Suppress the status line when nothing is actually merged.
        if not self.merge.build_opts.fetchonly and \
            not self.merge.build_opts.pretend and \
            not self.merge.build_opts.buildpkgonly:
            self.merge.statusMessage(msg)

        self.returncode = self.merge.merge()
class DependencyArg(object):
    # Base class for command-line dependency arguments (atoms, packages,
    # sets); holds the raw argument and its RootConfig.
    def __init__(self, arg=None, root_config=None):
        # NOTE(review): the assignment of self.arg (presumably
        # 'self.arg = arg') appears to be elided from this listing.
        self.root_config = root_config

    # NOTE(review): the '__str__' method header appears to be elided here.
        return str(self.arg)
class AtomArg(DependencyArg):
    # Dependency argument wrapping a single dependency atom; normalizes
    # it to a portage.dep.Atom instance and exposes a one-atom set.
    def __init__(self, atom=None, **kwargs):
        DependencyArg.__init__(self, **kwargs)
        # NOTE(review): the initial assignment of self.atom appears to
        # be elided from this listing.
        if not isinstance(self.atom, portage.dep.Atom):
            self.atom = portage.dep.Atom(self.atom)
        self.set = (self.atom, )
class PackageArg(DependencyArg):
    """A dependency argument that wraps one specific Package instance.

    The package is represented by an exact-version atom ("=<cpv>") so
    it can take part in atom-based matching like the other
    DependencyArg subclasses; ``set`` exposes that single atom.
    """

    def __init__(self, package=None, **kwargs):
        DependencyArg.__init__(self, **kwargs)
        self.package = package
        # Pin this package exactly by its cpv.
        exact_atom = portage.dep.Atom("=" + package.cpv)
        self.atom = exact_atom
        self.set = (exact_atom,)
class SetArg(DependencyArg):
    # Dependency argument for a package set (e.g. "@world"); the set
    # name is the argument with the SETPREFIX stripped.
    def __init__(self, set=None, **kwargs):
        DependencyArg.__init__(self, **kwargs)
        # NOTE(review): the assignment of self.set appears to be elided
        # from this listing.
        self.name = self.arg[len(SETPREFIX):]
class Dependency(SlotObject):
    # Value object describing one dependency edge in the graph.
    __slots__ = ("atom", "blocker", "depth",
        "parent", "onlydeps", "priority", "root")
    def __init__(self, **kwargs):
        SlotObject.__init__(self, **kwargs)
        # Default the priority when the caller did not supply one.
        if self.priority is None:
            self.priority = DepPriority()
        if self.depth is None:
            # NOTE(review): the default-depth assignment (presumably
            # 'self.depth = 0') appears to be elided from this listing.
class BlockerCache(portage.cache.mappings.MutableMapping):
    """This caches blockers of installed packages so that dep_check does not
    have to be done for every single installed package on every invocation of
    emerge. The cache is invalidated whenever it is detected that something
    has changed that might alter the results of dep_check() calls:
        1) the set of installed packages (including COUNTER) has changed
        2) the old-style virtuals have changed

    NOTE(review): several lines appear to be elided from this listing
    (e.g. method headers, 'continue' statements and 'try:' lines).
    """

    # Number of uncached packages to trigger cache update, since
    # it's wasteful to update it for every vdb change.
    _cache_threshold = 5

    class BlockerData(object):
        # Pairs an installed package's COUNTER with its blocker atoms.

        __slots__ = ("__weakref__", "atoms", "counter")

        def __init__(self, counter, atoms):
            self.counter = counter
            # NOTE(review): the assignment of self.atoms appears to be
            # elided from this listing.

    def __init__(self, myroot, vardb):
        self._virtuals = vardb.settings.getvirtuals()
        self._cache_filename = os.path.join(myroot,
            portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
        self._cache_version = "1"
        self._cache_data = None
        self._modified = set()

    # NOTE(review): a '_load' method header and 'try:' appear to be
    # elided before the following lines.
            f = open(self._cache_filename, mode='rb')
            mypickle = pickle.Unpickler(f)
                # Disable global lookups for safety when unpickling.
                mypickle.find_global = None
            except AttributeError:
                # TODO: If py3k, override Unpickler.find_class().
            self._cache_data = mypickle.load()
        except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
            if isinstance(e, pickle.UnpicklingError):
                writemsg("!!! Error loading '%s': %s\n" % \
                    (self._cache_filename, str(e)), noiselevel=-1)

        cache_valid = self._cache_data and \
            isinstance(self._cache_data, dict) and \
            self._cache_data.get("version") == self._cache_version and \
            isinstance(self._cache_data.get("blockers"), dict)
            # Validate all the atoms and counters so that
            # corruption is detected as soon as possible.
            invalid_items = set()
            for k, v in self._cache_data["blockers"].iteritems():
                if not isinstance(k, basestring):
                    invalid_items.add(k)
                    if portage.catpkgsplit(k) is None:
                        invalid_items.add(k)
                except portage.exception.InvalidData:
                    invalid_items.add(k)
                # Each value must be a (counter, atoms) pair.
                if not isinstance(v, tuple) or \
                    invalid_items.add(k)
                if not isinstance(counter, (int, long)):
                    invalid_items.add(k)
                if not isinstance(atoms, (list, tuple)):
                    invalid_items.add(k)
                invalid_atom = False
                    if not isinstance(atom, basestring):
                    # Blocker atoms must start with "!" and parse.
                    if atom[:1] != "!" or \
                        not portage.isvalidatom(
                        atom, allow_blockers=True):
                    invalid_items.add(k)

            for k in invalid_items:
                del self._cache_data["blockers"][k]
            if not self._cache_data["blockers"]:

            # Start with a fresh, empty cache structure.
            self._cache_data = {"version":self._cache_version}
            self._cache_data["blockers"] = {}
            self._cache_data["virtuals"] = self._virtuals
        self._modified.clear()

    # NOTE(review): the 'flush' method header appears to be elided here.
        """If the current user has permission and the internal blocker cache
        has been updated, save it to disk and mark it unmodified. This is called
        by emerge after it has processed blockers for all installed packages.
        Currently, the cache is only written if the user has superuser
        privileges (since that's required to obtain a lock), but all users
        have read access and benefit from faster blocker lookups (as long as
        the entire cache is still valid). The cache is stored as a pickled
        dict object with the following format:

            "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
            "virtuals" : vardb.settings.getvirtuals()
        """
        # Only write after enough changes have accumulated.
        if len(self._modified) >= self._cache_threshold and \
                f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
                pickle.dump(self._cache_data, f, protocol=2)
                portage.util.apply_secpass_permissions(
                    self._cache_filename, gid=portage.portage_gid, mode=0644)
            except (IOError, OSError), e:
            self._modified.clear()

    def __setitem__(self, cpv, blocker_data):
        """
        Update the cache and mark it as modified for a future call to
        flush().

        @param cpv: Package for which to cache blockers.
        @type cpv: String
        @param blocker_data: An object with counter and atoms attributes.
        @type blocker_data: BlockerData
        """
        self._cache_data["blockers"][cpv] = \
            (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
        self._modified.add(cpv)

    # NOTE(review): the '__iter__' method header appears to be elided here.
        if self._cache_data is None:
            # triggered by python-trace
        return iter(self._cache_data["blockers"])

    def __delitem__(self, cpv):
        del self._cache_data["blockers"][cpv]

    def __getitem__(self, cpv):
        """
        @rtype: BlockerData
        @returns: An object with counter and atoms attributes.
        """
        return self.BlockerData(*self._cache_data["blockers"][cpv])
class BlockerDB(object):
    # Computes which installed packages block (or are blocked by) a new
    # package, using a BlockerCache and a FakeVartree snapshot.
    # NOTE(review): several lines appear to be elided from this listing
    # (e.g. 'else:'/'try:'/'return' lines).

    def __init__(self, root_config):
        self._root_config = root_config
        self._vartree = root_config.trees["vartree"]
        self._portdb = root_config.trees["porttree"].dbapi

        # Created lazily by _get_fake_vartree().
        self._dep_check_trees = None
        self._fake_vartree = None

    def _get_fake_vartree(self, acquire_lock=0):
        fake_vartree = self._fake_vartree
        if fake_vartree is None:
            fake_vartree = FakeVartree(self._root_config,
                acquire_lock=acquire_lock)
            self._fake_vartree = fake_vartree
            # dep_check() resolves both port and var lookups against
            # the fake vartree snapshot.
            self._dep_check_trees = { self._vartree.root : {
                "porttree"    :  fake_vartree,
                "vartree"     :  fake_vartree,
            fake_vartree.sync(acquire_lock=acquire_lock)

    def findInstalledBlockers(self, new_pkg, acquire_lock=0):
        blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
        dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
        settings = self._vartree.settings
        # Entries remaining here afterwards are no longer installed.
        stale_cache = set(blocker_cache)
        fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
        dep_check_trees = self._dep_check_trees
        vardb = fake_vartree.dbapi
        installed_pkgs = list(vardb)

        for inst_pkg in installed_pkgs:
            stale_cache.discard(inst_pkg.cpv)
            cached_blockers = blocker_cache.get(inst_pkg.cpv)
            # A COUNTER mismatch invalidates the cached entry.
            if cached_blockers is not None and \
                cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
                cached_blockers = None
            if cached_blockers is not None:
                blocker_atoms = cached_blockers.atoms
                # Use aux_get() to trigger FakeVartree global
                # updates on *DEPEND when appropriate.
                depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
                    portage.dep._dep_check_strict = False
                    success, atoms = portage.dep_check(depstr,
                        vardb, settings, myuse=inst_pkg.use.enabled,
                        trees=dep_check_trees, myroot=inst_pkg.root)
                    portage.dep._dep_check_strict = True
                    # Report where the broken *DEPEND files live.
                    pkg_location = os.path.join(inst_pkg.root,
                        portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
                    portage.writemsg("!!! %s/*DEPEND: %s\n" % \
                        (pkg_location, atoms), noiselevel=-1)

                blocker_atoms = [atom for atom in atoms \
                    if atom.startswith("!")]
                blocker_atoms.sort()
                counter = long(inst_pkg.metadata["COUNTER"])
                blocker_cache[inst_pkg.cpv] = \
                    blocker_cache.BlockerData(counter, blocker_atoms)
        # Drop cache entries for packages no longer installed.
        for cpv in stale_cache:
            del blocker_cache[cpv]
        blocker_cache.flush()

        blocker_parents = digraph()
        for pkg in installed_pkgs:
            for blocker_atom in blocker_cache[pkg.cpv].atoms:
                blocker_atom = blocker_atom.lstrip("!")
                blocker_atoms.append(blocker_atom)
                blocker_parents.add(blocker_atom, pkg)

        blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
        blocking_pkgs = set()
        for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
            blocking_pkgs.update(blocker_parents.parent_nodes(atom))

        # Check for blockers in the other direction.
        depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
            portage.dep._dep_check_strict = False
            success, atoms = portage.dep_check(depstr,
                vardb, settings, myuse=new_pkg.use.enabled,
                trees=dep_check_trees, myroot=new_pkg.root)
            portage.dep._dep_check_strict = True
            # We should never get this far with invalid deps.
            show_invalid_depstring_notice(new_pkg, depstr, atoms)

        blocker_atoms = [atom.lstrip("!") for atom in atoms \

        blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
        for inst_pkg in installed_pkgs:
                blocker_atoms.iterAtomsForPackage(inst_pkg).next()
            except (portage.exception.InvalidDependString, StopIteration):
            blocking_pkgs.add(inst_pkg)

        return blocking_pkgs
def show_invalid_depstring_notice(parent_node, depstring, error_msg):
    # Print a detailed error explaining an invalid/corrupt dependency
    # string, with advice that differs for installed ("nomerge") vs.
    # to-be-installed packages.
    # NOTE(review): a few lines appear to be elided from this listing
    # (e.g. the 'msg = []' initialization and the 'else:' line).

    msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
        "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
    p_type, p_root, p_key, p_status = parent_node
    if p_status == "nomerge":
        # Installed package: point at its on-disk *DEPEND files.
        category, pf = portage.catsplit(p_key)
        pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
        msg.append("Portage is unable to process the dependencies of the ")
        msg.append("'%s' package. " % p_key)
        msg.append("In order to correct this problem, the package ")
        msg.append("should be uninstalled, reinstalled, or upgraded. ")
        msg.append("As a temporary workaround, the --nodeps option can ")
        msg.append("be used to ignore all dependencies.  For reference, ")
        msg.append("the problematic dependencies can be found in the ")
        msg.append("*DEPEND files located in '%s/'." % pkg_location)
        msg.append("This package can not be installed. ")
        msg.append("Please notify the '%s' package maintainer " % p_key)
        msg.append("about this problem.")

    msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
    writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
class PackageVirtualDbapi(portage.dbapi):
    """
    A dbapi-like interface class that represents the state of the installed
    package database as new packages are installed, replacing any packages
    that previously existed in the same slot. The main difference between
    this class and fakedbapi is that this one uses Package instances
    internally (passed in via cpv_inject() and cpv_remove() calls).

    NOTE(review): several lines appear to be elided from this listing
    (some method headers, guards and 'return' statements).
    """

    def __init__(self, settings):
        portage.dbapi.__init__(self)
        self.settings = settings
        # Maps match arguments to previously-computed results.
        self._match_cache = {}
        # NOTE(review): the _cp_map/_cpv_map initializations appear to
        # be elided from this listing.

    # NOTE(review): a 'clear' method header appears to be elided here.
        """
        Remove all packages.
        """
            self._cp_map.clear()
            self._cpv_map.clear()

    # NOTE(review): a 'copy' method header appears to be elided here.
        obj = PackageVirtualDbapi(self.settings)
        obj._match_cache = self._match_cache.copy()
        obj._cp_map = self._cp_map.copy()
        # Copy the per-cp lists so mutations don't alias the original.
        for k, v in obj._cp_map.iteritems():
            obj._cp_map[k] = v[:]
        obj._cpv_map = self._cpv_map.copy()

    # NOTE(review): the '__iter__' method header appears to be elided here.
        return self._cpv_map.itervalues()

    def __contains__(self, item):
        existing = self._cpv_map.get(item.cpv)
        if existing is not None and \

    def get(self, item, default=None):
        cpv = getattr(item, "cpv", None)
            # Fall back to treating item as a (type, root, cpv, op) tuple.
            type_name, root, cpv, operation = item

        existing = self._cpv_map.get(cpv)
        if existing is not None and \

    def match_pkgs(self, atom):
        return [self._cpv_map[cpv] for cpv in self.match(atom)]

    def _clear_cache(self):
        # Invalidate derived caches after the package set changed.
        if self._categories is not None:
            self._categories = None
        if self._match_cache:
            self._match_cache = {}

    def match(self, origdep, use_cache=1):
        result = self._match_cache.get(origdep)
        if result is not None:
        result = portage.dbapi.match(self, origdep, use_cache=use_cache)
        self._match_cache[origdep] = result

    def cpv_exists(self, cpv):
        return cpv in self._cpv_map

    def cp_list(self, mycp, use_cache=1):
        cachelist = self._match_cache.get(mycp)
        # cp_list() doesn't expand old-style virtuals
        if cachelist and cachelist[0].startswith(mycp):
        cpv_list = self._cp_map.get(mycp)
        if cpv_list is None:
            cpv_list = [pkg.cpv for pkg in cpv_list]
        self._cpv_sort_ascending(cpv_list)
        # Don't cache empty results for old-style virtual categories.
        if not (not cpv_list and mycp.startswith("virtual/")):
            self._match_cache[mycp] = cpv_list

    # NOTE(review): 'cp_all'/'cpv_all' method headers appear to be
    # elided around the following lines.
        return list(self._cp_map)

        return list(self._cpv_map)

    def cpv_inject(self, pkg):
        cp_list = self._cp_map.get(pkg.cp)
        self._cp_map[pkg.cp] = cp_list
        e_pkg = self._cpv_map.get(pkg.cpv)
        if e_pkg is not None:
            # Replace any existing package with the same cpv.
            self.cpv_remove(e_pkg)
        for e_pkg in cp_list:
            if e_pkg.slot_atom == pkg.slot_atom:
                # Replace any existing package occupying the same slot.
                self.cpv_remove(e_pkg)
        self._cpv_map[pkg.cpv] = pkg

    def cpv_remove(self, pkg):
        old_pkg = self._cpv_map.get(pkg.cpv)
            self._cp_map[pkg.cp].remove(pkg)
            del self._cpv_map[pkg.cpv]

    def aux_get(self, cpv, wants):
        metadata = self._cpv_map[cpv].metadata
        # Missing keys yield empty strings, matching dbapi conventions.
        return [metadata.get(x, "") for x in wants]

    def aux_update(self, cpv, values):
        self._cpv_map[cpv].metadata.update(values)
class depgraph(object):
    # Builds and analyzes the dependency graph for an emerge run.
    # NOTE(review): this class continues past the end of this listing,
    # and several lines within the visible methods appear to be elided.

    pkg_tree_map = RootConfig.pkg_tree_map

    _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]

    def __init__(self, settings, trees, myopts, myparams, spinner):
        self.settings = settings
        self.target_root = settings["ROOT"]
        self.myopts = myopts
        self.myparams = myparams
        if settings.get("PORTAGE_DEBUG", "") == "1":
        self.spinner = spinner
        self._running_root = trees["/"]["root_config"]
        self._opts_no_restart = Scheduler._opts_no_restart
        self.pkgsettings = {}
        # Maps slot atom to package for each Package added to the graph.
        self._slot_pkg_map = {}
        # Maps nodes to the reasons they were selected for reinstallation.
        self._reinstall_nodes = {}
        self._trees_orig = trees
        # Contains a filtered view of preferred packages that are selected
        # from available repositories.
        self._filtered_trees = {}
        # Contains installed packages and new packages that have been added
        # to the graph.
        self._graph_trees = {}
        # All Package instances
        self._pkg_cache = {}
        for myroot in trees:
            self.trees[myroot] = {}
            # Create a RootConfig instance that references
            # the FakeVartree instead of the real one.
            self.roots[myroot] = RootConfig(
                trees[myroot]["vartree"].settings,
                trees[myroot]["root_config"].setconfig)
            for tree in ("porttree", "bintree"):
                self.trees[myroot][tree] = trees[myroot][tree]
            self.trees[myroot]["vartree"] = \
                FakeVartree(trees[myroot]["root_config"],
                    pkg_cache=self._pkg_cache)
            self.pkgsettings[myroot] = portage.config(
                clone=self.trees[myroot]["vartree"].settings)
            self._slot_pkg_map[myroot] = {}
            vardb = self.trees[myroot]["vartree"].dbapi
            preload_installed_pkgs = "--nodeps" not in self.myopts and \
                "--buildpkgonly" not in self.myopts
            # This fakedbapi instance will model the state that the vdb will
            # have after new packages have been installed.
            fakedb = PackageVirtualDbapi(vardb.settings)
            if preload_installed_pkgs:
                    self.spinner.update()
                    # This triggers metadata updates via FakeVartree.
                    vardb.aux_get(pkg.cpv, [])
                    fakedb.cpv_inject(pkg)

            # Now that the vardb state is cached in our FakeVartree,
            # we won't be needing the real vartree cache for awhile.
            # To make some room on the heap, clear the vardbapi
            # caches.
            trees[myroot]["vartree"].dbapi._clear_cache()

            self.mydbapi[myroot] = fakedb
            # NOTE(review): the definition of 'graph_tree' (presumably a
            # dummy function) appears to be elided before this line.
            graph_tree.dbapi = fakedb
            self._graph_trees[myroot] = {}
            self._filtered_trees[myroot] = {}
            # Substitute the graph tree for the vartree in dep_check() since we
            # want atom selections to be consistent with package selections
            # have already been made.
            self._graph_trees[myroot]["porttree"] = graph_tree
            self._graph_trees[myroot]["vartree"] = graph_tree
            def filtered_tree():
            filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
            self._filtered_trees[myroot]["porttree"] = filtered_tree

            # Passing in graph_tree as the vartree here could lead to better
            # atom selections in some cases by causing atoms for packages that
            # have been added to the graph to be preferred over other choices.
            # However, it can trigger atom selections that result in
            # unresolvable direct circular dependencies. For example, this
            # happens with gwydion-dylan which depends on either itself or
            # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
            # gwydion-dylan-bin needs to be selected in order to avoid a
            # an unresolvable direct circular dependency.
            #
            # To solve the problem described above, pass in "graph_db" so that
            # packages that have been added to the graph are distinguishable
            # from other available packages and installed packages. Also, pass
            # the parent package into self._select_atoms() calls so that
            # unresolvable direct circular dependencies can be detected and
            # avoided when possible.
            self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
            self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]

            portdb = self.trees[myroot]["porttree"].dbapi
            bindb = self.trees[myroot]["bintree"].dbapi
            vardb = self.trees[myroot]["vartree"].dbapi
            # (db, pkg_type, built, installed, db_keys)
            if "--usepkgonly" not in self.myopts:
                db_keys = list(portdb._aux_cache_keys)
                dbs.append((portdb, "ebuild", False, False, db_keys))
            if "--usepkg" in self.myopts:
                db_keys = list(bindb._aux_cache_keys)
                dbs.append((bindb, "binary", True, False, db_keys))
            db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
            dbs.append((vardb, "installed", True, True, db_keys))
            self._filtered_trees[myroot]["dbs"] = dbs
            if "--usepkg" in self.myopts:
                self.trees[myroot]["bintree"].populate(
                    "--getbinpkg" in self.myopts,
                    "--getbinpkgonly" in self.myopts)

        self.digraph=portage.digraph()
        # contains all sets added to the graph
        # contains atoms given as arguments
        self._sets["args"] = InternalPackageSet()
        # contains all atoms from all sets added to the graph, including
        # atoms given as arguments
        self._set_atoms = InternalPackageSet()
        self._atom_arg_map = {}
        # contains all nodes pulled in by self._set_atoms
        self._set_nodes = set()
        # Contains only Blocker -> Uninstall edges
        self._blocker_uninstalls = digraph()
        # Contains only Package -> Blocker edges
        self._blocker_parents = digraph()
        # Contains only irrelevant Package -> Blocker edges
        self._irrelevant_blockers = digraph()
        # Contains only unsolvable Package -> Blocker edges
        self._unsolvable_blockers = digraph()
        # Contains all Blocker -> Blocked Package edges
        self._blocked_pkgs = digraph()
        # Contains world packages that have been protected from
        # uninstallation but may not have been added to the graph
        # if the graph is not complete yet.
        self._blocked_world_pkgs = {}
        self._slot_collision_info = {}
        # Slot collision nodes are not allowed to block other packages since
        # blocker validation is only able to account for one package per slot.
        self._slot_collision_nodes = set()
        self._parent_atoms = {}
        self._slot_conflict_parent_atoms = set()
        self._serialized_tasks_cache = None
        self._scheduler_graph = None
        self._displayed_list = None
        self._pprovided_args = []
        self._missing_args = []
        self._masked_installed = set()
        self._unsatisfied_deps_for_display = []
        self._unsatisfied_blockers_for_display = None
        self._circular_deps_for_display = None
        self._dep_stack = []
        self._unsatisfied_deps = []
        self._initially_unsatisfied_deps = []
        self._ignored_deps = []
        self._required_set_names = set(["system", "world"])
        self._select_atoms = self._select_atoms_highest_available
        self._select_package = self._select_pkg_highest_available
        self._highest_pkg_cache = {}

    def _show_slot_collision_notice(self):
        """Show an informational message advising the user to mask one of the
        the packages. In some cases it may be possible to resolve this
        automatically, but support for backtracking (removal nodes that have
        already been selected) will be required in order to handle all possible
        cases.

        NOTE(review): several lines appear to be elided from this
        method in this listing (guards, 'break'/'return' lines).
        """

        if not self._slot_collision_info:

        self._show_merge_list()

        msg.append("\n!!! Multiple package instances within a single " + \
            "package slot have been pulled\n")
        msg.append("!!! into the dependency graph, resulting" + \
            " in a slot conflict:\n\n")
        # Max number of parents shown, to avoid flooding the display.
        explanation_columns = 70
        for (slot_atom, root), slot_nodes \
            in self._slot_collision_info.iteritems():
            msg.append(str(slot_atom))

            for node in slot_nodes:

                msg.append(str(node))
                parent_atoms = self._parent_atoms.get(node)
                    # Prefer conflict atoms over others.
                    for parent_atom in parent_atoms:
                        if len(pruned_list) >= max_parents:
                        if parent_atom in self._slot_conflict_parent_atoms:
                            pruned_list.add(parent_atom)

                    # If this package was pulled in by conflict atoms then
                    # show those alone since those are the most interesting.
                        # When generating the pruned list, prefer instances
                        # of DependencyArg over instances of Package.
                        for parent_atom in parent_atoms:
                            if len(pruned_list) >= max_parents:
                            parent, atom = parent_atom
                            if isinstance(parent, DependencyArg):
                                pruned_list.add(parent_atom)
                        # Prefer Packages instances that themselves have been
                        # pulled into collision slots.
                        for parent_atom in parent_atoms:
                            if len(pruned_list) >= max_parents:
                            parent, atom = parent_atom
                            if isinstance(parent, Package) and \
                                (parent.slot_atom, parent.root) \
                                in self._slot_collision_info:
                                pruned_list.add(parent_atom)
                        for parent_atom in parent_atoms:
                            if len(pruned_list) >= max_parents:
                            pruned_list.add(parent_atom)
                    omitted_parents = len(parent_atoms) - len(pruned_list)
                    parent_atoms = pruned_list
                    msg.append(" pulled in by\n")
                    for parent_atom in parent_atoms:
                        parent, atom = parent_atom
                        msg.append(2*indent)
                        if isinstance(parent,
                            (PackageArg, AtomArg)):
                            # For PackageArg and AtomArg types, it's
                            # redundant to display the atom attribute.
                            msg.append(str(parent))
                            # Display the specific atom from SetArg or
                            # Package types.
                            msg.append("%s required by %s" % (atom, parent))

                        msg.append(2*indent)
                        msg.append("(and %d more)\n" % omitted_parents)

                    msg.append(" (no parents)\n")

            explanation = self._slot_conflict_explanation(slot_nodes)
                msg.append(indent + "Explanation:\n\n")
                for line in textwrap.wrap(explanation, explanation_columns):
                    msg.append(2*indent + line + "\n")

        sys.stderr.write("".join(msg))

        explanations_for_all = explanations == len(self._slot_collision_info)

        # Nothing more to add when everything was explained or --quiet.
        if explanations_for_all or "--quiet" in self.myopts:

        msg.append("It may be possible to solve this problem ")
        msg.append("by using package.mask to prevent one of ")
        msg.append("those packages from being selected. ")
        msg.append("However, it is also possible that conflicting ")
        msg.append("dependencies exist such that they are impossible to ")
        msg.append("satisfy simultaneously.  If such a conflict exists in ")
        msg.append("the dependencies of two different packages, then those ")
        msg.append("packages can not be installed simultaneously.")

        from formatter import AbstractFormatter, DumbWriter
        f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
            f.add_flowing_data(x)

        msg.append("For more information, see MASKED PACKAGES ")
        msg.append("section in the emerge man page or refer ")
        msg.append("to the Gentoo Handbook.")
            f.add_flowing_data(x)
4904 def _slot_conflict_explanation(self, slot_nodes):
# Build a human-readable explanation string for a slot conflict between
# exactly two packages, based on which package the conflict atoms match.
# NOTE(review): the embedded source numbering in this listing is
# non-contiguous — several lines (docstring quotes, early returns,
# `continue`/`break` statements) are elided here; verify against the
# full file before editing.
4906 When a slot conflict occurs due to USE deps, there are a few
4907 different cases to consider:
4909 1) New USE are correctly set but --newuse wasn't requested so an
4910 installed package with incorrect USE happened to get pulled
4911 into graph before the new one.
4913 2) New USE are incorrectly set but an installed package has correct
4914 USE so it got pulled into the graph, and a new instance also got
4915 pulled in due to --newuse or an upgrade.
4917 3) Multiple USE deps exist that can't be satisfied simultaneously,
4918 and multiple package instances got pulled into the same slot to
4919 satisfy the conflicting deps.
4921 Currently, explanations and suggested courses of action are generated
4922 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
# Guard: suggestions are only produced for two-package conflicts.
4925 if len(slot_nodes) != 2:
4926 # Suggestions are only implemented for
4927 # conflicts between two packages.
4930 all_conflict_atoms = self._slot_conflict_parent_atoms
# Partition the two nodes into the one matched by the conflict atoms
# and the one that is not; ambiguity (both or neither matched) aborts.
4932 matched_atoms = None
4933 unmatched_node = None
4934 for node in slot_nodes:
4935 parent_atoms = self._parent_atoms.get(node)
4936 if not parent_atoms:
4937 # Normally, there are always parent atoms. If there are
4938 # none then something unexpected is happening and there's
4939 # currently no suggestion for this case.
4941 conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4942 for parent_atom in conflict_atoms:
4943 parent, atom = parent_atom
4945 # Suggestions are currently only implemented for cases
4946 # in which all conflict atoms have USE deps.
4949 if matched_node is not None:
4950 # If conflict atoms match multiple nodes
4951 # then there's no suggestion.
4954 matched_atoms = conflict_atoms
4956 if unmatched_node is not None:
4957 # Neither node is matched by conflict atoms, and
4958 # there is no suggestion for this case.
4960 unmatched_node = node
4962 if matched_node is None or unmatched_node is None:
4963 # This shouldn't happen.
# Case 1: same version, one installed with stale USE -> suggest --newuse.
4966 if unmatched_node.installed and not matched_node.installed and \
4967 unmatched_node.cpv == matched_node.cpv:
4968 # If the conflicting packages are the same version then
4969 # --newuse should be all that's needed. If they are different
4970 # versions then there's some other problem.
4971 return "New USE are correctly set, but --newuse wasn't" + \
4972 " requested, so an installed package with incorrect USE " + \
4973 "happened to get pulled into the dependency graph. " + \
4974 "In order to solve " + \
4975 "this, either specify the --newuse option or explicitly " + \
4976 " reinstall '%s'." % matched_node.slot_atom
# Case 2: installed package matched, new instance did not -> suggest
# adjusting USE to satisfy the listed atoms.
4978 if matched_node.installed and not unmatched_node.installed:
4979 atoms = sorted(set(atom for parent, atom in matched_atoms))
4980 explanation = ("New USE for '%s' are incorrectly set. " + \
4981 "In order to solve this, adjust USE to satisfy '%s'") % \
4982 (matched_node.slot_atom, atoms[0])
4984 for atom in atoms[1:-1]:
4985 explanation += ", '%s'" % (atom,)
4988 explanation += " and '%s'" % (atoms[-1],)
4994 def _process_slot_conflicts(self):
# Populate self._slot_conflict_parent_atoms with the (parent, atom)
# pairs that only match a subset of the packages in a conflicting slot.
# NOTE(review): this listing has elided lines (non-contiguous source
# numbering — e.g. a `continue` after the `if not parent_atoms:` and
# `if parent_atom in parent_atoms:` guards); verify against the full file.
4996 Process slot conflict data to identify specific atoms which
4997 lead to conflict. These atoms only match a subset of the
4998 packages that have been pulled into a given slot.
5000 for (slot_atom, root), slot_nodes \
5001 in self._slot_collision_info.iteritems():
# First gather every parent atom seen on any package in this slot.
5003 all_parent_atoms = set()
5004 for pkg in slot_nodes:
5005 parent_atoms = self._parent_atoms.get(pkg)
5006 if not parent_atoms:
5008 all_parent_atoms.update(parent_atoms)
# Then, for each package, test the atoms it does not already carry;
# an atom that matches this package but not all others in the slot is
# recorded as a conflict atom.
5010 for pkg in slot_nodes:
5011 parent_atoms = self._parent_atoms.get(pkg)
5012 if parent_atoms is None:
5013 parent_atoms = set()
5014 self._parent_atoms[pkg] = parent_atoms
5015 for parent_atom in all_parent_atoms:
5016 if parent_atom in parent_atoms:
5018 # Use package set for matching since it will match via
5019 # PROVIDE when necessary, while match_from_list does not.
5020 parent, atom = parent_atom
5021 atom_set = InternalPackageSet(
5022 initial_atoms=(atom,))
5023 if atom_set.findAtomForPackage(pkg):
5024 parent_atoms.add(parent_atom)
5026 self._slot_conflict_parent_atoms.add(parent_atom)
5028 def _reinstall_for_flags(self, forced_flags,
5029 orig_use, orig_iuse, cur_use, cur_iuse):
5030 """Return a set of flags that trigger reinstallation, or None if there
5031 are no such flags."""
# --newuse: any IUSE addition/removal (minus forced flags) or any change
# in effective enabled USE triggers a reinstall.
5032 if "--newuse" in self.myopts:
5033 flags = set(orig_iuse.symmetric_difference(
5034 cur_iuse).difference(forced_flags))
5035 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
5036 cur_iuse.intersection(cur_use)))
# --reinstall=changed-use: only changes in effective enabled USE count.
# NOTE(review): the `if flags: return flags` / final `return None` lines
# appear elided in this listing (numbering gaps at 5037-5038, 5042-5044);
# confirm against the full file.
5039 elif "changed-use" == self.myopts.get("--reinstall"):
5040 flags = orig_iuse.intersection(orig_use).symmetric_difference(
5041 cur_iuse.intersection(cur_use))
5046 def _create_graph(self, allow_unsatisfied=False):
# Drain self._dep_stack, dispatching each queued item to either
# _add_pkg_deps (for Package instances) or _add_dep (for Dependency
# instances). Returns falsy on failure (the return statements, and
# presumably the `while dep_stack:` loop header, are elided in this
# listing — non-contiguous numbering; verify against the full file).
5047 dep_stack = self._dep_stack
5049 self.spinner.update()
5050 dep = dep_stack.pop()
5051 if isinstance(dep, Package):
5052 if not self._add_pkg_deps(dep,
5053 allow_unsatisfied=allow_unsatisfied):
5056 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
5060 def _add_dep(self, dep, allow_unsatisfied=False):
# Resolve a single Dependency: record blockers, select a matching
# package, and add it to the graph via _add_pkg. Returns falsy on an
# unresolvable dep (return statements are elided in this listing —
# non-contiguous source numbering; verify against the full file).
5061 debug = "--debug" in self.myopts
5062 buildpkgonly = "--buildpkgonly" in self.myopts
5063 nodeps = "--nodeps" in self.myopts
5064 empty = "empty" in self.myparams
5065 deep = "deep" in self.myparams
5066 update = "--update" in self.myopts and dep.depth <= 1
# Blocker handling (the `dep.blocker` test itself appears elided here).
5068 if not buildpkgonly and \
5070 dep.parent not in self._slot_collision_nodes:
5071 if dep.parent.onlydeps:
5072 # It's safe to ignore blockers if the
5073 # parent is an --onlydeps node.
5075 # The blocker applies to the root where
5076 # the parent is or will be installed.
5077 blocker = Blocker(atom=dep.atom,
5078 eapi=dep.parent.metadata["EAPI"],
5079 root=dep.parent.root)
5080 self._blocker_parents.add(blocker, dep.parent)
5082 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
5083 onlydeps=dep.onlydeps)
# Unsatisfied dep: optionally queue it, otherwise record it for display.
5085 if dep.priority.optional:
5086 # This could be an unecessary build-time dep
5087 # pulled in by --with-bdeps=y.
5089 if allow_unsatisfied:
5090 self._unsatisfied_deps.append(dep)
5092 self._unsatisfied_deps_for_display.append(
5093 ((dep.root, dep.atom), {"myparent":dep.parent}))
5095 # In some cases, dep_check will return deps that shouldn't
5096 # be proccessed any further, so they are identified and
5097 # discarded here. Try to discard as few as possible since
5098 # discarded dependencies reduce the amount of information
5099 # available for optimization of merge order.
5100 if dep.priority.satisfied and \
5101 not dep_pkg.installed and \
5102 not (existing_node or empty or deep or update):
5104 if dep.root == self.target_root:
5106 myarg = self._iter_atoms_for_pkg(dep_pkg).next()
5107 except StopIteration:
5109 except portage.exception.InvalidDependString:
5110 if not dep_pkg.installed:
5111 # This shouldn't happen since the package
5112 # should have been masked.
5115 self._ignored_deps.append(dep)
5118 if not self._add_pkg(dep_pkg, dep):
5122 def _add_pkg(self, pkg, dep):
# Add a Package node to the dependency digraph, handling argument
# atoms, slot collisions, old-style virtual registration, and queueing
# the package for recursive dependency processing. Returns falsy on
# failure. NOTE(review): this listing has many elided lines
# (non-contiguous numbering — docstring quotes, `try:` headers, return
# statements, blank lines); verify against the full file before editing.
5129 myparent = dep.parent
5130 priority = dep.priority
5132 if priority is None:
5133 priority = DepPriority()
5135 Fills the digraph with nodes comprised of packages to merge.
5136 mybigkey is the package spec of the package to merge.
5137 myparent is the package depending on mybigkey ( or None )
5138 addme = Should we add this package to the digraph or are we just looking at it's deps?
5139 Think --onlydeps, we need to ignore packages in that case.
5142 #IUSE-aware emerge -> USE DEP aware depgraph
5143 #"no downgrade" emerge
5145 # Ensure that the dependencies of the same package
5146 # are never processed more than once.
5147 previously_added = pkg in self.digraph
5149 # select the correct /var database that we'll be checking against
5150 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5151 pkgsettings = self.pkgsettings[pkg.root]
# Collect the command-line argument atoms that match this package.
5156 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5157 except portage.exception.InvalidDependString, e:
5158 if not pkg.installed:
5159 show_invalid_depstring_notice(
5160 pkg, pkg.metadata["PROVIDE"], str(e))
5164 if not pkg.onlydeps:
5165 if not pkg.installed and \
5166 "empty" not in self.myparams and \
5167 vardbapi.match(pkg.slot_atom):
5168 # Increase the priority of dependencies on packages that
5169 # are being rebuilt. This optimizes merge order so that
5170 # dependencies are rebuilt/updated as soon as possible,
5171 # which is needed especially when emerge is called by
5172 # revdep-rebuild since dependencies may be affected by ABI
5173 # breakage that has rendered them useless. Don't adjust
5174 # priority here when in "empty" mode since all packages
5175 # are being merged in that case.
5176 priority.rebuild = True
# Slot-collision detection: another package already occupies this slot.
5178 existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5179 slot_collision = False
5181 existing_node_matches = pkg.cpv == existing_node.cpv
5182 if existing_node_matches and \
5183 pkg != existing_node and \
5184 dep.atom is not None:
5185 # Use package set for matching since it will match via
5186 # PROVIDE when necessary, while match_from_list does not.
5187 atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5188 if not atom_set.findAtomForPackage(existing_node):
5189 existing_node_matches = False
5190 if existing_node_matches:
5191 # The existing node can be reused.
5193 for parent_atom in arg_atoms:
5194 parent, atom = parent_atom
5195 self.digraph.add(existing_node, parent,
5197 self._add_parent_atom(existing_node, parent_atom)
5198 # If a direct circular dependency is not an unsatisfied
5199 # buildtime dependency then drop it here since otherwise
5200 # it can skew the merge order calculation in an unwanted
5202 if existing_node != myparent or \
5203 (priority.buildtime and not priority.satisfied):
5204 self.digraph.addnode(existing_node, myparent,
5206 if dep.atom is not None and dep.parent is not None:
5207 self._add_parent_atom(existing_node,
5208 (dep.parent, dep.atom))
5212 # A slot collision has occurred. Sometimes this coincides
5213 # with unresolvable blockers, so the slot collision will be
5214 # shown later if there are no unresolvable blockers.
5215 self._add_slot_conflict(pkg)
5216 slot_collision = True
5219 # Now add this node to the graph so that self.display()
5220 # can show use flags and --tree portage.output. This node is
5221 # only being partially added to the graph. It must not be
5222 # allowed to interfere with the other nodes that have been
5223 # added. Do not overwrite data for existing nodes in
5224 # self.mydbapi since that data will be used for blocker
5226 # Even though the graph is now invalid, continue to process
5227 # dependencies so that things like --fetchonly can still
5228 # function despite collisions.
5230 elif not previously_added:
5231 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5232 self.mydbapi[pkg.root].cpv_inject(pkg)
5233 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5235 if not pkg.installed:
5236 # Allow this package to satisfy old-style virtuals in case it
5237 # doesn't already. Any pre-existing providers will be preferred
5240 pkgsettings.setinst(pkg.cpv, pkg.metadata)
5241 # For consistency, also update the global virtuals.
5242 settings = self.roots[pkg.root].settings
5244 settings.setinst(pkg.cpv, pkg.metadata)
5246 except portage.exception.InvalidDependString, e:
5247 show_invalid_depstring_notice(
5248 pkg, pkg.metadata["PROVIDE"], str(e))
5253 self._set_nodes.add(pkg)
5255 # Do this even when addme is False (--onlydeps) so that the
5256 # parent/child relationship is always known in case
5257 # self._show_slot_collision_notice() needs to be called later.
5258 self.digraph.add(pkg, myparent, priority=priority)
5259 if dep.atom is not None and dep.parent is not None:
5260 self._add_parent_atom(pkg, (dep.parent, dep.atom))
5263 for parent_atom in arg_atoms:
5264 parent, atom = parent_atom
5265 self.digraph.add(pkg, parent, priority=priority)
5266 self._add_parent_atom(pkg, parent_atom)
5268 """ This section determines whether we go deeper into dependencies or not.
5269 We want to go deeper on a few occasions:
5270 Installing package A, we need to make sure package A's deps are met.
5271 emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
5272 If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
5274 dep_stack = self._dep_stack
5275 if "recurse" not in self.myparams:
5277 elif pkg.installed and \
5278 "deep" not in self.myparams:
5279 dep_stack = self._ignored_deps
5281 self.spinner.update()
# Queue the package exactly once for dependency processing.
5286 if not previously_added:
5287 dep_stack.append(pkg)
5290 def _add_parent_atom(self, pkg, parent_atom):
5291 parent_atoms = self._parent_atoms.get(pkg)
5292 if parent_atoms is None:
5293 parent_atoms = set()
5294 self._parent_atoms[pkg] = parent_atoms
5295 parent_atoms.add(parent_atom)
5297 def _add_slot_conflict(self, pkg):
# Register pkg as a slot-collision node and record both pkg and the
# package currently occupying its slot under the (slot_atom, root) key
# in self._slot_collision_info. NOTE(review): the line that initializes
# slot_nodes when the key is new appears elided in this listing
# (numbering gap at 5302); verify against the full file.
5298 self._slot_collision_nodes.add(pkg)
5299 slot_key = (pkg.slot_atom, pkg.root)
5300 slot_nodes = self._slot_collision_info.get(slot_key)
5301 if slot_nodes is None:
5303 slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5304 self._slot_collision_info[slot_key] = slot_nodes
5307 def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
# Expand pkg's DEPEND/RDEPEND/PDEPEND strings into Dependency objects
# and feed each to _add_dep. Returns falsy if any dep fails to resolve.
# NOTE(review): this listing has elided lines (non-contiguous numbering
# — e.g. the edepend initialization loop header, `try:` statements,
# return statements, and the `pkg.built` DEPEND/RDEPEND root selection);
# verify against the full file before editing.
5309 mytype = pkg.type_name
5312 metadata = pkg.metadata
5313 myuse = pkg.use.enabled
5315 depth = pkg.depth + 1
5316 removal_action = "remove" in self.myparams
5319 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5321 edepend[k] = metadata[k]
# --buildpkgonly without deep/empty: runtime deps are irrelevant since
# nothing will actually be merged.
5323 if not pkg.built and \
5324 "--buildpkgonly" in self.myopts and \
5325 "deep" not in self.myparams and \
5326 "empty" not in self.myparams:
5327 edepend["RDEPEND"] = ""
5328 edepend["PDEPEND"] = ""
5329 bdeps_optional = False
5331 if pkg.built and not removal_action:
5332 if self.myopts.get("--with-bdeps", "n") == "y":
5333 # Pull in build time deps as requested, but marked them as
5334 # "optional" since they are not strictly required. This allows
5335 # more freedom in the merge order calculation for solving
5336 # circular dependencies. Don't convert to PDEPEND since that
5337 # could make --with-bdeps=y less effective if it is used to
5338 # adjust merge order to prevent built_with_use() calls from
5340 bdeps_optional = True
5342 # built packages do not have build time dependencies.
5343 edepend["DEPEND"] = ""
5345 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5346 edepend["DEPEND"] = ""
5349 root_deps = self.myopts.get("--root-deps")
5350 if root_deps is not None:
5351 if root_deps is True:
5353 elif root_deps == "rdeps":
5354 edepend["DEPEND"] = ""
# (dep_root, dep_string, priority) triples; build-time deps may target
# a different root than runtime deps.
5357 (bdeps_root, edepend["DEPEND"],
5358 self._priority(buildtime=(not bdeps_optional),
5359 optional=bdeps_optional)),
5360 (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5361 (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5364 debug = "--debug" in self.myopts
5365 strict = mytype != "installed"
5367 for dep_root, dep_string, dep_priority in deps:
5372 print "Parent: ", jbigkey
5373 print "Depstring:", dep_string
5374 print "Priority:", dep_priority
5375 vardb = self.roots[dep_root].trees["vartree"].dbapi
5377 selected_atoms = self._select_atoms(dep_root,
5378 dep_string, myuse=myuse, parent=pkg, strict=strict,
5379 priority=dep_priority)
5380 except portage.exception.InvalidDependString, e:
5381 show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5384 print "Candidates:", selected_atoms
5386 for atom in selected_atoms:
5389 atom = portage.dep.Atom(atom)
5391 mypriority = dep_priority.copy()
# A non-blocker atom already satisfied by an installed package is
# marked satisfied so merge ordering can relax it.
5392 if not atom.blocker and vardb.match(atom):
5393 mypriority.satisfied = True
5395 if not self._add_dep(Dependency(atom=atom,
5396 blocker=atom.blocker, depth=depth, parent=pkg,
5397 priority=mypriority, root=dep_root),
5398 allow_unsatisfied=allow_unsatisfied):
5401 except portage.exception.InvalidAtom, e:
5402 show_invalid_depstring_notice(
5403 pkg, dep_string, str(e))
5405 if not pkg.installed:
5409 print "Exiting...", jbigkey
5410 except portage.exception.AmbiguousPackageName, e:
5412 portage.writemsg("\n\n!!! An atom in the dependencies " + \
5413 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5415 portage.writemsg(" %s\n" % cpv, noiselevel=-1)
5416 portage.writemsg("\n", noiselevel=-1)
5417 if mytype == "binary":
5419 "!!! This binary package cannot be installed: '%s'\n" % \
5420 mykey, noiselevel=-1)
5421 elif mytype == "ebuild":
5422 portdb = self.roots[myroot].trees["porttree"].dbapi
5423 myebuild, mylocation = portdb.findname2(mykey)
5424 portage.writemsg("!!! This ebuild cannot be installed: " + \
5425 "'%s'\n" % myebuild, noiselevel=-1)
5426 portage.writemsg("!!! Please notify the package maintainer " + \
5427 "that atoms must be fully-qualified.\n", noiselevel=-1)
5431 def _priority(self, **kwargs):
# Construct a dep priority of the class appropriate for the current
# mode: UnmergeDepPriority during removal actions, DepPriority
# otherwise. NOTE(review): the `else:` line appears elided in this
# listing (numbering gap at 5434); verify against the full file.
5432 if "remove" in self.myparams:
5433 priority_constructor = UnmergeDepPriority
5435 priority_constructor = DepPriority
5436 return priority_constructor(**kwargs)
5438 def _dep_expand(self, root_config, atom_without_category):
# Expand a category-less atom into all candidate category/pn atoms by
# probing every configured package database for matching cp entries.
# NOTE(review): this listing has elided lines (docstring quotes, the
# `categories` set construction, the `deps` list initialization and
# final return); verify against the full file.
5440 @param root_config: a root config instance
5441 @type root_config: RootConfig
5442 @param atom_without_category: an atom without a category component
5443 @type atom_without_category: String
5445 @returns: a list of atoms containing categories (possibly empty)
# Insert a placeholder "null" category so the package-name part can be
# extracted with the normal atom-parsing helpers.
5447 null_cp = portage.dep_getkey(insert_category_into_atom(
5448 atom_without_category, "null"))
5449 cat, atom_pn = portage.catsplit(null_cp)
5451 dbs = self._filtered_trees[root_config.root]["dbs"]
5453 for db, pkg_type, built, installed, db_keys in dbs:
5454 for cat in db.categories:
5455 if db.cp_list("%s/%s" % (cat, atom_pn)):
5459 for cat in categories:
5460 deps.append(insert_category_into_atom(
5461 atom_without_category, cat))
5464 def _have_new_virt(self, root, atom_cp):
# Return whether any package database for the given root carries a
# package under atom_cp (used to detect new-style virtual providers).
# NOTE(review): the result-variable initialization and return lines are
# elided in this listing (numbering gaps at 5465 and 5469-5472); verify
# against the full file.
5466 for db, pkg_type, built, installed, db_keys in \
5467 self._filtered_trees[root]["dbs"]:
5468 if db.cp_list(atom_cp):
5473 def _iter_atoms_for_pkg(self, pkg):
# Generator yielding the command-line argument atoms that apply to pkg,
# skipping atoms superseded by new-style virtuals or by visible packages
# in higher slots. NOTE(review): this listing has elided lines
# (non-contiguous numbering — `continue`/`break` statements, the
# `higher_slot` initialization, and the `yield` appear missing); verify
# against the full file.
5474 # TODO: add multiple $ROOT support
5475 if pkg.root != self.target_root:
5477 atom_arg_map = self._atom_arg_map
5478 root_config = self.roots[pkg.root]
5479 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5480 atom_cp = portage.dep_getkey(atom)
# Skip the atom when a new-style virtual provider exists for it.
5481 if atom_cp != pkg.cp and \
5482 self._have_new_virt(pkg.root, atom_cp):
5484 visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5485 visible_pkgs.reverse() # descending order
5487 for visible_pkg in visible_pkgs:
5488 if visible_pkg.cp != atom_cp:
5490 if pkg >= visible_pkg:
5491 # This is descending order, and we're not
5492 # interested in any versions <= pkg given.
5494 if pkg.slot_atom != visible_pkg.slot_atom:
5495 higher_slot = visible_pkg
5497 if higher_slot is not None:
5499 for arg in atom_arg_map[(atom, pkg.root)]:
5500 if isinstance(arg, PackageArg) and \
5505 def select_files(self, myfiles):
5506 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
5507 appropriate depgraph and return a favorite list."""
# Parses each command-line argument into an Arg object (PackageArg for
# .tbz2/.ebuild files, SetArg for sets, AtomArg for atoms/paths),
# applies greedy-slot expansion under --update, then seeds the graph.
# Returns a (success, favorites) pair. NOTE(review): this listing has
# many elided lines (non-contiguous numbering — loop headers, `try:`
# statements, `continue`/`else:` lines and blank lines); verify against
# the full file before editing.
5508 debug = "--debug" in self.myopts
5509 root_config = self.roots[self.target_root]
5510 sets = root_config.sets
5511 getSetAtoms = root_config.setconfig.getSetAtoms
5513 myroot = self.target_root
5514 dbs = self._filtered_trees[myroot]["dbs"]
5515 vardb = self.trees[myroot]["vartree"].dbapi
5516 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5517 portdb = self.trees[myroot]["porttree"].dbapi
5518 bindb = self.trees[myroot]["bintree"].dbapi
5519 pkgsettings = self.pkgsettings[myroot]
5521 onlydeps = "--onlydeps" in self.myopts
# --- Binary package (.tbz2) arguments ---
5524 ext = os.path.splitext(x)[1]
5526 if not os.path.exists(x):
5528 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5529 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5530 elif os.path.exists(
5531 os.path.join(pkgsettings["PKGDIR"], x)):
5532 x = os.path.join(pkgsettings["PKGDIR"], x)
5534 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5535 print "!!! Please ensure the tbz2 exists as specified.\n"
5536 return 0, myfavorites
5537 mytbz2=portage.xpak.tbz2(x)
5538 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5539 if os.path.realpath(x) != \
5540 os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5541 print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5542 return 0, myfavorites
5543 db_keys = list(bindb._aux_cache_keys)
5544 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5545 pkg = Package(type_name="binary", root_config=root_config,
5546 cpv=mykey, built=True, metadata=metadata,
5548 self._pkg_cache[pkg] = pkg
5549 args.append(PackageArg(arg=x, package=pkg,
5550 root_config=root_config))
# --- Raw .ebuild file arguments ---
5551 elif ext==".ebuild":
5552 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5553 pkgdir = os.path.dirname(ebuild_path)
5554 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5555 cp = pkgdir[len(tree_root)+1:]
5556 e = portage.exception.PackageNotFound(
5557 ("%s is not in a valid portage tree " + \
5558 "hierarchy or does not exist") % x)
5559 if not portage.isvalidatom(cp):
5561 cat = portage.catsplit(cp)[0]
5562 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5563 if not portage.isvalidatom("="+mykey):
5565 ebuild_path = portdb.findname(mykey)
5567 if ebuild_path != os.path.join(os.path.realpath(tree_root),
5568 cp, os.path.basename(ebuild_path)):
5569 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5570 return 0, myfavorites
5571 if mykey not in portdb.xmatch(
5572 "match-visible", portage.dep_getkey(mykey)):
5573 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5574 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5575 print colorize("BAD", "*** page for details.")
5576 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5579 raise portage.exception.PackageNotFound(
5580 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5581 db_keys = list(portdb._aux_cache_keys)
5582 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5583 pkg = Package(type_name="ebuild", root_config=root_config,
5584 cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5585 pkgsettings.setcpv(pkg)
5586 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5587 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
5588 self._pkg_cache[pkg] = pkg
5589 args.append(PackageArg(arg=x, package=pkg,
5590 root_config=root_config))
# --- Filesystem path arguments: resolve owners later in one batch ---
5591 elif x.startswith(os.path.sep):
5592 if not x.startswith(myroot):
5593 portage.writemsg(("\n\n!!! '%s' does not start with" + \
5594 " $ROOT.\n") % x, noiselevel=-1)
5596 # Queue these up since it's most efficient to handle
5597 # multiple files in a single iter_owners() call.
5598 lookup_owners.append(x)
# --- Set arguments (system/world and @set syntax) ---
5600 if x in ("system", "world"):
5602 if x.startswith(SETPREFIX):
5603 s = x[len(SETPREFIX):]
5605 raise portage.exception.PackageSetNotFound(s)
5608 # Recursively expand sets so that containment tests in
5609 # self._get_parent_sets() properly match atoms in nested
5610 # sets (like if world contains system).
5611 expanded_set = InternalPackageSet(
5612 initial_atoms=getSetAtoms(s))
5613 self._sets[s] = expanded_set
5614 args.append(SetArg(arg=x, set=expanded_set,
5615 root_config=root_config))
# --- Plain atom arguments ---
5617 if not is_valid_package_atom(x):
5618 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5620 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5621 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5623 # Don't expand categories or old-style virtuals here unless
5624 # necessary. Expansion of old-style virtuals here causes at
5625 # least the following problems:
5626 # 1) It's more difficult to determine which set(s) an atom
5627 # came from, if any.
5628 # 2) It takes away freedom from the resolver to choose other
5629 # possible expansions when necessary.
5631 args.append(AtomArg(arg=x, atom=x,
5632 root_config=root_config))
5634 expanded_atoms = self._dep_expand(root_config, x)
5635 installed_cp_set = set()
5636 for atom in expanded_atoms:
5637 atom_cp = portage.dep_getkey(atom)
5638 if vardb.cp_list(atom_cp):
5639 installed_cp_set.add(atom_cp)
# Prefer the single non-virtual candidate when several installed
# packages match the expanded atom.
5641 if len(installed_cp_set) > 1:
5642 non_virtual_cps = set()
5643 for atom_cp in installed_cp_set:
5644 if not atom_cp.startswith("virtual/"):
5645 non_virtual_cps.add(atom_cp)
5646 if len(non_virtual_cps) == 1:
5647 installed_cp_set = non_virtual_cps
5649 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5650 installed_cp = iter(installed_cp_set).next()
5651 expanded_atoms = [atom for atom in expanded_atoms \
5652 if portage.dep_getkey(atom) == installed_cp]
5654 if len(expanded_atoms) > 1:
5657 ambiguous_package_name(x, expanded_atoms, root_config,
5658 self.spinner, self.myopts)
5659 return False, myfavorites
5661 atom = expanded_atoms[0]
5663 null_atom = insert_category_into_atom(x, "null")
5664 null_cp = portage.dep_getkey(null_atom)
5665 cat, atom_pn = portage.catsplit(null_cp)
5666 virts_p = root_config.settings.get_virts_p().get(atom_pn)
5668 # Allow the depgraph to choose which virtual.
5669 atom = insert_category_into_atom(x, "virtual")
5671 atom = insert_category_into_atom(x, "null")
5673 args.append(AtomArg(arg=x, atom=atom,
5674 root_config=root_config))
# --- Resolve queued filesystem paths to owning packages ---
5678 search_for_multiple = False
5679 if len(lookup_owners) > 1:
5680 search_for_multiple = True
5682 for x in lookup_owners:
5683 if not search_for_multiple and os.path.isdir(x):
5684 search_for_multiple = True
5685 relative_paths.append(x[len(myroot):])
5688 for pkg, relative_path in \
5689 real_vardb._owners.iter_owners(relative_paths):
5690 owners.add(pkg.mycpv)
5691 if not search_for_multiple:
5695 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5696 "by any package.\n") % lookup_owners[0], noiselevel=-1)
5700 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5702 # portage now masks packages with missing slot, but it's
5703 # possible that one was installed by an older version
5704 atom = portage.cpv_getkey(cpv)
5706 atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5707 args.append(AtomArg(arg=atom, atom=atom,
5708 root_config=root_config))
# --- Greedy slot expansion under --update (two-pass) ---
5710 if "--update" in self.myopts:
5711 # In some cases, the greedy slots behavior can pull in a slot that
5712 # the user would want to uninstall due to it being blocked by a
5713 # newer version in a different slot. Therefore, it's necessary to
5714 # detect and discard any that should be uninstalled. Each time
5715 # that arguments are updated, package selections are repeated in
5716 # order to ensure consistency with the current arguments:
5718 # 1) Initialize args
5719 # 2) Select packages and generate initial greedy atoms
5720 # 3) Update args with greedy atoms
5721 # 4) Select packages and generate greedy atoms again, while
5722 # accounting for any blockers between selected packages
5723 # 5) Update args with revised greedy atoms
5725 self._set_args(args)
5728 greedy_args.append(arg)
5729 if not isinstance(arg, AtomArg):
5731 for atom in self._greedy_slots(arg.root_config, arg.atom):
5733 AtomArg(arg=arg.arg, atom=atom,
5734 root_config=arg.root_config))
5736 self._set_args(greedy_args)
5739 # Revise greedy atoms, accounting for any blockers
5740 # between selected packages.
5741 revised_greedy_args = []
5743 revised_greedy_args.append(arg)
5744 if not isinstance(arg, AtomArg):
5746 for atom in self._greedy_slots(arg.root_config, arg.atom,
5747 blocker_lookahead=True):
5748 revised_greedy_args.append(
5749 AtomArg(arg=arg.arg, atom=atom,
5750 root_config=arg.root_config))
5751 args = revised_greedy_args
5752 del revised_greedy_args
5754 self._set_args(args)
# Record favorites (deduplicated) from atom/package/set arguments.
5756 myfavorites = set(myfavorites)
5758 if isinstance(arg, (AtomArg, PackageArg)):
5759 myfavorites.add(arg.atom)
5760 elif isinstance(arg, SetArg):
5761 myfavorites.add(arg.arg)
5762 myfavorites = list(myfavorites)
5764 pprovideddict = pkgsettings.pprovideddict
5766 portage.writemsg("\n", noiselevel=-1)
5767 # Order needs to be preserved since a feature of --nodeps
5768 # is to allow the user to force a specific merge order.
# --- Seed the dependency graph from each argument's atoms ---
5772 for atom in arg.set:
5773 self.spinner.update()
5774 dep = Dependency(atom=atom, onlydeps=onlydeps,
5775 root=myroot, parent=arg)
5776 atom_cp = portage.dep_getkey(atom)
5778 pprovided = pprovideddict.get(portage.dep_getkey(atom))
5779 if pprovided and portage.match_from_list(atom, pprovided):
5780 # A provided package has been specified on the command line.
5781 self._pprovided_args.append((arg, atom))
5783 if isinstance(arg, PackageArg):
5784 if not self._add_pkg(arg.package, dep) or \
5785 not self._create_graph():
5786 sys.stderr.write(("\n\n!!! Problem resolving " + \
5787 "dependencies for %s\n") % arg.arg)
5788 return 0, myfavorites
5791 portage.writemsg(" Arg: %s\n Atom: %s\n" % \
5792 (arg, atom), noiselevel=-1)
5793 pkg, existing_node = self._select_package(
5794 myroot, atom, onlydeps=onlydeps)
5796 if not (isinstance(arg, SetArg) and \
5797 arg.name in ("system", "world")):
5798 self._unsatisfied_deps_for_display.append(
5799 ((myroot, atom), {}))
5800 return 0, myfavorites
5801 self._missing_args.append((arg, atom))
5803 if atom_cp != pkg.cp:
5804 # For old-style virtuals, we need to repeat the
5805 # package.provided check against the selected package.
5806 expanded_atom = atom.replace(atom_cp, pkg.cp)
5807 pprovided = pprovideddict.get(pkg.cp)
5809 portage.match_from_list(expanded_atom, pprovided):
5810 # A provided package has been
5811 # specified on the command line.
5812 self._pprovided_args.append((arg, atom))
5814 if pkg.installed and "selective" not in self.myparams:
5815 self._unsatisfied_deps_for_display.append(
5816 ((myroot, atom), {}))
5817 # Previous behavior was to bail out in this case, but
5818 # since the dep is satisfied by the installed package,
5819 # it's more friendly to continue building the graph
5820 # and just show a warning message. Therefore, only bail
5821 # out here if the atom is not from either the system or
5823 if not (isinstance(arg, SetArg) and \
5824 arg.name in ("system", "world")):
5825 return 0, myfavorites
5827 # Add the selected package to the graph as soon as possible
5828 # so that later dep_check() calls can use it as feedback
5829 # for making more consistent atom selections.
5830 if not self._add_pkg(pkg, dep):
5831 if isinstance(arg, SetArg):
5832 sys.stderr.write(("\n\n!!! Problem resolving " + \
5833 "dependencies for %s from %s\n") % \
5836 sys.stderr.write(("\n\n!!! Problem resolving " + \
5837 "dependencies for %s\n") % atom)
5838 return 0, myfavorites
5840 except portage.exception.MissingSignature, e:
5841 portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5842 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5843 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5844 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5845 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5846 return 0, myfavorites
5847 except portage.exception.InvalidSignature, e:
5848 portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5849 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5850 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5851 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5852 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5853 return 0, myfavorites
5854 except SystemExit, e:
5855 raise # Needed else can't exit
5856 except Exception, e:
5857 print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5858 print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5861 # Now that the root packages have been added to the graph,
5862 # process the dependencies.
5863 if not self._create_graph():
5864 return 0, myfavorites
# --- --usepkgonly sanity pass: detect merges without a binary ---
5867 if "--usepkgonly" in self.myopts:
5868 for xs in self.digraph.all_nodes():
5869 if not isinstance(xs, Package):
5871 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5875 print "Missing binary for:",xs[2]
5879 except self._unknown_internal_error:
5880 return False, myfavorites
5882 # We're true here unless we are missing binaries.
5883 return (not missing,myfavorites)
5885 def _set_args(self, args):
# Rebuild the internal "args" package set and the (atom, root) ->
# argument mapping from the given argument list, then invalidate the
# caches that depend on argument atoms.
# NOTE(review): this extraction is missing several original lines
# (loop headers and branch bodies), so the control flow shown below is
# incomplete -- do not treat it as runnable as-is.
5887 Create the "args" package set from atoms and packages given as
5888 arguments. This method can be called multiple times if necessary.
5889 The package selection cache is automatically invalidated, since
5890 arguments influence package selections.
5892 args_set = self._sets["args"]
# Only AtomArg/PackageArg arguments contribute atoms to the "args" set.
5895 if not isinstance(arg, (AtomArg, PackageArg)):
# Deduplicate: atoms already present in the set are skipped (the branch
# body is not visible in this extraction).
5898 if atom in args_set:
# Rebuild the flat union of atoms from every configured package set.
5902 self._set_atoms.clear()
5903 self._set_atoms.update(chain(*self._sets.itervalues()))
# Rebuild the (atom, root) -> [argument refs] mapping used to look up
# which command-line argument pulled in a given atom.
5904 atom_arg_map = self._atom_arg_map
5905 atom_arg_map.clear()
5907 for atom in arg.set:
5908 atom_key = (atom, arg.root_config.root)
5909 refs = atom_arg_map.get(atom_key)
5912 atom_arg_map[atom_key] = refs
5916 # Invalidate the package selection cache, since
5917 # arguments influence package selections.
5918 self._highest_pkg_cache.clear()
# Also drop any cached porttree dbapi match results for each root.
5919 for trees in self._filtered_trees.itervalues():
5920 trees["porttree"].dbapi._clear_cache()
5922 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
# NOTE(review): several original lines are missing from this extraction
# (e.g. the early-return body after the None check and some loop
# headers), so the control flow shown here is incomplete.
5924 Return a list of slot atoms corresponding to installed slots that
5925 differ from the slot of the highest visible match. When
5926 blocker_lookahead is True, slot atoms that would trigger a blocker
5927 conflict are automatically discarded, potentially allowing automatic
5928 uninstallation of older slots when appropriate.
5930 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5931 if highest_pkg is None:
# Collect the SLOT values of installed packages that share the cp of
# the highest visible match.
5933 vardb = root_config.trees["vartree"].dbapi
5935 for cpv in vardb.match(atom):
5936 # don't mix new virtuals with old virtuals
5937 if portage.cpv_getkey(cpv) == highest_pkg.cp:
5938 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5940 slots.add(highest_pkg.metadata["SLOT"])
# The slot of the highest match itself is not a greedy candidate.
5944 slots.remove(highest_pkg.metadata["SLOT"])
# For each remaining slot, select the best package and keep it only if
# it is an older version of the same cp as highest_pkg.
5947 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5948 pkg, in_graph = self._select_package(root_config.root, slot_atom)
5949 if pkg is not None and \
5950 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5951 greedy_pkgs.append(pkg)
5954 if not blocker_lookahead:
5955 return [pkg.slot_atom for pkg in greedy_pkgs]
# blocker_lookahead mode: compute each candidate's blocker atoms from
# its *DEPEND metadata (invalid dep strings drop the candidate).
5958 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5959 for pkg in greedy_pkgs + [highest_pkg]:
5960 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5962 atoms = self._select_atoms(
5963 pkg.root, dep_str, pkg.use.enabled,
5964 parent=pkg, strict=True)
5965 except portage.exception.InvalidDependString:
5967 blocker_atoms = (x for x in atoms if x.blocker)
5968 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5970 if highest_pkg not in blockers:
5973 # filter packages with invalid deps
5974 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5976 # filter packages that conflict with highest_pkg
5977 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5978 (blockers[highest_pkg].findAtomForPackage(pkg) or \
5979 blockers[pkg].findAtomForPackage(highest_pkg))]
5984 # If two packages conflict, discard the lower version.
# Pairwise conflict resolution over the version-sorted candidates:
# the higher version (earlier after reverse sort) survives.
5985 discard_pkgs = set()
5986 greedy_pkgs.sort(reverse=True)
5987 for i in xrange(len(greedy_pkgs) - 1):
5988 pkg1 = greedy_pkgs[i]
5989 if pkg1 in discard_pkgs:
5991 for j in xrange(i + 1, len(greedy_pkgs)):
5992 pkg2 = greedy_pkgs[j]
5993 if pkg2 in discard_pkgs:
5995 if blockers[pkg1].findAtomForPackage(pkg2) or \
5996 blockers[pkg2].findAtomForPackage(pkg1):
5998 discard_pkgs.add(pkg2)
6000 return [pkg.slot_atom for pkg in greedy_pkgs \
6001 if pkg not in discard_pkgs]
6003 def _select_atoms_from_graph(self, *pargs, **kwargs):
# Thin wrapper around _select_atoms_highest_available that swaps in the
# graph-backed trees, so atom selection prefers packages already in
# the depgraph (used by _complete_graph).
# NOTE(review): the docstring quote delimiters are missing from this
# extraction; the prose lines below are the original docstring body.
6005 Prefer atoms matching packages that have already been
6006 added to the graph or those that are installed and have
6007 not been scheduled for replacement.
6009 kwargs["trees"] = self._graph_trees
6010 return self._select_atoms_highest_available(*pargs, **kwargs)
6012 def _select_atoms_highest_available(self, root, depstring,
6013 myuse=None, parent=None, strict=True, trees=None, priority=None):
6014 """This will raise InvalidDependString if necessary. If trees is
6015 None then self._filtered_trees is used."""
# Runs portage.dep_check() over `depstring` and returns the selected
# atoms. NOTE(review): the original try/finally lines wrapping the
# dep_check call are missing from this extraction -- the finally block
# restores _dep_check_strict and removes the "parent" entry; confirm
# against the full source before editing.
6016 pkgsettings = self.pkgsettings[root]
6018 trees = self._filtered_trees
6019 if not getattr(priority, "buildtime", False):
6020 # The parent should only be passed to dep_check() for buildtime
6021 # dependencies since that's the only case when it's appropriate
6022 # to trigger the circular dependency avoidance code which uses it.
6023 # It's important not to trigger the same circular dependency
6024 # avoidance code for runtime dependencies since it's not needed
6025 # and it can promote an incorrect package choice.
6029 if parent is not None:
# Temporarily expose the parent package to dep_check via the trees dict.
6030 trees[root]["parent"] = parent
6032 portage.dep._dep_check_strict = False
6033 mycheck = portage.dep_check(depstring, None,
6034 pkgsettings, myuse=myuse,
6035 myroot=root, trees=trees)
# Cleanup: undo the temporary "parent" entry and restore strict mode.
6037 if parent is not None:
6038 trees[root].pop("parent")
6039 portage.dep._dep_check_strict = True
# dep_check returns (success, atoms-or-message); on failure the second
# element is the error message.
6041 raise portage.exception.InvalidDependString(mycheck[1])
6042 selected_atoms = mycheck[1]
6043 return selected_atoms
6045 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
# Print a user-facing explanation of why `atom` could not be satisfied:
# missing USE/IUSE flags, masked packages, or no ebuilds at all, then
# walk the digraph parents to show what pulled the dependency in.
# NOTE(review): this extraction is missing many original lines (else
# branches, loop/branch bodies, list initializations such as
# `missing_use` and `msg`), so the flow below is incomplete as shown.
6046 atom = portage.dep.Atom(atom)
6047 atom_set = InternalPackageSet(initial_atoms=(atom,))
# Build a USE-less (and re-slotted) variant of the atom so that masked
# and USE-mismatched candidates still match below.
6048 atom_without_use = atom
6050 atom_without_use = portage.dep.remove_slot(atom)
6052 atom_without_use += ":" + atom.slot
6053 atom_without_use = portage.dep.Atom(atom_without_use)
6054 xinfo = '"%s"' % atom
6057 # Discard null/ from failed cpv_expand category expansion.
6058 xinfo = xinfo.replace("null/", "")
6059 masked_packages = []
6061 masked_pkg_instances = set()
6062 missing_licenses = []
6063 have_eapi_mask = False
6064 pkgsettings = self.pkgsettings[root]
6065 implicit_iuse = pkgsettings._get_implicit_iuse()
6066 root_config = self.roots[root]
6067 portdb = self.roots[root].trees["porttree"].dbapi
6068 dbs = self._filtered_trees[root]["dbs"]
# Scan every candidate db for packages matching the USE-less atom and
# classify each match as missing-USE or masked.
6069 for db, pkg_type, built, installed, db_keys in dbs:
6073 if hasattr(db, "xmatch"):
6074 cpv_list = db.xmatch("match-all", atom_without_use)
6076 cpv_list = db.match(atom_without_use)
6079 for cpv in cpv_list:
6080 metadata, mreasons = get_mask_info(root_config, cpv,
6081 pkgsettings, db, pkg_type, built, installed, db_keys)
6082 if metadata is not None:
6083 pkg = Package(built=built, cpv=cpv,
6084 installed=installed, metadata=metadata,
6085 root_config=root_config)
6086 if pkg.cp != atom.cp:
6087 # A cpv can be returned from dbapi.match() as an
6088 # old-style virtual match even in cases when the
6089 # package does not actually PROVIDE the virtual.
6090 # Filter out any such false matches here.
6091 if not atom_set.findAtomForPackage(pkg):
6094 masked_pkg_instances.add(pkg)
6096 missing_use.append(pkg)
6099 masked_packages.append(
6100 (root_config, pkgsettings, cpv, metadata, mreasons))
# For each missing-USE candidate, work out which flags would have to
# exist (IUSE) or be toggled (USE) to satisfy the atom.
6102 missing_use_reasons = []
6103 missing_iuse_reasons = []
6104 for pkg in missing_use:
6105 use = pkg.use.enabled
6106 iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
6107 iuse_re = re.compile("^(%s)$" % "|".join(iuse))
6109 for x in atom.use.required:
6110 if iuse_re.match(x) is None:
6111 missing_iuse.append(x)
6114 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
6115 missing_iuse_reasons.append((pkg, mreasons))
6117 need_enable = sorted(atom.use.enabled.difference(use))
6118 need_disable = sorted(atom.use.disabled.intersection(use))
6119 if need_enable or need_disable:
6121 changes.extend(colorize("red", "+" + x) \
6122 for x in need_enable)
6123 changes.extend(colorize("blue", "-" + x) \
6124 for x in need_disable)
6125 mreasons.append("Change USE: %s" % " ".join(changes))
6126 missing_use_reasons.append((pkg, mreasons))
# Only unmasked instances are worth reporting as USE problems; masked
# ones fall through to the normal masking message.
6128 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6129 in missing_use_reasons if pkg not in masked_pkg_instances]
6131 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6132 in missing_iuse_reasons if pkg not in masked_pkg_instances]
6134 show_missing_use = False
6135 if unmasked_use_reasons:
6136 # Only show the latest version.
6137 show_missing_use = unmasked_use_reasons[:1]
6138 elif unmasked_iuse_reasons:
6139 if missing_use_reasons:
6140 # All packages with required IUSE are masked,
6141 # so display a normal masking message.
6144 show_missing_use = unmasked_iuse_reasons
# Emit exactly one of three messages: USE-flag problem, masked
# packages, or no ebuilds at all.
6146 if show_missing_use:
6147 print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6148 print "!!! One of the following packages is required to complete your request:"
6149 for pkg, mreasons in show_missing_use:
6150 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6152 elif masked_packages:
6154 colorize("BAD", "All ebuilds that could satisfy ") + \
6155 colorize("INFORM", xinfo) + \
6156 colorize("BAD", " have been masked.")
6157 print "!!! One of the following masked packages is required to complete your request:"
6158 have_eapi_mask = show_masked_packages(masked_packages)
# Extra hint when a package is masked only because its EAPI is newer
# than this portage supports.
6161 msg = ("The current version of portage supports " + \
6162 "EAPI '%s'. You must upgrade to a newer version" + \
6163 " of portage before EAPI masked packages can" + \
6164 " be installed.") % portage.const.EAPI
6165 from textwrap import wrap
6166 for line in wrap(msg, 75):
6171 print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6173 # Show parent nodes and the argument that pulled them in.
# Walk up the digraph from the parent node, preferring DependencyArg
# parents (root nodes) and never revisiting a package.
6174 traversed_nodes = set()
6177 while node is not None:
6178 traversed_nodes.add(node)
6179 msg.append('(dependency required by "%s" [%s])' % \
6180 (colorize('INFORM', str(node.cpv)), node.type_name))
6181 # When traversing to parents, prefer arguments over packages
6182 # since arguments are root nodes. Never traverse the same
6183 # package twice, in order to prevent an infinite loop.
6184 selected_parent = None
6185 for parent in self.digraph.parent_nodes(node):
6186 if isinstance(parent, DependencyArg):
6187 msg.append('(dependency required by "%s" [argument])' % \
6188 (colorize('INFORM', str(parent))))
6189 selected_parent = None
6191 if parent not in traversed_nodes:
6192 selected_parent = parent
6193 node = selected_parent
6199 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
# Memoizing front-end for _select_pkg_highest_available_imp: results
# are cached per (root, atom, onlydeps) and refreshed when the chosen
# package turns out to already be in the graph's slot map.
# NOTE(review): lines between the cache lookup and the cached-hit
# handling are missing from this extraction.
6200 cache_key = (root, atom, onlydeps)
6201 ret = self._highest_pkg_cache.get(cache_key)
6204 if pkg and not existing:
6205 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6206 if existing and existing == pkg:
6207 # Update the cache to reflect that the
6208 # package has been added to the graph.
6210 self._highest_pkg_cache[cache_key] = ret
# Cache miss: compute and store the selection.
6212 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6213 self._highest_pkg_cache[cache_key] = ret
# NOTE(review): the lines below belong to a different method whose
# `def` line is missing from this extraction (they inject a visible,
# non-keyword-masked package into root_config.visible_pkgs).
6216 settings = pkg.root_config.settings
6217 if visible(settings, pkg) and not (pkg.installed and \
6218 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6219 pkg.root_config.visible_pkgs.cpv_inject(pkg)
6222 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
# Core package-selection routine: scan the candidate dbs (binary,
# ebuild, installed, ...) for packages matching `atom`, apply
# visibility / keyword / USE / reinstall policy, and return a
# (package, existing_graph_node) pair.
# NOTE(review): this extraction is missing dozens of original lines
# (else branches, try/except lines, continue/break statements, and the
# empty-result return), so the control flow below is incomplete as
# shown -- consult the full source before making logic changes.
6223 root_config = self.roots[root]
6224 pkgsettings = self.pkgsettings[root]
6225 dbs = self._filtered_trees[root]["dbs"]
6226 vardb = self.roots[root].trees["vartree"].dbapi
6227 portdb = self.roots[root].trees["porttree"].dbapi
6228 # List of acceptable packages, ordered by type preference.
6229 matched_packages = []
6230 highest_version = None
6231 if not isinstance(atom, portage.dep.Atom):
6232 atom = portage.dep.Atom(atom)
6234 atom_set = InternalPackageSet(initial_atoms=(atom,))
6235 existing_node = None
6237 usepkgonly = "--usepkgonly" in self.myopts
6238 empty = "empty" in self.myparams
6239 selective = "selective" in self.myparams
6241 noreplace = "--noreplace" in self.myopts
6242 # Behavior of the "selective" parameter depends on
6243 # whether or not a package matches an argument atom.
6244 # If an installed package provides an old-style
6245 # virtual that is no longer provided by an available
6246 # package, the installed package may match an argument
6247 # atom even though none of the available packages do.
6248 # Therefore, "selective" logic does not consider
6249 # whether or not an installed package matches an
6250 # argument atom. It only considers whether or not
6251 # available packages match argument atoms, which is
6252 # represented by the found_available_arg flag.
6253 found_available_arg = False
# Two passes: first look for a matching node already in the graph,
# then fall back to a fresh selection.
6254 for find_existing_node in True, False:
6257 for db, pkg_type, built, installed, db_keys in dbs:
6260 if installed and not find_existing_node:
6261 want_reinstall = reinstall or empty or \
6262 (found_available_arg and not selective)
6263 if want_reinstall and matched_packages:
6265 if hasattr(db, "xmatch"):
6266 cpv_list = db.xmatch("match-all", atom)
6268 cpv_list = db.match(atom)
6270 # USE=multislot can make an installed package appear as if
6271 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6272 # won't do any good as long as USE=multislot is enabled since
6273 # the newly built package still won't have the expected slot.
6274 # Therefore, assume that such SLOT dependencies are already
6275 # satisfied rather than forcing a rebuild.
6276 if installed and not cpv_list and atom.slot:
6277 for cpv in db.match(atom.cp):
6278 slot_available = False
6279 for other_db, other_type, other_built, \
6280 other_installed, other_keys in dbs:
6283 other_db.aux_get(cpv, ["SLOT"])[0]:
6284 slot_available = True
6288 if not slot_available:
6290 inst_pkg = self._pkg(cpv, "installed",
6291 root_config, installed=installed)
6292 # Remove the slot from the atom and verify that
6293 # the package matches the resulting atom.
6294 atom_without_slot = portage.dep.remove_slot(atom)
6296 atom_without_slot += str(atom.use)
6297 atom_without_slot = portage.dep.Atom(atom_without_slot)
6298 if portage.match_from_list(
6299 atom_without_slot, [inst_pkg]):
6300 cpv_list = [inst_pkg.cpv]
6305 pkg_status = "merge"
6306 if installed or onlydeps:
6307 pkg_status = "nomerge"
# Iterate candidate cpvs (highest first in the original); each cpv is
# wrapped in a Package instance, cached, then policy-filtered.
6310 for cpv in cpv_list:
6311 # Make --noreplace take precedence over --newuse.
6312 if not installed and noreplace and \
6313 cpv in vardb.match(atom):
6314 # If the installed version is masked, it may
6315 # be necessary to look at lower versions,
6316 # in case there is a visible downgrade.
6318 reinstall_for_flags = None
6319 cache_key = (pkg_type, root, cpv, pkg_status)
6320 calculated_use = True
6321 pkg = self._pkg_cache.get(cache_key)
6323 calculated_use = False
6325 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6328 pkg = Package(built=built, cpv=cpv,
6329 installed=installed, metadata=metadata,
6330 onlydeps=onlydeps, root_config=root_config,
6332 metadata = pkg.metadata
6334 metadata['CHOST'] = pkgsettings.get('CHOST', '')
6335 if not built and ("?" in metadata["LICENSE"] or \
6336 "?" in metadata["PROVIDE"]):
6337 # This is avoided whenever possible because
6338 # it's expensive. It only needs to be done here
6339 # if it has an effect on visibility.
6340 pkgsettings.setcpv(pkg)
6341 metadata["USE"] = pkgsettings["PORTAGE_USE"]
6342 calculated_use = True
6343 self._pkg_cache[pkg] = pkg
6345 if not installed or (built and matched_packages):
6346 # Only enforce visibility on installed packages
6347 # if there is at least one other visible package
6348 # available. By filtering installed masked packages
6349 # here, packages that have been masked since they
6350 # were installed can be automatically downgraded
6351 # to an unmasked version.
6353 if not visible(pkgsettings, pkg):
6355 except portage.exception.InvalidDependString:
6359 # Enable upgrade or downgrade to a version
6360 # with visible KEYWORDS when the installed
6361 # version is masked by KEYWORDS, but never
6362 # reinstall the same exact version only due
6363 # to a KEYWORDS mask.
6364 if built and matched_packages:
6366 different_version = None
6367 for avail_pkg in matched_packages:
6368 if not portage.dep.cpvequal(
6369 pkg.cpv, avail_pkg.cpv):
6370 different_version = avail_pkg
6372 if different_version is not None:
6375 pkgsettings._getMissingKeywords(
6376 pkg.cpv, pkg.metadata):
6379 # If the ebuild no longer exists or it's
6380 # keywords have been dropped, reject built
6381 # instances (installed or binary).
6382 # If --usepkgonly is enabled, assume that
6383 # the ebuild status should be ignored.
6387 pkg.cpv, "ebuild", root_config)
6388 except portage.exception.PackageNotFound:
6391 if not visible(pkgsettings, pkg_eb):
# Lazily compute USE for unbuilt packages if it was not needed above.
6394 if not pkg.built and not calculated_use:
6395 # This is avoided whenever possible because
6397 pkgsettings.setcpv(pkg)
6398 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6400 if pkg.cp != atom.cp:
6401 # A cpv can be returned from dbapi.match() as an
6402 # old-style virtual match even in cases when the
6403 # package does not actually PROVIDE the virtual.
6404 # Filter out any such false matches here.
6405 if not atom_set.findAtomForPackage(pkg):
# Track whether any non-installed candidate matches a command-line
# argument (feeds the "selective" logic described above).
6409 if root == self.target_root:
6411 # Ebuild USE must have been calculated prior
6412 # to this point, in case atoms have USE deps.
6413 myarg = self._iter_atoms_for_pkg(pkg).next()
6414 except StopIteration:
6416 except portage.exception.InvalidDependString:
6418 # masked by corruption
6420 if not installed and myarg:
6421 found_available_arg = True
# Unbuilt candidates must satisfy the atom's USE dependencies exactly.
6423 if atom.use and not pkg.built:
6424 use = pkg.use.enabled
6425 if atom.use.enabled.difference(use):
6427 if atom.use.disabled.intersection(use):
6429 if pkg.cp == atom_cp:
6430 if highest_version is None:
6431 highest_version = pkg
6432 elif pkg > highest_version:
6433 highest_version = pkg
6434 # At this point, we've found the highest visible
6435 # match from the current repo. Any lower versions
6436 # from this repo are ignored, so this so the loop
6437 # will always end with a break statement below
6439 if find_existing_node:
6440 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6443 if portage.dep.match_from_list(atom, [e_pkg]):
6444 if highest_version and \
6445 e_pkg.cp == atom_cp and \
6446 e_pkg < highest_version and \
6447 e_pkg.slot_atom != highest_version.slot_atom:
6448 # There is a higher version available in a
6449 # different slot, so this existing node is
6453 matched_packages.append(e_pkg)
6454 existing_node = e_pkg
6456 # Compare built package to current config and
6457 # reject the built package if necessary.
6458 if built and not installed and \
6459 ("--newuse" in self.myopts or \
6460 "--reinstall" in self.myopts):
6461 iuses = pkg.iuse.all
6462 old_use = pkg.use.enabled
6463 pkgsettings.setcpv(myeb)
6466 pkgsettings.setcpv(pkg)
6467 now_use = pkgsettings["PORTAGE_USE"].split()
6468 forced_flags = set()
6469 forced_flags.update(pkgsettings.useforce)
6470 forced_flags.update(pkgsettings.usemask)
6472 if myeb and not usepkgonly:
6473 cur_iuse = myeb.iuse.all
6474 if self._reinstall_for_flags(forced_flags,
6478 # Compare current config to installed package
6479 # and do not reinstall if possible.
6480 if not installed and \
6481 ("--newuse" in self.myopts or \
6482 "--reinstall" in self.myopts) and \
6483 cpv in vardb.match(atom):
6484 pkgsettings.setcpv(pkg)
6485 forced_flags = set()
6486 forced_flags.update(pkgsettings.useforce)
6487 forced_flags.update(pkgsettings.usemask)
6488 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6489 old_iuse = set(filter_iuse_defaults(
6490 vardb.aux_get(cpv, ["IUSE"])[0].split()))
6491 cur_use = pkg.use.enabled
6492 cur_iuse = pkg.iuse.all
6493 reinstall_for_flags = \
6494 self._reinstall_for_flags(
6495 forced_flags, old_use, old_iuse,
6497 if reinstall_for_flags:
# Candidate accepted; remember why a reinstall was triggered, if any.
6501 matched_packages.append(pkg)
6502 if reinstall_for_flags:
6503 self._reinstall_nodes[pkg] = \
6507 if not matched_packages:
6510 if "--debug" in self.myopts:
6511 for pkg in matched_packages:
6512 portage.writemsg("%s %s\n" % \
6513 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6515 # Filter out any old-style virtual matches if they are
6516 # mixed with new-style virtual matches.
6517 cp = portage.dep_getkey(atom)
6518 if len(matched_packages) > 1 and \
6519 "virtual" == portage.catsplit(cp)[0]:
6520 for pkg in matched_packages:
6523 # Got a new-style virtual, so filter
6524 # out any old-style virtuals.
6525 matched_packages = [pkg for pkg in matched_packages \
# With multiple survivors, keep only the overall best version.
6529 if len(matched_packages) > 1:
6530 bestmatch = portage.best(
6531 [pkg.cpv for pkg in matched_packages])
6532 matched_packages = [pkg for pkg in matched_packages \
6533 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6535 # ordered by type preference ("ebuild" type is the last resort)
6536 return matched_packages[-1], existing_node
6538 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
# Graph-restricted counterpart of _select_pkg_highest_available: only
# packages already in the graph-backed db can match (used while
# _complete_graph is active).
# NOTE(review): the docstring delimiters and the empty-`matches` guard
# lines are missing from this extraction.
6540 Select packages that have already been added to the graph or
6541 those that are installed and have not been scheduled for
6544 graph_db = self._graph_trees[root]["porttree"].dbapi
6545 matches = graph_db.match_pkgs(atom)
6548 pkg = matches[-1] # highest match
6549 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6550 return pkg, in_graph
6552 def _complete_graph(self):
# NOTE(review): this extraction is missing several original lines
# (early returns, continue statements, the `args` list initialization,
# and the docstring delimiters), so the flow below is incomplete.
6554 Add any deep dependencies of required sets (args, system, world) that
6555 have not been pulled into the graph yet. This ensures that the graph
6556 is consistent such that initially satisfied deep dependencies are not
6557 broken in the new graph. Initially unsatisfied dependencies are
6558 irrelevant since we only want to avoid breaking dependencies that are
6561 Since this method can consume enough time to disturb users, it is
6562 currently only enabled by the --complete-graph option.
6564 if "--buildpkgonly" in self.myopts or \
6565 "recurse" not in self.myparams:
6568 if "complete" not in self.myparams:
6569 # Skip this to avoid consuming enough time to disturb users.
6572 # Put the depgraph into a mode that causes it to only
6573 # select packages that have already been added to the
6574 # graph or those that are installed and have not been
6575 # scheduled for replacement. Also, toggle the "deep"
6576 # parameter so that all dependencies are traversed and
# Swap in the graph-restricted selection methods defined above.
6578 self._select_atoms = self._select_atoms_from_graph
6579 self._select_package = self._select_pkg_from_graph
6580 already_deep = "deep" in self.myparams
6581 if not already_deep:
6582 self.myparams.add("deep")
# For each root, queue the atoms of every still-required set
# (args/system/world) as dependencies to be re-traversed.
6584 for root in self.roots:
6585 required_set_names = self._required_set_names.copy()
6586 if root == self.target_root and \
6587 (already_deep or "empty" in self.myparams):
6588 required_set_names.difference_update(self._sets)
6589 if not required_set_names and not self._ignored_deps:
6591 root_config = self.roots[root]
6592 setconfig = root_config.setconfig
6594 # Reuse existing SetArg instances when available.
6595 for arg in self.digraph.root_nodes():
6596 if not isinstance(arg, SetArg):
6598 if arg.root_config != root_config:
6600 if arg.name in required_set_names:
6602 required_set_names.remove(arg.name)
6603 # Create new SetArg instances only when necessary.
6604 for s in required_set_names:
6605 expanded_set = InternalPackageSet(
6606 initial_atoms=setconfig.getSetAtoms(s))
6607 atom = SETPREFIX + s
6608 args.append(SetArg(arg=atom, set=expanded_set,
6609 root_config=root_config))
6610 vardb = root_config.trees["vartree"].dbapi
6612 for atom in arg.set:
6613 self._dep_stack.append(
6614 Dependency(atom=atom, root=root, parent=arg))
# Re-queue deps that were deliberately ignored earlier.
6615 if self._ignored_deps:
6616 self._dep_stack.extend(self._ignored_deps)
6617 self._ignored_deps = []
6618 if not self._create_graph(allow_unsatisfied=True):
6620 # Check the unsatisfied deps to see if any initially satisfied deps
6621 # will become unsatisfied due to an upgrade. Initially unsatisfied
6622 # deps are irrelevant since we only want to avoid breaking deps
6623 # that are initially satisfied.
6624 while self._unsatisfied_deps:
6625 dep = self._unsatisfied_deps.pop()
6626 matches = vardb.match_pkgs(dep.atom)
6628 self._initially_unsatisfied_deps.append(dep)
6630 # An scheduled installation broke a deep dependency.
6631 # Add the installed package to the graph so that it
6632 # will be appropriately reported as a slot collision
6633 # (possibly solvable via backtracking).
6634 pkg = matches[-1] # highest match
6635 if not self._add_pkg(pkg, dep):
6637 if not self._create_graph(allow_unsatisfied=True):
6641 def _pkg(self, cpv, type_name, root_config, installed=False):
# NOTE(review): this extraction is missing the docstring delimiters,
# the cache-hit return, and the try/except lines around aux_get.
6643 Get a package instance from the cache, or create a new
6644 one if necessary. Raises KeyError from aux_get if it
6645 fails for some reason (package does not exist or is
# Cache lookup keyed by (type, root, cpv, operation).
6650 operation = "nomerge"
6651 pkg = self._pkg_cache.get(
6652 (type_name, root_config.root, cpv, operation))
# Cache miss: fetch metadata from the appropriate tree's dbapi.
6654 tree_type = self.pkg_tree_map[type_name]
6655 db = root_config.trees[tree_type].dbapi
6656 db_keys = list(self._trees_orig[root_config.root][
6657 tree_type].dbapi._aux_cache_keys)
# aux_get failure (missing try/except here in the extraction) is
# translated into PackageNotFound.
6659 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6661 raise portage.exception.PackageNotFound(cpv)
6662 pkg = Package(cpv=cpv, metadata=metadata,
6663 root_config=root_config, installed=installed)
# Ebuild USE/CHOST must be computed against the current config.
6664 if type_name == "ebuild":
6665 settings = self.pkgsettings[root_config.root]
6666 settings.setcpv(pkg)
6667 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6668 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6669 self._pkg_cache[pkg] = pkg
6672 def validate_blockers(self):
6673 """Remove any blockers from the digraph that do not match any of the
6674 packages within the graph. If necessary, create hard deps to ensure
6675 correct merge order such that mutually blocking packages are never
6676 installed simultaneously."""
6678 if "--buildpkgonly" in self.myopts or \
6679 "--nodeps" in self.myopts:
6682 #if "deep" in self.myparams:
6684 # Pull in blockers from all installed packages that haven't already
6685 # been pulled into the depgraph. This is not enabled by default
6686 # due to the performance penalty that is incurred by all the
6687 # additional dep_check calls that are required.
6689 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6690 for myroot in self.trees:
6691 vardb = self.trees[myroot]["vartree"].dbapi
6692 portdb = self.trees[myroot]["porttree"].dbapi
6693 pkgsettings = self.pkgsettings[myroot]
6694 final_db = self.mydbapi[myroot]
6696 blocker_cache = BlockerCache(myroot, vardb)
6697 stale_cache = set(blocker_cache)
6700 stale_cache.discard(cpv)
6701 pkg_in_graph = self.digraph.contains(pkg)
6703 # Check for masked installed packages. Only warn about
6704 # packages that are in the graph in order to avoid warning
6705 # about those that will be automatically uninstalled during
6706 # the merge process or by --depclean.
6708 if pkg_in_graph and not visible(pkgsettings, pkg):
6709 self._masked_installed.add(pkg)
6711 blocker_atoms = None
6717 self._blocker_parents.child_nodes(pkg))
6722 self._irrelevant_blockers.child_nodes(pkg))
6725 if blockers is not None:
6726 blockers = set(str(blocker.atom) \
6727 for blocker in blockers)
6729 # If this node has any blockers, create a "nomerge"
6730 # node for it so that they can be enforced.
6731 self.spinner.update()
6732 blocker_data = blocker_cache.get(cpv)
6733 if blocker_data is not None and \
6734 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6737 # If blocker data from the graph is available, use
6738 # it to validate the cache and update the cache if
6740 if blocker_data is not None and \
6741 blockers is not None:
6742 if not blockers.symmetric_difference(
6743 blocker_data.atoms):
6747 if blocker_data is None and \
6748 blockers is not None:
6749 # Re-use the blockers from the graph.
6750 blocker_atoms = sorted(blockers)
6751 counter = long(pkg.metadata["COUNTER"])
6753 blocker_cache.BlockerData(counter, blocker_atoms)
6754 blocker_cache[pkg.cpv] = blocker_data
6758 blocker_atoms = blocker_data.atoms
6760 # Use aux_get() to trigger FakeVartree global
6761 # updates on *DEPEND when appropriate.
6762 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6763 # It is crucial to pass in final_db here in order to
6764 # optimize dep_check calls by eliminating atoms via
6765 # dep_wordreduce and dep_eval calls.
6767 portage.dep._dep_check_strict = False
6769 success, atoms = portage.dep_check(depstr,
6770 final_db, pkgsettings, myuse=pkg.use.enabled,
6771 trees=self._graph_trees, myroot=myroot)
6772 except Exception, e:
6773 if isinstance(e, SystemExit):
6775 # This is helpful, for example, if a ValueError
6776 # is thrown from cpv_expand due to multiple
6777 # matches (this can happen if an atom lacks a
6779 show_invalid_depstring_notice(
6780 pkg, depstr, str(e))
6784 portage.dep._dep_check_strict = True
6786 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6787 if replacement_pkg and \
6788 replacement_pkg[0].operation == "merge":
6789 # This package is being replaced anyway, so
6790 # ignore invalid dependencies so as not to
6791 # annoy the user too much (otherwise they'd be
6792 # forced to manually unmerge it first).
6794 show_invalid_depstring_notice(pkg, depstr, atoms)
6796 blocker_atoms = [myatom for myatom in atoms \
6797 if myatom.startswith("!")]
6798 blocker_atoms.sort()
6799 counter = long(pkg.metadata["COUNTER"])
6800 blocker_cache[cpv] = \
6801 blocker_cache.BlockerData(counter, blocker_atoms)
6804 for atom in blocker_atoms:
6805 blocker = Blocker(atom=portage.dep.Atom(atom),
6806 eapi=pkg.metadata["EAPI"], root=myroot)
6807 self._blocker_parents.add(blocker, pkg)
6808 except portage.exception.InvalidAtom, e:
6809 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6810 show_invalid_depstring_notice(
6811 pkg, depstr, "Invalid Atom: %s" % (e,))
6813 for cpv in stale_cache:
6814 del blocker_cache[cpv]
6815 blocker_cache.flush()
6818 # Discard any "uninstall" tasks scheduled by previous calls
6819 # to this method, since those tasks may not make sense given
6820 # the current graph state.
6821 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6822 if previous_uninstall_tasks:
6823 self._blocker_uninstalls = digraph()
6824 self.digraph.difference_update(previous_uninstall_tasks)
6826 for blocker in self._blocker_parents.leaf_nodes():
6827 self.spinner.update()
6828 root_config = self.roots[blocker.root]
6829 virtuals = root_config.settings.getvirtuals()
6830 myroot = blocker.root
6831 initial_db = self.trees[myroot]["vartree"].dbapi
6832 final_db = self.mydbapi[myroot]
6834 provider_virtual = False
6835 if blocker.cp in virtuals and \
6836 not self._have_new_virt(blocker.root, blocker.cp):
6837 provider_virtual = True
6839 # Use this to check PROVIDE for each matched package
6841 atom_set = InternalPackageSet(
6842 initial_atoms=[blocker.atom])
6844 if provider_virtual:
6846 for provider_entry in virtuals[blocker.cp]:
6848 portage.dep_getkey(provider_entry)
6849 atoms.append(blocker.atom.replace(
6850 blocker.cp, provider_cp))
6852 atoms = [blocker.atom]
6854 blocked_initial = set()
6856 for pkg in initial_db.match_pkgs(atom):
6857 if atom_set.findAtomForPackage(pkg):
6858 blocked_initial.add(pkg)
6860 blocked_final = set()
6862 for pkg in final_db.match_pkgs(atom):
6863 if atom_set.findAtomForPackage(pkg):
6864 blocked_final.add(pkg)
6866 if not blocked_initial and not blocked_final:
6867 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6868 self._blocker_parents.remove(blocker)
6869 # Discard any parents that don't have any more blockers.
6870 for pkg in parent_pkgs:
6871 self._irrelevant_blockers.add(blocker, pkg)
6872 if not self._blocker_parents.child_nodes(pkg):
6873 self._blocker_parents.remove(pkg)
6875 for parent in self._blocker_parents.parent_nodes(blocker):
6876 unresolved_blocks = False
6877 depends_on_order = set()
6878 for pkg in blocked_initial:
6879 if pkg.slot_atom == parent.slot_atom:
6880 # TODO: Support blocks within slots in cases where it
6881 # might make sense. For example, a new version might
6882 # require that the old version be uninstalled at build
6885 if parent.installed:
6886 # Two currently installed packages conflict with
6887 # eachother. Ignore this case since the damage
6888 # is already done and this would be likely to
6889 # confuse users if displayed like a normal blocker.
6892 self._blocked_pkgs.add(pkg, blocker)
6894 if parent.operation == "merge":
6895 # Maybe the blocked package can be replaced or simply
6896 # unmerged to resolve this block.
6897 depends_on_order.add((pkg, parent))
6899 # None of the above blocker resolutions techniques apply,
6900 # so apparently this one is unresolvable.
6901 unresolved_blocks = True
6902 for pkg in blocked_final:
6903 if pkg.slot_atom == parent.slot_atom:
6904 # TODO: Support blocks within slots.
6906 if parent.operation == "nomerge" and \
6907 pkg.operation == "nomerge":
6908 # This blocker will be handled the next time that a
6909 # merge of either package is triggered.
6912 self._blocked_pkgs.add(pkg, blocker)
6914 # Maybe the blocking package can be
6915 # unmerged to resolve this block.
6916 if parent.operation == "merge" and pkg.installed:
6917 depends_on_order.add((pkg, parent))
6919 elif parent.operation == "nomerge":
6920 depends_on_order.add((parent, pkg))
6922 # None of the above blocker resolutions techniques apply,
6923 # so apparently this one is unresolvable.
6924 unresolved_blocks = True
6926 # Make sure we don't unmerge any package that have been pulled
6928 if not unresolved_blocks and depends_on_order:
6929 for inst_pkg, inst_task in depends_on_order:
6930 if self.digraph.contains(inst_pkg) and \
6931 self.digraph.parent_nodes(inst_pkg):
6932 unresolved_blocks = True
6935 if not unresolved_blocks and depends_on_order:
6936 for inst_pkg, inst_task in depends_on_order:
6937 uninst_task = Package(built=inst_pkg.built,
6938 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6939 metadata=inst_pkg.metadata,
6940 operation="uninstall",
6941 root_config=inst_pkg.root_config,
6942 type_name=inst_pkg.type_name)
6943 self._pkg_cache[uninst_task] = uninst_task
6944 # Enforce correct merge order with a hard dep.
6945 self.digraph.addnode(uninst_task, inst_task,
6946 priority=BlockerDepPriority.instance)
6947 # Count references to this blocker so that it can be
6948 # invalidated after nodes referencing it have been
6950 self._blocker_uninstalls.addnode(uninst_task, blocker)
6951 if not unresolved_blocks and not depends_on_order:
6952 self._irrelevant_blockers.add(blocker, parent)
6953 self._blocker_parents.remove_edge(blocker, parent)
6954 if not self._blocker_parents.parent_nodes(blocker):
6955 self._blocker_parents.remove(blocker)
6956 if not self._blocker_parents.child_nodes(parent):
6957 self._blocker_parents.remove(parent)
6958 if unresolved_blocks:
6959 self._unsolvable_blockers.add(blocker, parent)
# Decide whether blocker conflicts may be tolerated instead of aborting,
# based on the user's command-line options.
# NOTE(review): this is a numbered excerpt with lines elided -- the return
# statements are not visible; presumably returns True when any listed
# option is present in self.myopts, else False. Confirm against full source.
6963 def _accept_blocker_conflicts(self):
6965 for x in ("--buildpkgonly", "--fetchonly",
6966 "--fetch-all-uri", "--nodeps"):
6967 if x in self.myopts:
# Sort mygraph.order in place to bias leaf-node selection during merge
# ordering: deep system runtime deps are promoted, and nodes are ordered
# from highest to lowest parent (reference) count.
6972 def _merge_order_bias(self, mygraph):
6974 For optimal leaf node selection, promote deep system runtime deps and
6975 order nodes from highest to lowest overall reference count.
# node_info maps each node to its number of parents in mygraph; used as
# the final tie-breaker in cmp_merge_preference below.
6979 for node in mygraph.order:
6980 node_info[node] = len(mygraph.parent_nodes(node))
6981 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
# Comparator (cmp-style, adapted via cmp_sort_key): uninstall tasks and
# deep system deps are ordered specially; otherwise higher parent count
# sorts earlier. NOTE(review): several return lines are elided from this
# numbered excerpt, so the exact ordering of uninstall/system cases
# cannot be confirmed from this view.
6983 def cmp_merge_preference(node1, node2):
6985 if node1.operation == 'uninstall':
6986 if node2.operation == 'uninstall':
6990 if node2.operation == 'uninstall':
6991 if node1.operation == 'uninstall':
6995 node1_sys = node1 in deep_system_deps
6996 node2_sys = node2 in deep_system_deps
6997 if node1_sys != node2_sys:
# Fall-through: descending order by parent count.
7002 return node_info[node2] - node_info[node1]
7004 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
# Return a copy of the serialized merge task list, computing it on demand.
# Conflicts are resolved and _serialize_tasks() is retried until it
# succeeds (it raises _serialize_tasks_retry to request another pass).
# NOTE(review): lines are elided in this excerpt (e.g. the handling of
# the `reversed` parameter and the final return are not visible).
7006 def altlist(self, reversed=False):
7008 while self._serialized_tasks_cache is None:
7009 self._resolve_conflicts()
7011 self._serialized_tasks_cache, self._scheduler_graph = \
7012 self._serialize_tasks()
# Retry requested by _serialize_tasks (e.g. after adding "complete"
# to myparams); loop continues until a cache is produced.
7013 except self._serialize_tasks_retry:
# Return a copy so callers cannot mutate the cached list.
7016 retlist = self._serialized_tasks_cache[:]
# Return the scheduler graph (computed lazily), after break_refs() has
# severed references back into this depgraph. Destructive: the depgraph
# must not be used for further calculations afterwards (see docstring).
7021 def schedulerGraph(self):
7023 The scheduler graph is identical to the normal one except that
7024 uninstall edges are reversed in specific cases that require
7025 conflicting packages to be temporarily installed simultaneously.
7026 This is intended for use by the Scheduler in it's parallelization
7027 logic. It ensures that temporary simultaneous installation of
7028 conflicting packages is avoided when appropriate (especially for
7029 !!atom blockers), but allowed in specific cases that require it.
7031 Note that this method calls break_refs() which alters the state of
7032 internal Package instances such that this depgraph instance should
7033 not be used to perform any more calculations.
# NOTE(review): the line that populates self._scheduler_graph when it is
# None is elided from this excerpt (presumably via altlist()/
# _serialize_tasks()); confirm against full source.
7035 if self._scheduler_graph is None:
7037 self.break_refs(self._scheduler_graph.order)
7038 return self._scheduler_graph
# Replace each node's root_config with the original (non-fake) RootConfig
# so that held Package instances no longer keep the depgraph and
# FakeVartree alive on the heap.
7040 def break_refs(self, nodes):
7042 Take a mergelist like that returned from self.altlist() and
7043 break any references that lead back to the depgraph. This is
7044 useful if you want to hold references to packages without
7045 also holding the depgraph on the heap.
# Blocker instances in the list have no root_config attribute, hence
# the hasattr() guard. NOTE(review): the `for node in nodes:` line is
# elided from this numbered excerpt.
7048 if hasattr(node, "root_config"):
7049 # The FakeVartree references the _package_cache which
7050 # references the depgraph. So that Package instances don't
7051 # hold the depgraph and FakeVartree on the heap, replace
7052 # the RootConfig that references the FakeVartree with the
7053 # original RootConfig instance which references the actual
7055 node.root_config = \
7056 self._trees_orig[node.root_config.root]["root_config"]
# Prepare the graph for serialization: complete it, validate blockers,
# and process any slot conflicts. Raises _unknown_internal_error on
# failure of the first two steps (both are expected to succeed here).
7058 def _resolve_conflicts(self):
7059 if not self._complete_graph():
7060 raise self._unknown_internal_error()
7062 if not self.validate_blockers():
7063 raise self._unknown_internal_error()
7065 if self._slot_collision_info:
7066 self._process_slot_conflicts()
# Compute a merge-ordered task list from self.digraph by repeatedly
# selecting leaf nodes under progressively looser priority criteria.
# Returns (retlist, scheduler_graph). Raises _serialize_tasks_retry to
# request a retry after adding "complete" to myparams, and
# _unknown_internal_error on unresolved blockers/slot conflicts or
# circular dependencies.
# NOTE(review): this is a numbered excerpt with many source lines elided
# (break/continue/return lines, some assignments such as the initial
# asap_nodes/retlist/prefer_asap bindings are missing from view), so the
# exact control flow in several spots below is hedged rather than stated.
7068 def _serialize_tasks(self):
7070 if "--debug" in self.myopts:
7071 writemsg("\ndigraph:\n\n", noiselevel=-1)
7072 self.digraph.debug_print()
7073 writemsg("\n", noiselevel=-1)
7075 scheduler_graph = self.digraph.copy()
7077 if '--nodeps' in self.myopts:
7078 # Preserve the package order given on the command line.
7079 return ([node for node in scheduler_graph \
7080 if isinstance(node, Package) \
7081 and node.operation == 'merge'], scheduler_graph)
# Work on a private copy; nodes are removed as they are serialized.
7083 mygraph=self.digraph.copy()
7084 # Prune "nomerge" root nodes if nothing depends on them, since
7085 # otherwise they slow down merge order calculation. Don't remove
7086 # non-root nodes since they help optimize merge order in some cases
7087 # such as revdep-rebuild.
7088 removed_nodes = set()
7090 for node in mygraph.root_nodes():
7091 if not isinstance(node, Package) or \
7092 node.installed or node.onlydeps:
7093 removed_nodes.add(node)
7095 self.spinner.update()
7096 mygraph.difference_update(removed_nodes)
7097 if not removed_nodes:
7099 removed_nodes.clear()
7100 self._merge_order_bias(mygraph)
# cmp-style comparator used to order groups of mutually dependent
# (circular) nodes; reads priority_range from the enclosing loop scope.
7101 def cmp_circular_bias(n1, n2):
7103 RDEPEND is stronger than PDEPEND and this function
7104 measures such a strength bias within a circular
7105 dependency relationship.
7107 n1_n2_medium = n2 in mygraph.child_nodes(n1,
7108 ignore_priority=priority_range.ignore_medium_soft)
7109 n2_n1_medium = n1 in mygraph.child_nodes(n2,
7110 ignore_priority=priority_range.ignore_medium_soft)
7111 if n1_n2_medium == n2_n1_medium:
# NOTE(review): remaining return lines of the comparator are elided.
7116 myblocker_uninstalls = self._blocker_uninstalls.copy()
7118 # Contains uninstall tasks that have been scheduled to
7119 # occur after overlapping blockers have been installed.
7120 scheduled_uninstalls = set()
7121 # Contains any Uninstall tasks that have been ignored
7122 # in order to avoid the circular deps code path. These
7123 # correspond to blocker conflicts that could not be
7125 ignored_uninstall_tasks = set()
7126 have_uninstall_task = False
7127 complete = "complete" in self.myparams
7130 def get_nodes(**kwargs):
7132 Returns leaf nodes excluding Uninstall instances
7133 since those should be executed as late as possible.
7135 return [node for node in mygraph.leaf_nodes(**kwargs) \
7136 if isinstance(node, Package) and \
7137 (node.operation != "uninstall" or \
7138 node in scheduled_uninstalls)]
7140 # sys-apps/portage needs special treatment if ROOT="/"
7141 running_root = self._running_root.root
7142 from portage.const import PORTAGE_PACKAGE_ATOM
7143 runtime_deps = InternalPackageSet(
7144 initial_atoms=[PORTAGE_PACKAGE_ATOM])
7145 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
7146 PORTAGE_PACKAGE_ATOM)
7147 replacement_portage = self.mydbapi[running_root].match_pkgs(
7148 PORTAGE_PACKAGE_ATOM)
7151 running_portage = running_portage[0]
7153 running_portage = None
7155 if replacement_portage:
7156 replacement_portage = replacement_portage[0]
7158 replacement_portage = None
7160 if replacement_portage == running_portage:
7161 replacement_portage = None
7163 if replacement_portage is not None:
7164 # update from running_portage to replacement_portage asap
7165 asap_nodes.append(replacement_portage)
# Collect portage's own runtime deps so they are never uninstalled out
# from under the running instance (checked later via runtime_deps).
7167 if running_portage is not None:
7169 portage_rdepend = self._select_atoms_highest_available(
7170 running_root, running_portage.metadata["RDEPEND"],
7171 myuse=running_portage.use.enabled,
7172 parent=running_portage, strict=False)
7173 except portage.exception.InvalidDependString, e:
7174 portage.writemsg("!!! Invalid RDEPEND in " + \
7175 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7176 (running_root, running_portage.cpv, e), noiselevel=-1)
7178 portage_rdepend = []
7179 runtime_deps.update(atom for atom in portage_rdepend \
7180 if not atom.startswith("!"))
7182 def gather_deps(ignore_priority, mergeable_nodes,
7183 selected_nodes, node):
7185 Recursively gather a group of nodes that RDEPEND on
7186 eachother. This ensures that they are merged as a group
7187 and get their RDEPENDs satisfied as soon as possible.
# NOTE(review): the True/False return lines of this recursive helper
# are elided from this excerpt.
7189 if node in selected_nodes:
7191 if node not in mergeable_nodes:
7193 if node == replacement_portage and \
7194 mygraph.child_nodes(node,
7195 ignore_priority=priority_range.ignore_medium_soft):
7196 # Make sure that portage always has all of it's
7197 # RDEPENDs installed first.
7199 selected_nodes.add(node)
7200 for child in mygraph.child_nodes(node,
7201 ignore_priority=ignore_priority):
7202 if not gather_deps(ignore_priority,
7203 mergeable_nodes, selected_nodes, child):
# Priority filters that additionally treat blocker-uninstall edges as
# un-ignorable (BlockerDepPriority edges enforce hard ordering).
7207 def ignore_uninst_or_med(priority):
7208 if priority is BlockerDepPriority.instance:
7210 return priority_range.ignore_medium(priority)
7212 def ignore_uninst_or_med_soft(priority):
7213 if priority is BlockerDepPriority.instance:
7215 return priority_range.ignore_medium_soft(priority)
7217 tree_mode = "--tree" in self.myopts
7218 # Tracks whether or not the current iteration should prefer asap_nodes
7219 # if available. This is set to False when the previous iteration
7220 # failed to select any nodes. It is reset whenever nodes are
7221 # successfully selected.
7224 # Controls whether or not the current iteration should drop edges that
7225 # are "satisfied" by installed packages, in order to solve circular
7226 # dependencies. The deep runtime dependencies of installed packages are
7227 # not checked in this case (bug #199856), so it must be avoided
7228 # whenever possible.
7229 drop_satisfied = False
7231 # State of variables for successive iterations that loosen the
7232 # criteria for node selection.
7234 # iteration prefer_asap drop_satisfied
7239 # If no nodes are selected on the last iteration, it is due to
7240 # unresolved blockers or circular dependencies.
# Main selection loop: each pass chooses selected_nodes to append to the
# task list, loosening criteria whenever nothing can be selected.
7242 while not mygraph.empty():
7243 self.spinner.update()
7244 selected_nodes = None
7245 ignore_priority = None
7246 if drop_satisfied or (prefer_asap and asap_nodes):
7247 priority_range = DepPrioritySatisfiedRange
7249 priority_range = DepPriorityNormalRange
7250 if prefer_asap and asap_nodes:
7251 # ASAP nodes are merged before their soft deps. Go ahead and
7252 # select root nodes here if necessary, since it's typical for
7253 # the parent to have been removed from the graph already.
7254 asap_nodes = [node for node in asap_nodes \
7255 if mygraph.contains(node)]
7256 for node in asap_nodes:
7257 if not mygraph.child_nodes(node,
7258 ignore_priority=priority_range.ignore_soft):
7259 selected_nodes = [node]
7260 asap_nodes.remove(node)
# Normal path: scan leaf nodes under progressively weaker priorities.
7262 if not selected_nodes and \
7263 not (prefer_asap and asap_nodes):
7264 for i in xrange(priority_range.NONE,
7265 priority_range.MEDIUM_SOFT + 1):
7266 ignore_priority = priority_range.ignore_priority[i]
7267 nodes = get_nodes(ignore_priority=ignore_priority)
7269 # If there is a mix of uninstall nodes with other
7270 # types, save the uninstall nodes for later since
7271 # sometimes a merge node will render an uninstall
7272 # node unnecessary (due to occupying the same slot),
7273 # and we want to avoid executing a separate uninstall
7274 # task in that case.
7276 good_uninstalls = []
7277 with_some_uninstalls_excluded = []
7279 if node.operation == "uninstall":
7280 slot_node = self.mydbapi[node.root
7281 ].match_pkgs(node.slot_atom)
7283 slot_node[0].operation == "merge":
7285 good_uninstalls.append(node)
7286 with_some_uninstalls_excluded.append(node)
7288 nodes = good_uninstalls
7289 elif with_some_uninstalls_excluded:
7290 nodes = with_some_uninstalls_excluded
7294 if ignore_priority is None and not tree_mode:
7295 # Greedily pop all of these nodes since no
7296 # relationship has been ignored. This optimization
7297 # destroys --tree output, so it's disabled in tree
7299 selected_nodes = nodes
7301 # For optimal merge order:
7302 # * Only pop one node.
7303 # * Removing a root node (node without a parent)
7304 # will not produce a leaf node, so avoid it.
7305 # * It's normal for a selected uninstall to be a
7306 # root node, so don't check them for parents.
7308 if node.operation == "uninstall" or \
7309 mygraph.parent_nodes(node):
7310 selected_nodes = [node]
# Fallback: try to pull out a mutually-RDEPENDing group via gather_deps.
7316 if not selected_nodes:
7317 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7319 mergeable_nodes = set(nodes)
7320 if prefer_asap and asap_nodes:
7322 for i in xrange(priority_range.SOFT,
7323 priority_range.MEDIUM_SOFT + 1):
7324 ignore_priority = priority_range.ignore_priority[i]
7326 if not mygraph.parent_nodes(node):
7328 selected_nodes = set()
7329 if gather_deps(ignore_priority,
7330 mergeable_nodes, selected_nodes, node):
7333 selected_nodes = None
7337 if prefer_asap and asap_nodes and not selected_nodes:
7338 # We failed to find any asap nodes to merge, so ignore
7339 # them for the next iteration.
7343 if selected_nodes and ignore_priority is not None:
7344 # Try to merge ignored medium_soft deps as soon as possible
7345 # if they're not satisfied by installed packages.
7346 for node in selected_nodes:
7347 children = set(mygraph.child_nodes(node))
7348 soft = children.difference(
7349 mygraph.child_nodes(node,
7350 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7351 medium_soft = children.difference(
7352 mygraph.child_nodes(node,
7354 DepPrioritySatisfiedRange.ignore_medium_soft))
7355 medium_soft.difference_update(soft)
7356 for child in medium_soft:
7357 if child in selected_nodes:
7359 if child in asap_nodes:
7361 asap_nodes.append(child)
# Groups of nodes are ordered by circular-dependency strength bias.
7363 if selected_nodes and len(selected_nodes) > 1:
7364 if not isinstance(selected_nodes, list):
7365 selected_nodes = list(selected_nodes)
7366 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7368 if not selected_nodes and not myblocker_uninstalls.is_empty():
7369 # An Uninstall task needs to be executed in order to
7370 # avoid conflict if possible.
7373 priority_range = DepPrioritySatisfiedRange
7375 priority_range = DepPriorityNormalRange
7377 mergeable_nodes = get_nodes(
7378 ignore_priority=ignore_uninst_or_med)
# Among candidate uninstall tasks, prefer the one whose parents have
# the fewest unsatisfied deps (tracked via min_parent_deps below).
7380 min_parent_deps = None
7382 for task in myblocker_uninstalls.leaf_nodes():
7383 # Do some sanity checks so that system or world packages
7384 # don't get uninstalled inappropriately here (only really
7385 # necessary when --complete-graph has not been enabled).
7387 if task in ignored_uninstall_tasks:
7390 if task in scheduled_uninstalls:
7391 # It's been scheduled but it hasn't
7392 # been executed yet due to dependence
7393 # on installation of blocking packages.
7396 root_config = self.roots[task.root]
7397 inst_pkg = self._pkg_cache[
7398 ("installed", task.root, task.cpv, "nomerge")]
7400 if self.digraph.contains(inst_pkg):
7403 forbid_overlap = False
7404 heuristic_overlap = False
7405 for blocker in myblocker_uninstalls.parent_nodes(task):
# EAPI 0/1 blockers have ambiguous overlap semantics, so they
# are treated heuristically; !!atom blockers forbid overlap.
7406 if blocker.eapi in ("0", "1"):
7407 heuristic_overlap = True
7408 elif blocker.atom.blocker.overlap.forbid:
7409 forbid_overlap = True
7411 if forbid_overlap and running_root == task.root:
7414 if heuristic_overlap and running_root == task.root:
7415 # Never uninstall sys-apps/portage or it's essential
7416 # dependencies, except through replacement.
7418 runtime_dep_atoms = \
7419 list(runtime_deps.iterAtomsForPackage(task))
7420 except portage.exception.InvalidDependString, e:
7421 portage.writemsg("!!! Invalid PROVIDE in " + \
7422 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7423 (task.root, task.cpv, e), noiselevel=-1)
7427 # Don't uninstall a runtime dep if it appears
7428 # to be the only suitable one installed.
7430 vardb = root_config.trees["vartree"].dbapi
7431 for atom in runtime_dep_atoms:
7432 other_version = None
7433 for pkg in vardb.match_pkgs(atom):
# Skip the task's own installed instance (matched by cpv
# plus COUNTER) when looking for an alternative provider.
7434 if pkg.cpv == task.cpv and \
7435 pkg.metadata["COUNTER"] == \
7436 task.metadata["COUNTER"]:
7440 if other_version is None:
7446 # For packages in the system set, don't take
7447 # any chances. If the conflict can't be resolved
7448 # by a normal replacement operation then abort.
7451 for atom in root_config.sets[
7452 "system"].iterAtomsForPackage(task):
7455 except portage.exception.InvalidDependString, e:
7456 portage.writemsg("!!! Invalid PROVIDE in " + \
7457 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7458 (task.root, task.cpv, e), noiselevel=-1)
7464 # Note that the world check isn't always
7465 # necessary since self._complete_graph() will
7466 # add all packages from the system and world sets to the
7467 # graph. This just allows unresolved conflicts to be
7468 # detected as early as possible, which makes it possible
7469 # to avoid calling self._complete_graph() when it is
7470 # unnecessary due to blockers triggering an abortion.
7472 # For packages in the world set, go ahead an uninstall
7473 # when necessary, as long as the atom will be satisfied
7474 # in the final state.
7475 graph_db = self.mydbapi[task.root]
7478 for atom in root_config.sets[
7479 "world"].iterAtomsForPackage(task):
7481 for pkg in graph_db.match_pkgs(atom):
7488 self._blocked_world_pkgs[inst_pkg] = atom
7490 except portage.exception.InvalidDependString, e:
7491 portage.writemsg("!!! Invalid PROVIDE in " + \
7492 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7493 (task.root, task.cpv, e), noiselevel=-1)
7499 # Check the deps of parent nodes to ensure that
7500 # the chosen task produces a leaf node. Maybe
7501 # this can be optimized some more to make the
7502 # best possible choice, but the current algorithm
7503 # is simple and should be near optimal for most
7505 mergeable_parent = False
7507 for parent in mygraph.parent_nodes(task):
7508 parent_deps.update(mygraph.child_nodes(parent,
7509 ignore_priority=priority_range.ignore_medium_soft))
7510 if parent in mergeable_nodes and \
7511 gather_deps(ignore_uninst_or_med_soft,
7512 mergeable_nodes, set(), parent):
7513 mergeable_parent = True
7515 if not mergeable_parent:
7518 parent_deps.remove(task)
7519 if min_parent_deps is None or \
7520 len(parent_deps) < min_parent_deps:
7521 min_parent_deps = len(parent_deps)
7524 if uninst_task is not None:
7525 # The uninstall is performed only after blocking
7526 # packages have been merged on top of it. File
7527 # collisions between blocking packages are detected
7528 # and removed from the list of files to be uninstalled.
7529 scheduled_uninstalls.add(uninst_task)
7530 parent_nodes = mygraph.parent_nodes(uninst_task)
7532 # Reverse the parent -> uninstall edges since we want
7533 # to do the uninstall after blocking packages have
7534 # been merged on top of it.
7535 mygraph.remove(uninst_task)
7536 for blocked_pkg in parent_nodes:
7537 mygraph.add(blocked_pkg, uninst_task,
7538 priority=BlockerDepPriority.instance)
7539 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7540 scheduler_graph.add(blocked_pkg, uninst_task,
7541 priority=BlockerDepPriority.instance)
7543 # Reset the state variables for leaf node selection and
7544 # continue trying to select leaf nodes.
7546 drop_satisfied = False
7549 if not selected_nodes:
7550 # Only select root nodes as a last resort. This case should
7551 # only trigger when the graph is nearly empty and the only
7552 # remaining nodes are isolated (no parents or children). Since
7553 # the nodes must be isolated, ignore_priority is not needed.
7554 selected_nodes = get_nodes()
7556 if not selected_nodes and not drop_satisfied:
7557 drop_satisfied = True
7560 if not selected_nodes and not myblocker_uninstalls.is_empty():
7561 # If possible, drop an uninstall task here in order to avoid
7562 # the circular deps code path. The corresponding blocker will
7563 # still be counted as an unresolved conflict.
7565 for node in myblocker_uninstalls.leaf_nodes():
7567 mygraph.remove(node)
7572 ignored_uninstall_tasks.add(node)
7575 if uninst_task is not None:
7576 # Reset the state variables for leaf node selection and
7577 # continue trying to select leaf nodes.
7579 drop_satisfied = False
# Nothing selectable even after all fallbacks: report circular deps.
7582 if not selected_nodes:
7583 self._circular_deps_for_display = mygraph
7584 raise self._unknown_internal_error()
7586 # At this point, we've succeeded in selecting one or more nodes, so
7587 # reset state variables for leaf node selection.
7589 drop_satisfied = False
7591 mygraph.difference_update(selected_nodes)
7593 for node in selected_nodes:
7594 if isinstance(node, Package) and \
7595 node.operation == "nomerge":
7598 # Handle interactions between blockers
7599 # and uninstallation tasks.
7600 solved_blockers = set()
7602 if isinstance(node, Package) and \
7603 "uninstall" == node.operation:
7604 have_uninstall_task = True
7607 vardb = self.trees[node.root]["vartree"].dbapi
7608 previous_cpv = vardb.match(node.slot_atom)
7610 # The package will be replaced by this one, so remove
7611 # the corresponding Uninstall task if necessary.
7612 previous_cpv = previous_cpv[0]
7614 ("installed", node.root, previous_cpv, "uninstall")
7616 mygraph.remove(uninst_task)
7620 if uninst_task is not None and \
7621 uninst_task not in ignored_uninstall_tasks and \
7622 myblocker_uninstalls.contains(uninst_task):
7623 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7624 myblocker_uninstalls.remove(uninst_task)
7625 # Discard any blockers that this Uninstall solves.
7626 for blocker in blocker_nodes:
7627 if not myblocker_uninstalls.child_nodes(blocker):
7628 myblocker_uninstalls.remove(blocker)
7629 solved_blockers.add(blocker)
7631 retlist.append(node)
7633 if (isinstance(node, Package) and \
7634 "uninstall" == node.operation) or \
7635 (uninst_task is not None and \
7636 uninst_task in scheduled_uninstalls):
7637 # Include satisfied blockers in the merge list
7638 # since the user might be interested and also
7639 # it serves as an indicator that blocking packages
7640 # will be temporarily installed simultaneously.
7641 for blocker in solved_blockers:
7642 retlist.append(Blocker(atom=blocker.atom,
7643 root=blocker.root, eapi=blocker.eapi,
# Any blockers left unsolved are appended so they are displayed.
7646 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7647 for node in myblocker_uninstalls.root_nodes():
7648 unsolvable_blockers.add(node)
7650 for blocker in unsolvable_blockers:
7651 retlist.append(blocker)
7653 # If any Uninstall tasks need to be executed in order
7654 # to avoid a conflict, complete the graph with any
7655 # dependencies that may have been initially
7656 # neglected (to ensure that unsafe Uninstall tasks
7657 # are properly identified and blocked from execution).
7658 if have_uninstall_task and \
7660 not unsolvable_blockers:
7661 self.myparams.add("complete")
7662 raise self._serialize_tasks_retry("")
7664 if unsolvable_blockers and \
7665 not self._accept_blocker_conflicts():
7666 self._unsatisfied_blockers_for_display = unsolvable_blockers
7667 self._serialized_tasks_cache = retlist[:]
7668 self._scheduler_graph = scheduler_graph
7669 raise self._unknown_internal_error()
7671 if self._slot_collision_info and \
7672 not self._accept_blocker_conflicts():
7673 self._serialized_tasks_cache = retlist[:]
7674 self._scheduler_graph = scheduler_graph
7675 raise self._unknown_internal_error()
7677 return retlist, scheduler_graph
# Display diagnostics for a circular-dependency failure: prune acyclic
# root nodes to reduce noise, print the remaining cycle in --tree mode,
# and advise the user about disabling USE flags.
7679 def _show_circular_deps(self, mygraph):
7680 # No leaf nodes are available, so we have a circular
7681 # dependency panic situation. Reduce the noise level to a
7682 # minimum via repeated elimination of root nodes since they
7683 # have no parents and thus can not be part of a cycle.
# NOTE(review): the surrounding while-loop line is elided from this
# numbered excerpt; presumably root nodes are removed repeatedly until
# none remain. Confirm against full source.
7685 root_nodes = mygraph.root_nodes(
7686 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7689 mygraph.difference_update(root_nodes)
7690 # Display the USE flags that are enabled on nodes that are part
7691 # of dependency cycles in case that helps the user decide to
7692 # disable some of them.
# Build a bottom-up elimination order on a scratch copy, then reverse
# it so the display reads top-down.
7694 tempgraph = mygraph.copy()
7695 while not tempgraph.empty():
7696 nodes = tempgraph.leaf_nodes()
# Pure cycle with no leaves: fall back to arbitrary order[0].
7698 node = tempgraph.order[0]
7701 display_order.append(node)
7702 tempgraph.remove(node)
7703 display_order.reverse()
# Force verbose tree output regardless of the user's own options.
7704 self.myopts.pop("--quiet", None)
7705 self.myopts.pop("--verbose", None)
7706 self.myopts["--tree"] = True
7707 portage.writemsg("\n\n", noiselevel=-1)
7708 self.display(display_order)
7709 prefix = colorize("BAD", " * ")
7710 portage.writemsg("\n", noiselevel=-1)
7711 portage.writemsg(prefix + "Error: circular dependencies:\n",
7713 portage.writemsg("\n", noiselevel=-1)
7714 mygraph.debug_print()
7715 portage.writemsg("\n", noiselevel=-1)
7716 portage.writemsg(prefix + "Note that circular dependencies " + \
7717 "can often be avoided by temporarily\n", noiselevel=-1)
7718 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7719 "optional dependencies.\n", noiselevel=-1)
# Re-display the serialized merge list, unless the identical list (or
# its reverse) has already been displayed (tracked via _displayed_list
# to avoid redundant output from display_problems()).
7721 def _show_merge_list(self):
7722 if self._serialized_tasks_cache is not None and \
7723 not (self._displayed_list and \
7724 (self._displayed_list == self._serialized_tasks_cache or \
7725 self._displayed_list == \
7726 list(reversed(self._serialized_tasks_cache)))):
7727 display_list = self._serialized_tasks_cache[:]
# --tree output is conventionally shown in reverse merge order.
7728 if "--tree" in self.myopts:
7729 display_list.reverse()
7730 self.display(display_list)
# Report unsatisfied blockers: show the merge list, then for each
# conflicting package print (a pruned selection of) the parents and
# atoms that pulled it in, so the user can see why the conflict exists.
7732 def _show_unsatisfied_blockers(self, blockers):
7733 self._show_merge_list()
7734 msg = "Error: The above package list contains " + \
7735 "packages which cannot be installed " + \
7736 "at the same time on the same system."
7737 prefix = colorize("BAD", " * ")
7738 from textwrap import wrap
7739 portage.writemsg("\n", noiselevel=-1)
7740 for line in wrap(msg, 70):
7741 portage.writemsg(prefix + line + "\n", noiselevel=-1)
7743 # Display the conflicting packages along with the packages
7744 # that pulled them in. This is helpful for troubleshooting
7745 # cases in which blockers don't solve automatically and
7746 # the reasons are not apparent from the normal merge list
# Map each blocked package to the set of (parent, atom) pairs that
# pulled it into the graph; fall back to a synthetic "@world" parent
# for packages only required by the world set.
7750 for blocker in blockers:
7751 for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7752 self._blocker_parents.parent_nodes(blocker)):
7753 parent_atoms = self._parent_atoms.get(pkg)
7754 if not parent_atoms:
7755 atom = self._blocked_world_pkgs.get(pkg)
7756 if atom is not None:
7757 parent_atoms = set([("@world", atom)])
7759 conflict_pkgs[pkg] = parent_atoms
7762 # Reduce noise by pruning packages that are only
7763 # pulled in by other conflict packages.
7765 for pkg, parent_atoms in conflict_pkgs.iteritems():
7766 relevant_parent = False
7767 for parent, atom in parent_atoms:
7768 if parent not in conflict_pkgs:
7769 relevant_parent = True
7771 if not relevant_parent:
7772 pruned_pkgs.add(pkg)
7773 for pkg in pruned_pkgs:
7774 del conflict_pkgs[pkg]
7780 # Max number of parents shown, to avoid flooding the display.
7782 for pkg, parent_atoms in conflict_pkgs.iteritems():
7786 # Prefer packages that are not directly involved in a conflict.
7787 for parent_atom in parent_atoms:
7788 if len(pruned_list) >= max_parents:
7790 parent, atom = parent_atom
7791 if parent not in conflict_pkgs:
7792 pruned_list.add(parent_atom)
# Second pass tops the list up to max_parents with any remaining
# parents, including ones that are themselves in conflict.
7794 for parent_atom in parent_atoms:
7795 if len(pruned_list) >= max_parents:
7797 pruned_list.add(parent_atom)
7799 omitted_parents = len(parent_atoms) - len(pruned_list)
7800 msg.append(indent + "%s pulled in by\n" % pkg)
7802 for parent_atom in pruned_list:
7803 parent, atom = parent_atom
7804 msg.append(2*indent)
7805 if isinstance(parent,
7806 (PackageArg, AtomArg)):
7807 # For PackageArg and AtomArg types, it's
7808 # redundant to display the atom attribute.
7809 msg.append(str(parent))
7811 # Display the specific atom from SetArg or
7813 msg.append("%s required by %s" % (atom, parent))
7817 msg.append(2*indent)
7818 msg.append("(and %d more)\n" % omitted_parents)
7822 sys.stderr.write("".join(msg))
7825 if "--quiet" not in self.myopts:
7826 show_blocker_docs_link()
7828 def display(self, mylist, favorites=[], verbosity=None):
7830 # This is used to prevent display_problems() from
7831 # redundantly displaying this exact same merge list
7832 # again via _show_merge_list().
7833 self._displayed_list = mylist
7835 if verbosity is None:
7836 verbosity = ("--quiet" in self.myopts and 1 or \
7837 "--verbose" in self.myopts and 3 or 2)
7838 favorites_set = InternalPackageSet(favorites)
7839 oneshot = "--oneshot" in self.myopts or \
7840 "--onlydeps" in self.myopts
7841 columns = "--columns" in self.myopts
7846 counters = PackageCounters()
7848 if verbosity == 1 and "--verbose" not in self.myopts:
7849 def create_use_string(*args):
7852 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7854 is_new, reinst_flags,
7855 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7856 alphabetical=("--alphabetical" in self.myopts)):
7864 cur_iuse = set(cur_iuse)
7865 enabled_flags = cur_iuse.intersection(cur_use)
7866 removed_iuse = set(old_iuse).difference(cur_iuse)
7867 any_iuse = cur_iuse.union(old_iuse)
7868 any_iuse = list(any_iuse)
7870 for flag in any_iuse:
7873 reinst_flag = reinst_flags and flag in reinst_flags
7874 if flag in enabled_flags:
7876 if is_new or flag in old_use and \
7877 (all_flags or reinst_flag):
7878 flag_str = red(flag)
7879 elif flag not in old_iuse:
7880 flag_str = yellow(flag) + "%*"
7881 elif flag not in old_use:
7882 flag_str = green(flag) + "*"
7883 elif flag in removed_iuse:
7884 if all_flags or reinst_flag:
7885 flag_str = yellow("-" + flag) + "%"
7888 flag_str = "(" + flag_str + ")"
7889 removed.append(flag_str)
7892 if is_new or flag in old_iuse and \
7893 flag not in old_use and \
7894 (all_flags or reinst_flag):
7895 flag_str = blue("-" + flag)
7896 elif flag not in old_iuse:
7897 flag_str = yellow("-" + flag)
7898 if flag not in iuse_forced:
7900 elif flag in old_use:
7901 flag_str = green("-" + flag) + "*"
7903 if flag in iuse_forced:
7904 flag_str = "(" + flag_str + ")"
7906 enabled.append(flag_str)
7908 disabled.append(flag_str)
7911 ret = " ".join(enabled)
7913 ret = " ".join(enabled + disabled + removed)
7915 ret = '%s="%s" ' % (name, ret)
7918 repo_display = RepoDisplay(self.roots)
7922 mygraph = self.digraph.copy()
7924 # If there are any Uninstall instances, add the corresponding
7925 # blockers to the digraph (useful for --tree display).
7927 executed_uninstalls = set(node for node in mylist \
7928 if isinstance(node, Package) and node.operation == "unmerge")
7930 for uninstall in self._blocker_uninstalls.leaf_nodes():
7931 uninstall_parents = \
7932 self._blocker_uninstalls.parent_nodes(uninstall)
7933 if not uninstall_parents:
7936 # Remove the corresponding "nomerge" node and substitute
7937 # the Uninstall node.
7938 inst_pkg = self._pkg_cache[
7939 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7941 mygraph.remove(inst_pkg)
7946 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7948 inst_pkg_blockers = []
7950 # Break the Package -> Uninstall edges.
7951 mygraph.remove(uninstall)
7953 # Resolution of a package's blockers
7954 # depend on it's own uninstallation.
7955 for blocker in inst_pkg_blockers:
7956 mygraph.add(uninstall, blocker)
7958 # Expand Package -> Uninstall edges into
7959 # Package -> Blocker -> Uninstall edges.
7960 for blocker in uninstall_parents:
7961 mygraph.add(uninstall, blocker)
7962 for parent in self._blocker_parents.parent_nodes(blocker):
7963 if parent != inst_pkg:
7964 mygraph.add(blocker, parent)
7966 # If the uninstall task did not need to be executed because
7967 # of an upgrade, display Blocker -> Upgrade edges since the
7968 # corresponding Blocker -> Uninstall edges will not be shown.
7970 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7971 if upgrade_node is not None and \
7972 uninstall not in executed_uninstalls:
7973 for blocker in uninstall_parents:
7974 mygraph.add(upgrade_node, blocker)
7976 unsatisfied_blockers = []
7981 if isinstance(x, Blocker) and not x.satisfied:
7982 unsatisfied_blockers.append(x)
7985 if "--tree" in self.myopts:
7986 depth = len(tree_nodes)
7987 while depth and graph_key not in \
7988 mygraph.child_nodes(tree_nodes[depth-1]):
7991 tree_nodes = tree_nodes[:depth]
7992 tree_nodes.append(graph_key)
7993 display_list.append((x, depth, True))
7994 shown_edges.add((graph_key, tree_nodes[depth-1]))
7996 traversed_nodes = set() # prevent endless circles
7997 traversed_nodes.add(graph_key)
7998 def add_parents(current_node, ordered):
8000 # Do not traverse to parents if this node is an
8001 # an argument or a direct member of a set that has
8002 # been specified as an argument (system or world).
8003 if current_node not in self._set_nodes:
8004 parent_nodes = mygraph.parent_nodes(current_node)
8006 child_nodes = set(mygraph.child_nodes(current_node))
8007 selected_parent = None
8008 # First, try to avoid a direct cycle.
8009 for node in parent_nodes:
8010 if not isinstance(node, (Blocker, Package)):
8012 if node not in traversed_nodes and \
8013 node not in child_nodes:
8014 edge = (current_node, node)
8015 if edge in shown_edges:
8017 selected_parent = node
8019 if not selected_parent:
8020 # A direct cycle is unavoidable.
8021 for node in parent_nodes:
8022 if not isinstance(node, (Blocker, Package)):
8024 if node not in traversed_nodes:
8025 edge = (current_node, node)
8026 if edge in shown_edges:
8028 selected_parent = node
8031 shown_edges.add((current_node, selected_parent))
8032 traversed_nodes.add(selected_parent)
8033 add_parents(selected_parent, False)
8034 display_list.append((current_node,
8035 len(tree_nodes), ordered))
8036 tree_nodes.append(current_node)
8038 add_parents(graph_key, True)
8040 display_list.append((x, depth, True))
8041 mylist = display_list
8042 for x in unsatisfied_blockers:
8043 mylist.append((x, 0, True))
8045 last_merge_depth = 0
8046 for i in xrange(len(mylist)-1,-1,-1):
8047 graph_key, depth, ordered = mylist[i]
8048 if not ordered and depth == 0 and i > 0 \
8049 and graph_key == mylist[i-1][0] and \
8050 mylist[i-1][1] == 0:
8051 # An ordered node got a consecutive duplicate when the tree was
8055 if ordered and graph_key[-1] != "nomerge":
8056 last_merge_depth = depth
8058 if depth >= last_merge_depth or \
8059 i < len(mylist) - 1 and \
8060 depth >= mylist[i+1][1]:
8063 from portage import flatten
8064 from portage.dep import use_reduce, paren_reduce
8065 # files to fetch list - avoids counting a same file twice
8066 # in size display (verbose mode)
8069 # Use this set to detect when all the "repoadd" strings are "[0]"
8070 # and disable the entire repo display in this case.
8073 for mylist_index in xrange(len(mylist)):
8074 x, depth, ordered = mylist[mylist_index]
8078 portdb = self.trees[myroot]["porttree"].dbapi
8079 bindb = self.trees[myroot]["bintree"].dbapi
8080 vardb = self.trees[myroot]["vartree"].dbapi
8081 vartree = self.trees[myroot]["vartree"]
8082 pkgsettings = self.pkgsettings[myroot]
8085 indent = " " * depth
8087 if isinstance(x, Blocker):
8089 blocker_style = "PKG_BLOCKER_SATISFIED"
8090 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
8092 blocker_style = "PKG_BLOCKER"
8093 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
8095 counters.blocks += 1
8097 counters.blocks_satisfied += 1
8098 resolved = portage.key_expand(
8099 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
8100 if "--columns" in self.myopts and "--quiet" in self.myopts:
8101 addl += " " + colorize(blocker_style, resolved)
8103 addl = "[%s %s] %s%s" % \
8104 (colorize(blocker_style, "blocks"),
8105 addl, indent, colorize(blocker_style, resolved))
8106 block_parents = self._blocker_parents.parent_nodes(x)
8107 block_parents = set([pnode[2] for pnode in block_parents])
8108 block_parents = ", ".join(block_parents)
8110 addl += colorize(blocker_style,
8111 " (\"%s\" is blocking %s)") % \
8112 (str(x.atom).lstrip("!"), block_parents)
8114 addl += colorize(blocker_style,
8115 " (is blocking %s)") % block_parents
8116 if isinstance(x, Blocker) and x.satisfied:
8121 blockers.append(addl)
8124 pkg_merge = ordered and pkg_status == "merge"
8125 if not pkg_merge and pkg_status == "merge":
8126 pkg_status = "nomerge"
8127 built = pkg_type != "ebuild"
8128 installed = pkg_type == "installed"
8130 metadata = pkg.metadata
8132 repo_name = metadata["repository"]
8133 if pkg_type == "ebuild":
8134 ebuild_path = portdb.findname(pkg_key)
8135 if not ebuild_path: # shouldn't happen
8136 raise portage.exception.PackageNotFound(pkg_key)
8137 repo_path_real = os.path.dirname(os.path.dirname(
8138 os.path.dirname(ebuild_path)))
8140 repo_path_real = portdb.getRepositoryPath(repo_name)
8141 pkg_use = list(pkg.use.enabled)
8143 restrict = flatten(use_reduce(paren_reduce(
8144 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8145 except portage.exception.InvalidDependString, e:
8146 if not pkg.installed:
8147 show_invalid_depstring_notice(x,
8148 pkg.metadata["RESTRICT"], str(e))
8152 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8153 "fetch" in restrict:
8156 counters.restrict_fetch += 1
8157 if portdb.fetch_check(pkg_key, pkg_use):
8160 counters.restrict_fetch_satisfied += 1
8162 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
8163 #param is used for -u, where you still *do* want to see when something is being upgraded.
8166 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8167 if vardb.cpv_exists(pkg_key):
8168 addl=" "+yellow("R")+fetch+" "
8171 counters.reinst += 1
8172 elif pkg_status == "uninstall":
8173 counters.uninst += 1
8174 # filter out old-style virtual matches
8175 elif installed_versions and \
8176 portage.cpv_getkey(installed_versions[0]) == \
8177 portage.cpv_getkey(pkg_key):
8178 myinslotlist = vardb.match(pkg.slot_atom)
8179 # If this is the first install of a new-style virtual, we
8180 # need to filter out old-style virtual matches.
8181 if myinslotlist and \
8182 portage.cpv_getkey(myinslotlist[0]) != \
8183 portage.cpv_getkey(pkg_key):
8186 myoldbest = myinslotlist[:]
8188 if not portage.dep.cpvequal(pkg_key,
8189 portage.best([pkg_key] + myoldbest)):
8191 addl += turquoise("U")+blue("D")
8193 counters.downgrades += 1
8196 addl += turquoise("U") + " "
8198 counters.upgrades += 1
8200 # New slot, mark it new.
8201 addl = " " + green("NS") + fetch + " "
8202 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8204 counters.newslot += 1
8206 if "--changelog" in self.myopts:
8207 inst_matches = vardb.match(pkg.slot_atom)
8209 changelogs.extend(self.calc_changelog(
8210 portdb.findname(pkg_key),
8211 inst_matches[0], pkg_key))
8213 addl = " " + green("N") + " " + fetch + " "
8222 forced_flags = set()
8223 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8224 forced_flags.update(pkgsettings.useforce)
8225 forced_flags.update(pkgsettings.usemask)
8227 cur_use = [flag for flag in pkg.use.enabled \
8228 if flag in pkg.iuse.all]
8229 cur_iuse = sorted(pkg.iuse.all)
8231 if myoldbest and myinslotlist:
8232 previous_cpv = myoldbest[0]
8234 previous_cpv = pkg.cpv
8235 if vardb.cpv_exists(previous_cpv):
8236 old_iuse, old_use = vardb.aux_get(
8237 previous_cpv, ["IUSE", "USE"])
8238 old_iuse = list(set(
8239 filter_iuse_defaults(old_iuse.split())))
8241 old_use = old_use.split()
8248 old_use = [flag for flag in old_use if flag in old_iuse]
8250 use_expand = pkgsettings["USE_EXPAND"].lower().split()
8252 use_expand.reverse()
8253 use_expand_hidden = \
8254 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8256 def map_to_use_expand(myvals, forcedFlags=False,
8260 for exp in use_expand:
8263 for val in myvals[:]:
8264 if val.startswith(exp.lower()+"_"):
8265 if val in forced_flags:
8266 forced[exp].add(val[len(exp)+1:])
8267 ret[exp].append(val[len(exp)+1:])
8270 forced["USE"] = [val for val in myvals \
8271 if val in forced_flags]
8273 for exp in use_expand_hidden:
8279 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8280 # are the only thing that triggered reinstallation.
8281 reinst_flags_map = {}
8282 reinstall_for_flags = self._reinstall_nodes.get(pkg)
8283 reinst_expand_map = None
8284 if reinstall_for_flags:
8285 reinst_flags_map = map_to_use_expand(
8286 list(reinstall_for_flags), removeHidden=False)
8287 for k in list(reinst_flags_map):
8288 if not reinst_flags_map[k]:
8289 del reinst_flags_map[k]
8290 if not reinst_flags_map.get("USE"):
8291 reinst_expand_map = reinst_flags_map.copy()
8292 reinst_expand_map.pop("USE", None)
8293 if reinst_expand_map and \
8294 not set(reinst_expand_map).difference(
8296 use_expand_hidden = \
8297 set(use_expand_hidden).difference(
8300 cur_iuse_map, iuse_forced = \
8301 map_to_use_expand(cur_iuse, forcedFlags=True)
8302 cur_use_map = map_to_use_expand(cur_use)
8303 old_iuse_map = map_to_use_expand(old_iuse)
8304 old_use_map = map_to_use_expand(old_use)
8307 use_expand.insert(0, "USE")
8309 for key in use_expand:
8310 if key in use_expand_hidden:
8312 verboseadd += create_use_string(key.upper(),
8313 cur_iuse_map[key], iuse_forced[key],
8314 cur_use_map[key], old_iuse_map[key],
8315 old_use_map[key], is_new,
8316 reinst_flags_map.get(key))
8321 if pkg_type == "ebuild" and pkg_merge:
8323 myfilesdict = portdb.getfetchsizes(pkg_key,
8324 useflags=pkg_use, debug=self.edebug)
8325 except portage.exception.InvalidDependString, e:
8326 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8327 show_invalid_depstring_notice(x, src_uri, str(e))
8330 if myfilesdict is None:
8331 myfilesdict="[empty/missing/bad digest]"
8333 for myfetchfile in myfilesdict:
8334 if myfetchfile not in myfetchlist:
8335 mysize+=myfilesdict[myfetchfile]
8336 myfetchlist.append(myfetchfile)
8338 counters.totalsize += mysize
8339 verboseadd += format_size(mysize)
8342 # assign index for a previous version in the same slot
8343 has_previous = False
8344 repo_name_prev = None
8345 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8347 slot_matches = vardb.match(slot_atom)
8350 repo_name_prev = vardb.aux_get(slot_matches[0],
8353 # now use the data to generate output
8354 if pkg.installed or not has_previous:
8355 repoadd = repo_display.repoStr(repo_path_real)
8357 repo_path_prev = None
8359 repo_path_prev = portdb.getRepositoryPath(
8361 if repo_path_prev == repo_path_real:
8362 repoadd = repo_display.repoStr(repo_path_real)
8364 repoadd = "%s=>%s" % (
8365 repo_display.repoStr(repo_path_prev),
8366 repo_display.repoStr(repo_path_real))
8368 repoadd_set.add(repoadd)
8370 xs = [portage.cpv_getkey(pkg_key)] + \
8371 list(portage.catpkgsplit(pkg_key)[2:])
8378 if "COLUMNWIDTH" in self.settings:
8380 mywidth = int(self.settings["COLUMNWIDTH"])
8381 except ValueError, e:
8382 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8384 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8385 self.settings["COLUMNWIDTH"], noiselevel=-1)
8387 oldlp = mywidth - 30
8390 # Convert myoldbest from a list to a string.
8394 for pos, key in enumerate(myoldbest):
8395 key = portage.catpkgsplit(key)[2] + \
8396 "-" + portage.catpkgsplit(key)[3]
8397 if key[-3:] == "-r0":
8399 myoldbest[pos] = key
8400 myoldbest = blue("["+", ".join(myoldbest)+"]")
8403 root_config = self.roots[myroot]
8404 system_set = root_config.sets["system"]
8405 world_set = root_config.sets["world"]
8410 pkg_system = system_set.findAtomForPackage(pkg)
8411 pkg_world = world_set.findAtomForPackage(pkg)
8412 if not (oneshot or pkg_world) and \
8413 myroot == self.target_root and \
8414 favorites_set.findAtomForPackage(pkg):
8415 # Maybe it will be added to world now.
8416 if create_world_atom(pkg, favorites_set, root_config):
8418 except portage.exception.InvalidDependString:
8419 # This is reported elsewhere if relevant.
8422 def pkgprint(pkg_str):
8425 return colorize("PKG_MERGE_SYSTEM", pkg_str)
8427 return colorize("PKG_MERGE_WORLD", pkg_str)
8429 return colorize("PKG_MERGE", pkg_str)
8430 elif pkg_status == "uninstall":
8431 return colorize("PKG_UNINSTALL", pkg_str)
8434 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8436 return colorize("PKG_NOMERGE_WORLD", pkg_str)
8438 return colorize("PKG_NOMERGE", pkg_str)
8441 properties = flatten(use_reduce(paren_reduce(
8442 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8443 except portage.exception.InvalidDependString, e:
8444 if not pkg.installed:
8445 show_invalid_depstring_notice(pkg,
8446 pkg.metadata["PROPERTIES"], str(e))
8450 interactive = "interactive" in properties
8451 if interactive and pkg.operation == "merge":
8452 addl = colorize("WARN", "I") + addl[1:]
8454 counters.interactive += 1
8459 if "--columns" in self.myopts:
8460 if "--quiet" in self.myopts:
8461 myprint=addl+" "+indent+pkgprint(pkg_cp)
8462 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8463 myprint=myprint+myoldbest
8464 myprint=myprint+darkgreen("to "+x[1])
8468 myprint = "[%s] %s%s" % \
8469 (pkgprint(pkg_status.ljust(13)),
8470 indent, pkgprint(pkg.cp))
8472 myprint = "[%s %s] %s%s" % \
8473 (pkgprint(pkg.type_name), addl,
8474 indent, pkgprint(pkg.cp))
8475 if (newlp-nc_len(myprint)) > 0:
8476 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8477 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8478 if (oldlp-nc_len(myprint)) > 0:
8479 myprint=myprint+" "*(oldlp-nc_len(myprint))
8480 myprint=myprint+myoldbest
8481 myprint += darkgreen("to " + pkg.root)
8484 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8486 myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8487 myprint += indent + pkgprint(pkg_key) + " " + \
8488 myoldbest + darkgreen("to " + myroot)
8490 if "--columns" in self.myopts:
8491 if "--quiet" in self.myopts:
8492 myprint=addl+" "+indent+pkgprint(pkg_cp)
8493 myprint=myprint+" "+green(xs[1]+xs[2])+" "
8494 myprint=myprint+myoldbest
8498 myprint = "[%s] %s%s" % \
8499 (pkgprint(pkg_status.ljust(13)),
8500 indent, pkgprint(pkg.cp))
8502 myprint = "[%s %s] %s%s" % \
8503 (pkgprint(pkg.type_name), addl,
8504 indent, pkgprint(pkg.cp))
8505 if (newlp-nc_len(myprint)) > 0:
8506 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8507 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8508 if (oldlp-nc_len(myprint)) > 0:
8509 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8510 myprint += myoldbest
8513 myprint = "[%s] %s%s %s" % \
8514 (pkgprint(pkg_status.ljust(13)),
8515 indent, pkgprint(pkg.cpv),
8518 myprint = "[%s %s] %s%s %s" % \
8519 (pkgprint(pkg_type), addl, indent,
8520 pkgprint(pkg.cpv), myoldbest)
8522 if columns and pkg.operation == "uninstall":
8524 p.append((myprint, verboseadd, repoadd))
8526 if "--tree" not in self.myopts and \
8527 "--quiet" not in self.myopts and \
8528 not self._opts_no_restart.intersection(self.myopts) and \
8529 pkg.root == self._running_root.root and \
8530 portage.match_from_list(
8531 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8532 not vardb.cpv_exists(pkg.cpv) and \
8533 "--quiet" not in self.myopts:
8534 if mylist_index < len(mylist) - 1:
8535 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8536 p.append(colorize("WARN", " then resume the merge."))
8539 show_repos = repoadd_set and repoadd_set != set(["0"])
8542 if isinstance(x, basestring):
8543 out.write("%s\n" % (x,))
8546 myprint, verboseadd, repoadd = x
8549 myprint += " " + verboseadd
8551 if show_repos and repoadd:
8552 myprint += " " + teal("[%s]" % repoadd)
8554 out.write("%s\n" % (myprint,))
8563 sys.stdout.write(str(repo_display))
8565 if "--changelog" in self.myopts:
8567 for revision,text in changelogs:
8568 print bold('*'+revision)
8569 sys.stdout.write(text)
8574 def display_problems(self):
8576 Display problems with the dependency graph such as slot collisions.
8577 This is called internally by display() to show the problems _after_
8578 the merge list where it is most likely to be seen, but if display()
8579 is not going to be called then this method should be called explicitly
8580 to ensure that the user is notified of problems with the graph.
8582 All output goes to stderr, except for unsatisfied dependencies which
8583 go to stdout for parsing by programs such as autounmask.
8586 # Note that show_masked_packages() sends it's output to
8587 # stdout, and some programs such as autounmask parse the
8588 # output in cases when emerge bails out. However, when
8589 # show_masked_packages() is called for installed packages
8590 # here, the message is a warning that is more appropriate
8591 # to send to stderr, so temporarily redirect stdout to
8592 # stderr. TODO: Fix output code so there's a cleaner way
8593 # to redirect everything to stderr.
8598 sys.stdout = sys.stderr
8599 self._display_problems()
8605 # This goes to stdout for parsing by programs like autounmask.
8606 for pargs, kwargs in self._unsatisfied_deps_for_display:
8607 self._show_unsatisfied_dep(*pargs, **kwargs)
8609 def _display_problems(self):
8610 if self._circular_deps_for_display is not None:
8611 self._show_circular_deps(
8612 self._circular_deps_for_display)
8614 # The user is only notified of a slot conflict if
8615 # there are no unresolvable blocker conflicts.
8616 if self._unsatisfied_blockers_for_display is not None:
8617 self._show_unsatisfied_blockers(
8618 self._unsatisfied_blockers_for_display)
8620 self._show_slot_collision_notice()
8622 # TODO: Add generic support for "set problem" handlers so that
8623 # the below warnings aren't special cases for world only.
8625 if self._missing_args:
8626 world_problems = False
8627 if "world" in self._sets:
8628 # Filter out indirect members of world (from nested sets)
8629 # since only direct members of world are desired here.
8630 world_set = self.roots[self.target_root].sets["world"]
8631 for arg, atom in self._missing_args:
8632 if arg.name == "world" and atom in world_set:
8633 world_problems = True
8637 sys.stderr.write("\n!!! Problems have been " + \
8638 "detected with your world file\n")
8639 sys.stderr.write("!!! Please run " + \
8640 green("emaint --check world")+"\n\n")
8642 if self._missing_args:
8643 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8644 " Ebuilds for the following packages are either all\n")
8645 sys.stderr.write(colorize("BAD", "!!!") + \
8646 " masked or don't exist:\n")
8647 sys.stderr.write(" ".join(str(atom) for arg, atom in \
8648 self._missing_args) + "\n")
8650 if self._pprovided_args:
8652 for arg, atom in self._pprovided_args:
8653 if isinstance(arg, SetArg):
8655 arg_atom = (atom, atom)
8658 arg_atom = (arg.arg, atom)
8659 refs = arg_refs.setdefault(arg_atom, [])
8660 if parent not in refs:
8663 msg.append(bad("\nWARNING: "))
8664 if len(self._pprovided_args) > 1:
8665 msg.append("Requested packages will not be " + \
8666 "merged because they are listed in\n")
8668 msg.append("A requested package will not be " + \
8669 "merged because it is listed in\n")
8670 msg.append("package.provided:\n\n")
8671 problems_sets = set()
8672 for (arg, atom), refs in arg_refs.iteritems():
8675 problems_sets.update(refs)
8677 ref_string = ", ".join(["'%s'" % name for name in refs])
8678 ref_string = " pulled in by " + ref_string
8679 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8681 if "world" in problems_sets:
8682 msg.append("This problem can be solved in one of the following ways:\n\n")
8683 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
8684 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
8685 msg.append(" C) Remove offending entries from package.provided.\n\n")
8686 msg.append("The best course of action depends on the reason that an offending\n")
8687 msg.append("package.provided entry exists.\n\n")
8688 sys.stderr.write("".join(msg))
8690 masked_packages = []
8691 for pkg in self._masked_installed:
8692 root_config = pkg.root_config
8693 pkgsettings = self.pkgsettings[pkg.root]
8694 mreasons = get_masking_status(pkg, pkgsettings, root_config)
8695 masked_packages.append((root_config, pkgsettings,
8696 pkg.cpv, pkg.metadata, mreasons))
8698 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8699 " The following installed packages are masked:\n")
8700 show_masked_packages(masked_packages)
def calc_changelog(self,ebuildpath,current,next):
	"""Return the ChangeLog entries between the installed version and
	the version that is about to be merged.

	@param ebuildpath: path to the ebuild of the version being merged
	@param current: currently installed cpv
	@param next: cpv that is about to be merged
	@rtype: list
	@return: (release, text) tuples for the relevant ChangeLog
		sections, or [] when no ChangeLog information is available
	"""
	if ebuildpath is None or not os.path.exists(ebuildpath):
		return []
	# Strip the category and any redundant -r0 suffix so the versions
	# compare equal to the release tags found inside the ChangeLog.
	current = '-'.join(portage.catpkgsplit(current)[1:])
	if current.endswith('-r0'):
		current = current[:-3]
	next = '-'.join(portage.catpkgsplit(next)[1:])
	if next.endswith('-r0'):
		next = next[:-3]
	changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
	try:
		f = open(changelogpath)
		try:
			changelog = f.read()
		finally:
			# Always release the file handle (the original leaked it).
			f.close()
	except EnvironmentError:
		# A missing or unreadable ChangeLog is not an error; simply
		# display nothing. Narrowed from a bare except so that
		# SystemExit and friends propagate naturally.
		return []
	divisions = self.find_changelog_tags(changelog)
	# Skip entries for all revisions above the one we are about to emerge.
	for i in range(len(divisions)):
		if divisions[i][0]==next:
			divisions = divisions[i:]
			break
	# Find out how many entries we are going to display.
	for i in range(len(divisions)):
		if divisions[i][0]==current:
			divisions = divisions[:i]
			break
	else:
		# Couldn't find the current revision in the list; display nothing.
		return []
	return divisions
def find_changelog_tags(self,changelog):
	"""Split a ChangeLog file into per-release sections.

	@param changelog: complete text of a ChangeLog file
	@rtype: list
	@return: (release, text) tuples in file order, where release has
		any trailing ".ebuild" or "-r0" suffix stripped
	"""
	divs = []
	release = None
	# Each release section begins with a header line such as
	# "*foo-1.0 (01 Jan 2008)". Compile once, outside the loop.
	release_re = re.compile(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n', re.M)
	while 1:
		match = release_re.search(changelog)
		if match is None:
			# Remainder of the file belongs to the last release seen.
			if release is not None:
				divs.append((release,changelog))
			return divs
		if release is not None:
			divs.append((release,changelog[:match.start()]))
		changelog = changelog[match.end():]
		release = match.group(1)
		if release.endswith('.ebuild'):
			release = release[:-7]
		if release.endswith('-r0'):
			release = release[:-3]
8756 def saveNomergeFavorites(self):
8757 """Find atoms in favorites that are not in the mergelist and add them
8758 to the world file if necessary."""
8759 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8760 "--oneshot", "--onlydeps", "--pretend"):
8761 if x in self.myopts:
8763 root_config = self.roots[self.target_root]
8764 world_set = root_config.sets["world"]
8766 world_locked = False
8767 if hasattr(world_set, "lock"):
8771 if hasattr(world_set, "load"):
8772 world_set.load() # maybe it's changed on disk
8774 args_set = self._sets["args"]
8775 portdb = self.trees[self.target_root]["porttree"].dbapi
8776 added_favorites = set()
8777 for x in self._set_nodes:
8778 pkg_type, root, pkg_key, pkg_status = x
8779 if pkg_status != "nomerge":
8783 myfavkey = create_world_atom(x, args_set, root_config)
8785 if myfavkey in added_favorites:
8787 added_favorites.add(myfavkey)
8788 except portage.exception.InvalidDependString, e:
8789 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8790 (pkg_key, str(e)), noiselevel=-1)
8791 writemsg("!!! see '%s'\n\n" % os.path.join(
8792 root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8795 for k in self._sets:
8796 if k in ("args", "world") or not root_config.sets[k].world_candidate:
8801 all_added.append(SETPREFIX + k)
8802 all_added.extend(added_favorites)
8805 print ">>> Recording %s in \"world\" favorites file..." % \
8806 colorize("INFORM", str(a))
8808 world_set.update(all_added)
8813 def loadResumeCommand(self, resume_data, skip_masked=True,
8816 Add a resume command to the graph and validate it in the process. This
8817 will raise a PackageNotFound exception if a package is not available.
8820 if not isinstance(resume_data, dict):
8823 mergelist = resume_data.get("mergelist")
8824 if not isinstance(mergelist, list):
8827 fakedb = self.mydbapi
8829 serialized_tasks = []
8832 if not (isinstance(x, list) and len(x) == 4):
8834 pkg_type, myroot, pkg_key, action = x
8835 if pkg_type not in self.pkg_tree_map:
8837 if action != "merge":
8839 tree_type = self.pkg_tree_map[pkg_type]
8840 mydb = trees[myroot][tree_type].dbapi
8841 db_keys = list(self._trees_orig[myroot][
8842 tree_type].dbapi._aux_cache_keys)
8844 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8846 # It does no exist or it is corrupt.
8847 if action == "uninstall":
8850 # TODO: log these somewhere
8852 raise portage.exception.PackageNotFound(pkg_key)
8853 installed = action == "uninstall"
8854 built = pkg_type != "ebuild"
8855 root_config = self.roots[myroot]
8856 pkg = Package(built=built, cpv=pkg_key,
8857 installed=installed, metadata=metadata,
8858 operation=action, root_config=root_config,
8860 if pkg_type == "ebuild":
8861 pkgsettings = self.pkgsettings[myroot]
8862 pkgsettings.setcpv(pkg)
8863 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8864 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8865 self._pkg_cache[pkg] = pkg
8867 root_config = self.roots[pkg.root]
8868 if "merge" == pkg.operation and \
8869 not visible(root_config.settings, pkg):
8871 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8873 self._unsatisfied_deps_for_display.append(
8874 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8876 fakedb[myroot].cpv_inject(pkg)
8877 serialized_tasks.append(pkg)
8878 self.spinner.update()
8880 if self._unsatisfied_deps_for_display:
8883 if not serialized_tasks or "--nodeps" in self.myopts:
8884 self._serialized_tasks_cache = serialized_tasks
8885 self._scheduler_graph = self.digraph
8887 self._select_package = self._select_pkg_from_graph
8888 self.myparams.add("selective")
8889 # Always traverse deep dependencies in order to account for
8890 # potentially unsatisfied dependencies of installed packages.
8891 # This is necessary for correct --keep-going or --resume operation
8892 # in case a package from a group of circularly dependent packages
8893 # fails. In this case, a package which has recently been installed
8894 # may have an unsatisfied circular dependency (pulled in by
8895 # PDEPEND, for example). So, even though a package is already
8896 # installed, it may not have all of it's dependencies satisfied, so
8897 # it may not be usable. If such a package is in the subgraph of
8898 # deep depenedencies of a scheduled build, that build needs to
8899 # be cancelled. In order for this type of situation to be
8900 # recognized, deep traversal of dependencies is required.
8901 self.myparams.add("deep")
8903 favorites = resume_data.get("favorites")
8904 args_set = self._sets["args"]
8905 if isinstance(favorites, list):
8906 args = self._load_favorites(favorites)
8910 for task in serialized_tasks:
8911 if isinstance(task, Package) and \
8912 task.operation == "merge":
8913 if not self._add_pkg(task, None):
8916 # Packages for argument atoms need to be explicitly
8917 # added via _add_pkg() so that they are included in the
8918 # digraph (needed at least for --tree display).
8920 for atom in arg.set:
8921 pkg, existing_node = self._select_package(
8922 arg.root_config.root, atom)
8923 if existing_node is None and \
8925 if not self._add_pkg(pkg, Dependency(atom=atom,
8926 root=pkg.root, parent=arg)):
8929 # Allow unsatisfied deps here to avoid showing a masking
8930 # message for an unsatisfied dep that isn't necessarily
8932 if not self._create_graph(allow_unsatisfied=True):
8935 unsatisfied_deps = []
8936 for dep in self._unsatisfied_deps:
8937 if not isinstance(dep.parent, Package):
8939 if dep.parent.operation == "merge":
8940 unsatisfied_deps.append(dep)
8943 # For unsatisfied deps of installed packages, only account for
8944 # them if they are in the subgraph of dependencies of a package
8945 # which is scheduled to be installed.
8946 unsatisfied_install = False
8948 dep_stack = self.digraph.parent_nodes(dep.parent)
8950 node = dep_stack.pop()
8951 if not isinstance(node, Package):
8953 if node.operation == "merge":
8954 unsatisfied_install = True
8956 if node in traversed:
8959 dep_stack.extend(self.digraph.parent_nodes(node))
8961 if unsatisfied_install:
8962 unsatisfied_deps.append(dep)
8964 if masked_tasks or unsatisfied_deps:
8965 # This probably means that a required package
8966 # was dropped via --skipfirst. It makes the
8967 # resume list invalid, so convert it to a
8968 # UnsatisfiedResumeDep exception.
8969 raise self.UnsatisfiedResumeDep(self,
8970 masked_tasks + unsatisfied_deps)
8971 self._serialized_tasks_cache = None
8974 except self._unknown_internal_error:
8979 def _load_favorites(self, favorites):
8981 Use a list of favorites to resume state from a
8982 previous select_files() call. This creates similar
8983 DependencyArg instances to those that would have
8984 been created by the original select_files() call.
8985 This allows Package instances to be matched with
8986 DependencyArg instances during graph creation.
8988 root_config = self.roots[self.target_root]
8989 getSetAtoms = root_config.setconfig.getSetAtoms
8990 sets = root_config.sets
8993 if not isinstance(x, basestring):
8995 if x in ("system", "world"):
8997 if x.startswith(SETPREFIX):
8998 s = x[len(SETPREFIX):]
9003 # Recursively expand sets so that containment tests in
9004 # self._get_parent_sets() properly match atoms in nested
9005 # sets (like if world contains system).
9006 expanded_set = InternalPackageSet(
9007 initial_atoms=getSetAtoms(s))
9008 self._sets[s] = expanded_set
9009 args.append(SetArg(arg=x, set=expanded_set,
9010 root_config=root_config))
9012 if not portage.isvalidatom(x):
9014 args.append(AtomArg(arg=x, atom=x,
9015 root_config=root_config))
9017 self._set_args(args)
class UnsatisfiedResumeDep(portage.exception.PortageException):
	"""
	A dependency of a resume list is not installed. This
	can occur when a required package is dropped from the
	merge list via --skipfirst.
	"""
	def __init__(self, depgraph, value):
		portage.exception.PortageException.__init__(self, value)
		# Keep a reference to the depgraph so the handler that catches
		# this exception can inspect the failed tasks.
		self.depgraph = depgraph
class _internal_exception(portage.exception.PortageException):
	"""
	Base class for exceptions used internally by the depgraph for
	control flow (see the subclasses below); not intended to be
	reported directly to the user.
	"""
	def __init__(self, value=""):
		portage.exception.PortageException.__init__(self, value)
class _unknown_internal_error(_internal_exception):
	"""
	Used by the depgraph internally to terminate graph creation.
	The specific reason for the failure should have been dumped
	to stderr, unfortunately, the exact reason for the failure
	is unknown here.
	"""
class _serialize_tasks_retry(_internal_exception):
	"""
	This is raised by the _serialize_tasks() method when it needs to
	be called again for some reason. The only case that it's currently
	used for is when neglected dependencies need to be added to the
	graph in order to avoid making a potentially unsafe decision.
	"""
class _dep_check_composite_db(portage.dbapi):
	"""
	A dbapi-like interface that is optimized for use in dep_check() calls.
	This is built on top of the existing depgraph package selection logic.
	Some packages that have been added to the graph may be masked from this
	view in order to influence the atom preference selection that occurs
	via dep_check().

	NOTE(review): several original lines are elided from this excerpt;
	the comments below describe only the code that is visible here.
	"""
	def __init__(self, depgraph, root):
		portage.dbapi.__init__(self)
		self._depgraph = depgraph
		# (elided line here — presumably stores root as self._root,
		# which the methods below read; TODO confirm)
		self._match_cache = {}
		self._cpv_pkg_map = {}

	def _clear_cache(self):
		# Invalidate memoized match() results and the cpv -> Package map.
		self._match_cache.clear()
		self._cpv_pkg_map.clear()

	def match(self, atom):
		# Memoized matching; results are cached per atom (cache-hit
		# early-return lines are elided in this excerpt).
		ret = self._match_cache.get(atom)
		atom = self._dep_expand(atom)
		pkg, existing = self._depgraph._select_package(self._root, atom)
		# Return the highest available from select_package() as well as
		# any matching slots in the graph db.
		slots.add(pkg.metadata["SLOT"])
		atom_cp = portage.dep_getkey(atom)
		if pkg.cp.startswith("virtual/"):
			# For new-style virtual lookahead that occurs inside
			# dep_check(), examine all slots. This is needed
			# so that newer slots will not unnecessarily be pulled in
			# when a satisfying lower slot is already installed. For
			# example, if virtual/jdk-1.4 is satisfied via kaffe then
			# there's no need to pull in a newer slot to satisfy a
			# virtual/jdk dependency.
			for db, pkg_type, built, installed, db_keys in \
				self._depgraph._filtered_trees[self._root]["dbs"]:
				for cpv in db.match(atom):
					if portage.cpv_getkey(cpv) != pkg.cp:
						# (branch body elided in this excerpt)
					slots.add(db.aux_get(cpv, ["SLOT"])[0])
		if self._visible(pkg):
			self._cpv_pkg_map[pkg.cpv] = pkg
		# The slot of the highest match was handled above; examine any
		# remaining slots (loop header elided in this excerpt).
		slots.remove(pkg.metadata["SLOT"])
		slot_atom = "%s:%s" % (atom_cp, slots.pop())
		pkg, existing = self._depgraph._select_package(
			self._root, slot_atom)
		if not self._visible(pkg):
			# (branch body elided in this excerpt)
		self._cpv_pkg_map[pkg.cpv] = pkg
		self._cpv_sort_ascending(ret)
		self._match_cache[orig_atom] = ret

	def _visible(self, pkg):
		# Installed packages are only "visible" when selective mode is on.
		if pkg.installed and "selective" not in self._depgraph.myparams:
			arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
		except (StopIteration, portage.exception.InvalidDependString):
			self._depgraph.pkgsettings[pkg.root], pkg):
		except portage.exception.InvalidDependString:
		in_graph = self._depgraph._slot_pkg_map[
			self._root].get(pkg.slot_atom)
		if in_graph is None:
			# Mask choices for packages which are not the highest visible
			# version within their slot (since they usually trigger slot
			# conflicts).
			highest_visible, in_graph = self._depgraph._select_package(
				self._root, pkg.slot_atom)
			if pkg != highest_visible:
		elif in_graph != pkg:
			# Mask choices for packages that would trigger a slot
			# conflict with a previously selected package.

	def _dep_expand(self, atom):
		"""
		This is only needed for old installed packages that may
		contain atoms that are not fully qualified with a specific
		category. Emulate the cpv_expand() function that's used by
		dbapi.match() in cases like this. If there are multiple
		matches, it's often due to a new-style virtual that has
		been added, so try to filter those out to avoid raising
		AmbiguousPackageName.
		"""
		root_config = self._depgraph.roots[self._root]
		expanded_atoms = self._depgraph._dep_expand(root_config, atom)
		if len(expanded_atoms) > 1:
			non_virtual_atoms = []
			for x in expanded_atoms:
				if not portage.dep_getkey(x).startswith("virtual/"):
					non_virtual_atoms.append(x)
			if len(non_virtual_atoms) == 1:
				expanded_atoms = non_virtual_atoms
		if len(expanded_atoms) > 1:
			# compatible with portage.cpv_expand()
			raise portage.exception.AmbiguousPackageName(
				[portage.dep_getkey(x) for x in expanded_atoms])
			atom = expanded_atoms[0]
			null_atom = insert_category_into_atom(atom, "null")
			null_cp = portage.dep_getkey(null_atom)
			cat, atom_pn = portage.catsplit(null_cp)
			virts_p = root_config.settings.get_virts_p().get(atom_pn)
				# Allow the resolver to choose which virtual.
				atom = insert_category_into_atom(atom, "virtual")
				atom = insert_category_into_atom(atom, "null")

	def aux_get(self, cpv, wants):
		# Serve metadata for packages previously recorded by match();
		# missing keys yield empty strings.
		metadata = self._cpv_pkg_map[cpv].metadata
		return [metadata.get(x, "") for x in wants]
class RepoDisplay(object):
	"""
	Maps repository paths to short display indices and renders the
	"Portage tree and overlays:" legend shown in merge-list output.

	NOTE(review): some original lines are elided from this excerpt.
	"""
	def __init__(self, roots):
		self._shown_repos = {}
		self._unknown_repo = False
		# Collect PORTDIR and PORTDIR_OVERLAY paths from every root.
		for root_config in roots.itervalues():
			portdir = root_config.settings.get("PORTDIR")
				repo_paths.add(portdir)
			overlays = root_config.settings.get("PORTDIR_OVERLAY")
				repo_paths.update(overlays.split())
		repo_paths = list(repo_paths)
		self._repo_paths = repo_paths
		# Real paths are used for lookups in repoStr().
		self._repo_paths_real = [ os.path.realpath(repo_path) \
			for repo_path in repo_paths ]

		# pre-allocate index for PORTDIR so that it always has index 0.
		for root_config in roots.itervalues():
			portdb = root_config.trees["porttree"].dbapi
			portdir = portdb.porttree_root
				self.repoStr(portdir)

	def repoStr(self, repo_path_real):
		# Map a real repository path to its short display index.
		real_index = self._repo_paths_real.index(repo_path_real)
		if real_index == -1:
			# Unknown repo: remembered so the legend shows a "[?]" note.
			self._unknown_repo = True
		shown_repos = self._shown_repos
		repo_paths = self._repo_paths
		repo_path = repo_paths[real_index]
		index = shown_repos.get(repo_path)
			# First time this repo is shown: assign the next index.
			index = len(shown_repos)
			shown_repos[repo_path] = index

	# NOTE(review): the def line of the legend-rendering method (__str__)
	# is elided from this excerpt; the following lines belong to it.
		shown_repos = self._shown_repos
		unknown_repo = self._unknown_repo
		if shown_repos or self._unknown_repo:
			output.append("Portage tree and overlays:\n")
		# Invert the path -> index mapping so paths print in index order.
		show_repo_paths = list(shown_repos)
		for repo_path, repo_index in shown_repos.iteritems():
			show_repo_paths[repo_index] = repo_path
		for index, repo_path in enumerate(show_repo_paths):
			output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
		output.append(" "+teal("[?]") + \
			" indicates that the source repository could not be determined\n")
		return "".join(output)
class PackageCounters(object):
	"""
	Accumulates per-category counts for a merge list (upgrades,
	downgrades, new slots, reinstalls, uninstalls, blocks, fetch
	restrictions, interactive packages) and renders the
	"Total: N packages (...)" summary line.

	NOTE(review): the __init__ and __str__ def lines, and several
	counter initializations, are elided from this excerpt.
	"""
		self.blocks_satisfied = 0
		self.restrict_fetch = 0
		self.restrict_fetch_satisfied = 0
		self.interactive = 0
		total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
		myoutput.append("Total: %s package" % total_installs)
		if total_installs != 1:
			# Pluralize.
			myoutput.append("s")
		if total_installs != 0:
			myoutput.append(" (")
		if self.upgrades > 0:
			details.append("%s upgrade" % self.upgrades)
			if self.upgrades > 1:
		if self.downgrades > 0:
			details.append("%s downgrade" % self.downgrades)
			if self.downgrades > 1:
			details.append("%s new" % self.new)
		if self.newslot > 0:
			details.append("%s in new slot" % self.newslot)
			if self.newslot > 1:
			details.append("%s reinstall" % self.reinst)
			details.append("%s uninstall" % self.uninst)
		if self.interactive > 0:
			details.append("%s %s" % (self.interactive,
				colorize("WARN", "interactive")))
		myoutput.append(", ".join(details))
		if total_installs != 0:
			myoutput.append(")")
		myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
		if self.restrict_fetch:
			myoutput.append("\nFetch Restriction: %s package" % \
				self.restrict_fetch)
			if self.restrict_fetch > 1:
				myoutput.append("s")
		if self.restrict_fetch_satisfied < self.restrict_fetch:
			myoutput.append(bad(" (%s unsatisfied)") % \
				(self.restrict_fetch - self.restrict_fetch_satisfied))
			myoutput.append("\nConflict: %s block" % \
				myoutput.append("s")
			if self.blocks_satisfied < self.blocks:
				myoutput.append(bad(" (%s unsatisfied)") % \
					(self.blocks - self.blocks_satisfied))
		return "".join(myoutput)
class UseFlagDisplay(object):
	"""
	A single USE flag for display purposes: the flag name plus its
	enabled and forced state, with two alternative sort orders.
	"""

	__slots__ = ('name', 'enabled', 'forced')

	def __init__(self, name, enabled, forced):
		# NOTE(review): the assignment of the name attribute is elided
		# from this excerpt; name is read by the comparators below.
		self.enabled = enabled
		self.forced = forced

	def _cmp_combined(a, b):
		"""
		Sort by name, combining enabled and disabled flags.
		"""
		return (a.name > b.name) - (a.name < b.name)

	sort_combined = cmp_sort_key(_cmp_combined)

	def _cmp_separated(a, b):
		"""
		Sort by name, separating enabled flags from disabled flags.
		"""
		# NOTE(review): the early return on a nonzero enabled_diff is
		# elided from this excerpt; name comparison is the tie-breaker.
		enabled_diff = b.enabled - a.enabled
		return (a.name > b.name) - (a.name < b.name)

	sort_separated = cmp_sort_key(_cmp_separated)
class PollSelectAdapter(PollConstants):

	"""
	Use select to emulate a poll object, for
	systems that don't support poll().
	"""

	# NOTE(review): the __init__ def line is elided from this excerpt.
		self._registered = {}
		# Cached [rlist, wlist, xlist] arguments for select.select().
		self._select_args = [[], [], []]

	def register(self, fd, *args):
		"""
		Only POLLIN is currently supported!
		"""
		# (elided: argument-count check that raises TypeError with
		# the message below)
				"register expected at most 2 arguments, got " + \
				repr(1 + len(args)))
		eventmask = PollConstants.POLLIN | \
			PollConstants.POLLPRI | PollConstants.POLLOUT
		self._registered[fd] = eventmask
		# Invalidate the cached select() argument lists.
		self._select_args = None

	def unregister(self, fd):
		# Invalidate the cached select() args before dropping the fd.
		self._select_args = None
		del self._registered[fd]

	def poll(self, *args):
		# (elided: argument-count check that raises TypeError with
		# the message below)
				"poll expected at most 2 arguments, got " + \
				repr(1 + len(args)))
		select_args = self._select_args
		if select_args is None:
			# Rebuild the read list from the registered fds.
			select_args = [self._registered.keys(), [], []]

		if timeout is not None:
			# Copy so the cached list is not mutated below.
			select_args = select_args[:]
			# Translate poll() timeout args to select() timeout args:
			#
			#          | units        | value(s) for indefinite block
			# ---------|--------------|------------------------------
			#   poll   | milliseconds | omitted, negative, or None
			# ---------|--------------|------------------------------
			#   select | seconds      | omitted
			# ---------|--------------|------------------------------
			#
			if timeout is not None and timeout < 0:
			if timeout is not None:
				# poll() timeouts are milliseconds; select() wants seconds.
				select_args.append(timeout / 1000)

		select_events = select.select(*select_args)
		for fd in select_events[0]:
			# Only readability is reported, matching the POLLIN-only
			# support documented on register().
			poll_events.append((fd, PollConstants.POLLIN))
class SequentialTaskQueue(SlotObject):
	"""
	A FIFO queue of tasks, started up to max_jobs at a time.

	NOTE(review): some original lines are elided from this excerpt.
	"""

	__slots__ = ("max_jobs", "running_tasks") + \
		("_dirty", "_scheduling", "_task_queue")

	def __init__(self, **kwargs):
		SlotObject.__init__(self, **kwargs)
		# deque gives O(1) append and popleft for FIFO scheduling.
		self._task_queue = deque()
		self.running_tasks = set()
		if self.max_jobs is None:
			# (default assignment elided in this excerpt)

	def add(self, task):
		self._task_queue.append(task)

	def addFront(self, task):
		# Queue jumping: this task will be the next one started.
		self._task_queue.appendleft(task)

	# NOTE(review): the schedule() def line is elided from this excerpt;
	# the following lines belong to it.
		if self._scheduling:
			# Ignore any recursive schedule() calls triggered via
			# self._task_exit().
		self._scheduling = True

		task_queue = self._task_queue
		running_tasks = self.running_tasks
		max_jobs = self.max_jobs
		state_changed = False

		# max_jobs is True means "unlimited".
		while task_queue and \
			(max_jobs is True or len(running_tasks) < max_jobs):
			task = task_queue.popleft()
			cancelled = getattr(task, "cancelled", None)
				running_tasks.add(task)
				# The exit listener prunes running_tasks automatically.
				task.addExitListener(self._task_exit)
			state_changed = True

		self._scheduling = False

		return state_changed

	def _task_exit(self, task):
		"""
		Since we can always rely on exit listeners being called, the set of
		running tasks is always pruned automatically and there is never any need
		to actively prune it.
		"""
		self.running_tasks.remove(task)
		if self._task_queue:
			# (reschedule call elided in this excerpt)

	# NOTE(review): the clear() def line is elided from this excerpt.
		self._task_queue.clear()
		running_tasks = self.running_tasks
		while running_tasks:
			task = running_tasks.pop()
			task.removeExitListener(self._task_exit)

	def __nonzero__(self):
		# Truthy while anything is queued or running (Python 2 protocol).
		return bool(self._task_queue or self.running_tasks)

	# NOTE(review): the __len__ def line is elided from this excerpt.
		return len(self._task_queue) + len(self.running_tasks)
# Memoized result of the poll()-on-device probe below.
_can_poll_device = None

def can_poll_device():
	"""
	Test if it's possible to use poll() on a device such as a pty. This
	is known to fail on Darwin.
	@returns: True if poll() on a device succeeds, False otherwise.

	NOTE(review): some original lines (try/except around the open()
	call, poll object creation, cleanup) are elided from this excerpt.
	"""
	global _can_poll_device
	if _can_poll_device is not None:
		# Return the memoized result.
		return _can_poll_device

	if not hasattr(select, "poll"):
		_can_poll_device = False
		return _can_poll_device

		dev_null = open('/dev/null', 'rb')
		_can_poll_device = False
		return _can_poll_device

	p.register(dev_null.fileno(), PollConstants.POLLIN)

	invalid_request = False
	for f, event in p.poll():
		if event & PollConstants.POLLNVAL:
			invalid_request = True

	# POLLNVAL means the kernel rejected the request, so poll()
	# cannot be used on devices here.
	_can_poll_device = not invalid_request
	return _can_poll_device
def create_poll_instance():
	"""
	Return a poll object: select.poll() when poll() works on devices
	for this platform, otherwise a PollSelectAdapter fallback.
	"""
	if not can_poll_device():
		return PollSelectAdapter()
	return select.poll()
getloadavg = getattr(os, "getloadavg", None)
if getloadavg is None:
	# os.getloadavg() is unavailable on this platform; a /proc/loadavg
	# based replacement is defined here.
	# NOTE(review): the replacement function's def line and its
	# docstring delimiters are elided from this excerpt. The docstring:
	#   Uses /proc/loadavg to emulate os.getloadavg().
	#   Raises OSError if the load average was unobtainable.
			loadavg_str = open('/proc/loadavg').readline()
			# getloadavg() is only supposed to raise OSError, so convert
			raise OSError('unknown')
		loadavg_split = loadavg_str.split()
		if len(loadavg_split) < 3:
			raise OSError('unknown')
				loadavg_floats.append(float(loadavg_split[i]))
			raise OSError('unknown')
		return tuple(loadavg_floats)
class PollScheduler(object):
	"""
	Base class that multiplexes poll() events to registered per-fd
	handlers and drives task scheduling from those events.

	NOTE(review): some original lines are elided from this excerpt;
	comments below describe only the visible code.
	"""

	class _sched_iface_class(SlotObject):
		# Narrow interface handed to tasks: register/schedule/unregister.
		__slots__ = ("register", "schedule", "unregister")

	# NOTE(review): the __init__ def line is elided from this excerpt.
		self._max_load = None
		self._poll_event_queue = []
		self._poll_event_handlers = {}
		self._poll_event_handler_ids = {}
		# Increment id for each new handler.
		self._event_handler_id = 0
		self._poll_obj = create_poll_instance()
		self._scheduling = False

	def _schedule(self):
		"""
		Calls _schedule_tasks() and automatically returns early from
		any recursive calls to this method that the _schedule_tasks()
		call might trigger. This makes _schedule() safe to call from
		inside exit listeners.
		"""
		if self._scheduling:
		self._scheduling = True
			return self._schedule_tasks()
			self._scheduling = False

	def _running_job_count(self):
		# Abstract: subclasses report the number of running jobs.

	def _can_add_job(self):
		# Enforce both the --jobs limit and the --load-average limit.
		max_jobs = self._max_jobs
		max_load = self._max_load

		if self._max_jobs is not True and \
			self._running_job_count() >= self._max_jobs:

		# Only consult the load average when parallelism is enabled and
		# at least one job is already running.
		if max_load is not None and \
			(max_jobs is True or max_jobs > 1) and \
			self._running_job_count() >= 1:
				avg1, avg5, avg15 = getloadavg()

			if avg1 >= max_load:

	def _poll(self, timeout=None):
		"""
		All poll() calls pass through here. The poll events
		are added directly to self._poll_event_queue.
		In order to avoid endless blocking, this raises
		StopIteration if timeout is None and there are
		no file descriptors to poll.
		"""
		if not self._poll_event_handlers:
			if timeout is None and \
				not self._poll_event_handlers:
				raise StopIteration(
					"timeout is None and there are no poll() event handlers")

		# The following error is known to occur with Linux kernel versions
		# less than 2.6.24:
		#
		#   select.error: (4, 'Interrupted system call')
		#
		# This error has been observed after a SIGSTOP, followed by SIGCONT.
		# Treat it similar to EAGAIN if timeout is None, otherwise just return
		# without any events.
				self._poll_event_queue.extend(self._poll_obj.poll(timeout))
			except select.error, e:
				writemsg_level("\n!!! select error: %s\n" % (e,),
					level=logging.ERROR, noiselevel=-1)
				if timeout is not None:

	def _next_poll_event(self, timeout=None):
		"""
		Since the _schedule_wait() loop is called by event
		handlers from _poll_loop(), maintain a central event
		queue for both of them to share events from a single
		poll() call. In order to avoid endless blocking, this
		raises StopIteration if timeout is None and there are
		no file descriptors to poll.
		"""
		if not self._poll_event_queue:
		return self._poll_event_queue.pop()

	def _poll_loop(self):

		event_handlers = self._poll_event_handlers
		event_handled = False

			while event_handlers:
				f, event = self._next_poll_event()
				handler, reg_id = event_handlers[f]
				event_handled = True
		except StopIteration:
			event_handled = True

		if not event_handled:
			# No handler ran and nothing raised: avoid busy-waiting.
			raise AssertionError("tight loop")

	def _schedule_yield(self):
		"""
		Schedule for a short period of time chosen by the scheduler based
		on internal state. Synchronous tasks should call this periodically
		in order to allow the scheduler to service pending poll events. The
		scheduler will call poll() exactly once, without blocking, and any
		resulting poll events will be serviced.
		"""
		event_handlers = self._poll_event_handlers

		if not event_handlers:
			return bool(events_handled)

		if not self._poll_event_queue:

			while event_handlers and self._poll_event_queue:
				f, event = self._next_poll_event()
				handler, reg_id = event_handlers[f]
		except StopIteration:

		return bool(events_handled)

	def _register(self, f, eventmask, handler):
		"""
		@rtype: Integer
		@return: A unique registration id, for use in schedule() or
			unregister() calls.
		"""
		if f in self._poll_event_handlers:
			raise AssertionError("fd %d is already registered" % f)
		self._event_handler_id += 1
		reg_id = self._event_handler_id
		self._poll_event_handler_ids[reg_id] = f
		self._poll_event_handlers[f] = (handler, reg_id)
		self._poll_obj.register(f, eventmask)

	def _unregister(self, reg_id):
		# Remove both the poll registration and the bookkeeping entries.
		f = self._poll_event_handler_ids[reg_id]
		self._poll_obj.unregister(f)
		del self._poll_event_handlers[f]
		del self._poll_event_handler_ids[reg_id]

	def _schedule_wait(self, wait_ids):
		"""
		Schedule until wait_id is not longer registered
		as valid.
		@param wait_id: a task id to wait for
		"""
		event_handlers = self._poll_event_handlers
		handler_ids = self._poll_event_handler_ids
		event_handled = False

		# Accept either a single id or a collection of ids.
		if isinstance(wait_ids, int):
			wait_ids = frozenset([wait_ids])

			while wait_ids.intersection(handler_ids):
				f, event = self._next_poll_event()
				handler, reg_id = event_handlers[f]
				event_handled = True
		except StopIteration:
			event_handled = True

		return event_handled
class QueueScheduler(PollScheduler):

	"""
	Add instances of SequentialTaskQueue and then call run(). The
	run() method returns when no tasks remain.

	NOTE(review): some original lines are elided from this excerpt.
	"""

	def __init__(self, max_jobs=None, max_load=None):
		PollScheduler.__init__(self)

		if max_jobs is None:
			# (default assignment elided in this excerpt)

		self._max_jobs = max_jobs
		self._max_load = max_load
		self.sched_iface = self._sched_iface_class(
			register=self._register,
			schedule=self._schedule_wait,
			unregister=self._unregister)

		self._schedule_listeners = []

	# NOTE(review): the add() def line is elided from this excerpt.
		self._queues.append(q)

	def remove(self, q):
		self._queues.remove(q)

	# NOTE(review): the run() def line is elided from this excerpt.
		# Start tasks until nothing more can be scheduled, then wait
		# out the remaining running jobs.
		while self._schedule():

		while self._running_job_count():

	def _schedule_tasks(self):
		"""
		@returns: True if there may be remaining tasks to schedule,
			False otherwise.
		"""
		while self._can_add_job():
			# Available job slots under the --jobs limit.
			n = self._max_jobs - self._running_job_count()

			if not self._start_next_job(n):

		for q in self._queues:

	def _running_job_count(self):
		# Sum the running tasks over all queues; cached in self._jobs.
		for q in self._queues:
			job_count += len(q.running_tasks)
		self._jobs = job_count

	def _start_next_job(self, n=1):
		# Ask each queue to schedule, counting how many tasks actually
		# started, until n new jobs have been started.
		for q in self._queues:
			initial_job_count = len(q.running_tasks)
			final_job_count = len(q.running_tasks)
			if final_job_count > initial_job_count:
				started_count += (final_job_count - initial_job_count)
			if started_count >= n:
		return started_count
class TaskScheduler(object):

	"""
	A simple way to handle scheduling of AsynchrousTask instances. Simply
	add tasks and call run(). The run() method returns when no tasks remain.
	"""

	def __init__(self, max_jobs=None, max_load=None):
		# Pair one task queue with a QueueScheduler that drives it.
		queue = SequentialTaskQueue(max_jobs=max_jobs)
		scheduler = QueueScheduler(
			max_jobs=max_jobs, max_load=max_load)
		self._queue = queue
		self._scheduler = scheduler
		# Expose the scheduler's interface and run() method directly.
		self.sched_iface = scheduler.sched_iface
		self.run = scheduler.run
		scheduler.add(queue)

	def add(self, task):
		self._queue.add(task)
class JobStatusDisplay(object):
	"""
	Renders the one-line "Jobs: ... Load avg: ..." status display on a
	terminal, using termcap codes when available.

	NOTE(review): some original lines are elided from this excerpt;
	comments below describe only the visible code.
	"""

	_bound_properties = ("curval", "failed", "running")
	_jobs_column_width = 48

	# Don't update the display unless at least this much
	# time has passed, in units of seconds.
	_min_display_latency = 2

	_default_term_codes = {
		# (dictionary entries elided in this excerpt)

	_termcap_name_map = {
		'carriage_return' : 'cr',
		# (remaining entries elided in this excerpt)

	def __init__(self, out=sys.stdout, quiet=False, xterm_titles=True):
		# object.__setattr__ is used because __setattr__ below
		# intercepts normal attribute assignment.
		object.__setattr__(self, "out", out)
		object.__setattr__(self, "quiet", quiet)
		object.__setattr__(self, "xterm_titles", xterm_titles)
		object.__setattr__(self, "maxval", 0)
		object.__setattr__(self, "merges", 0)
		object.__setattr__(self, "_changed", False)
		object.__setattr__(self, "_displayed", False)
		object.__setattr__(self, "_last_display_time", 0)
		object.__setattr__(self, "width", 80)

		isatty = hasattr(out, "isatty") and out.isatty()
		object.__setattr__(self, "_isatty", isatty)
		if not isatty or not self._init_term():
			# Fall back to the hard-coded default term codes.
			for k, capname in self._termcap_name_map.iteritems():
				term_codes[k] = self._default_term_codes[capname]
			object.__setattr__(self, "_term_codes", term_codes)
		encoding = sys.getdefaultencoding()
		for k, v in self._term_codes.items():
			if not isinstance(v, basestring):
				# tigetstr() may return bytes; normalize to text.
				self._term_codes[k] = v.decode(encoding, 'replace')

	def _init_term(self):
		"""
		Initialize term control codes.
		@returns: True if term codes were successfully initialized,
			False otherwise.
		"""
		term_type = os.environ.get("TERM", "vt100")
			curses.setupterm(term_type, self.out.fileno())
			tigetstr = curses.tigetstr
		except curses.error:
		if tigetstr is None:
		for k, capname in self._termcap_name_map.iteritems():
			code = tigetstr(capname)
				# Cap is missing from terminfo: use the default code.
				code = self._default_term_codes[capname]
			term_codes[k] = code
		object.__setattr__(self, "_term_codes", term_codes)

	def _format_msg(self, msg):
		return ">>> %s" % msg

	# NOTE(review): the def line of the erase method is elided from
	# this excerpt; these are continuation lines of its write call.
		self._term_codes['carriage_return'] + \
		self._term_codes['clr_eol'])
		self._displayed = False

	def _display(self, line):
		self.out.write(line)
		self._displayed = True

	def _update(self, msg):
		# Non-tty output: just append a plain line.
		if not self._isatty:
			out.write(self._format_msg(msg) + self._term_codes['newline'])
			self._displayed = True

		self._display(self._format_msg(msg))

	def displayMessage(self, msg):

		was_displayed = self._displayed

		if self._isatty and self._displayed:

		self.out.write(self._format_msg(msg) + self._term_codes['newline'])
		self._displayed = False

			self._changed = True

	# NOTE(review): the reset() def line is elided from this excerpt.
		for name in self._bound_properties:
			object.__setattr__(self, name, 0)
			self.out.write(self._term_codes['newline'])
			self._displayed = False

	def __setattr__(self, name, value):
		old_value = getattr(self, name)
		if value == old_value:
		object.__setattr__(self, name, value)
		if name in self._bound_properties:
			self._property_change(name, old_value, value)

	def _property_change(self, name, old_value, new_value):
		# Mark dirty so the next display() call refreshes the line.
		self._changed = True

	def _load_avg_str(self):
		# Format the load average with precision scaled to magnitude.
		elif max_avg < 100:
		return ", ".join(("%%.%df" % digits ) % x for x in avg)

	# NOTE(review): the display() def line and docstring delimiters are
	# elided from this excerpt. The docstring:
	#   Display status on stdout, but only if something has
	#   changed since the last call.
		current_time = time.time()
		time_delta = current_time - self._last_display_time
		if self._displayed and \
			if not self._isatty:
			if time_delta < self._min_display_latency:

		self._last_display_time = current_time
		self._changed = False
		self._display_status()

	def _display_status(self):
		# Don't use len(self._completed_tasks) here since that also
		# can include uninstall tasks.
		curval_str = str(self.curval)
		maxval_str = str(self.maxval)
		running_str = str(self.running)
		failed_str = str(self.failed)
		load_avg_str = self._load_avg_str()

		# Render colored and plain variants in parallel; the plain one
		# is used for width/padding computations.
		color_output = StringIO()
		plain_output = StringIO()
		style_file = portage.output.ConsoleStyleFile(color_output)
		style_file.write_listener = plain_output
		style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
		style_writer.style_listener = style_file.new_styles
		f = formatter.AbstractFormatter(style_writer)

		number_style = "INFORM"
		f.add_literal_data("Jobs: ")
		f.push_style(number_style)
		f.add_literal_data(curval_str)
		f.add_literal_data(" of ")
		f.push_style(number_style)
		f.add_literal_data(maxval_str)
		f.add_literal_data(" complete")

			f.add_literal_data(", ")
			f.push_style(number_style)
			f.add_literal_data(running_str)
			f.add_literal_data(" running")

			f.add_literal_data(", ")
			f.push_style(number_style)
			f.add_literal_data(failed_str)
			f.add_literal_data(" failed")

		# Pad the jobs section to a fixed column before the load average.
		padding = self._jobs_column_width - len(plain_output.getvalue())
			f.add_literal_data(padding * " ")

		f.add_literal_data("Load avg: ")
		f.add_literal_data(load_avg_str)

		# Truncate to fit width, to avoid making the terminal scroll if the
		# line overflows (happens when the load average is large).
		plain_output = plain_output.getvalue()
		if self._isatty and len(plain_output) > self.width:
			# Use plain_output here since it's easier to truncate
			# properly than the color output which contains console
			# color codes.
			self._update(plain_output[:self.width])
			self._update(color_output.getvalue())

		if self.xterm_titles:
			xtermTitle(" ".join(plain_output.split()))
class ProgressHandler(object):
	"""
	Rate-limited progress callback: onProgress() records the current
	counts and triggers a refresh at most once per min_latency seconds.
	"""
	def __init__(self):
		# NOTE(review): initialization of the progress counters is
		# elided from this excerpt.
		self._last_update = 0
		# Minimum seconds between refreshes.
		self.min_latency = 0.2

	def onProgress(self, maxval, curval):
		self.maxval = maxval
		self.curval = curval
		cur_time = time.time()
		if cur_time - self._last_update >= self.min_latency:
			self._last_update = cur_time
			# (refresh call elided in this excerpt)

	# NOTE(review): the def line of the abstract display hook is elided
	# from this excerpt; subclasses must override it.
		raise NotImplementedError(self)
10135 class Scheduler(PollScheduler):
10137 _opts_ignore_blockers = \
10138 frozenset(["--buildpkgonly",
10139 "--fetchonly", "--fetch-all-uri",
10140 "--nodeps", "--pretend"])
10142 _opts_no_background = \
10143 frozenset(["--pretend",
10144 "--fetchonly", "--fetch-all-uri"])
10146 _opts_no_restart = frozenset(["--buildpkgonly",
10147 "--fetchonly", "--fetch-all-uri", "--pretend"])
10149 _bad_resume_opts = set(["--ask", "--changelog",
10150 "--resume", "--skipfirst"])
10152 _fetch_log = "/var/log/emerge-fetch.log"
10154 class _iface_class(SlotObject):
10155 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
10156 "dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
10157 "scheduleSetup", "scheduleUnpack", "scheduleYield",
10160 class _fetch_iface_class(SlotObject):
10161 __slots__ = ("log_file", "schedule")
10163 _task_queues_class = slot_dict_class(
10164 ("merge", "jobs", "fetch", "unpack"), prefix="")
10166 class _build_opts_class(SlotObject):
10167 __slots__ = ("buildpkg", "buildpkgonly",
10168 "fetch_all_uri", "fetchonly", "pretend")
10170 class _binpkg_opts_class(SlotObject):
10171 __slots__ = ("fetchonly", "getbinpkg", "pretend")
10173 class _pkg_count_class(SlotObject):
10174 __slots__ = ("curval", "maxval")
10176 class _emerge_log_class(SlotObject):
10177 __slots__ = ("xterm_titles",)
10179 def log(self, *pargs, **kwargs):
10180 if not self.xterm_titles:
10181 # Avoid interference with the scheduler's status display.
10182 kwargs.pop("short_msg", None)
10183 emergelog(self.xterm_titles, *pargs, **kwargs)
10185 class _failed_pkg(SlotObject):
10186 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
10188 class _ConfigPool(object):
10189 """Interface for a task to temporarily allocate a config
10190 instance from a pool. This allows a task to be constructed
10191 long before the config instance actually becomes needed, like
10192 when prefetchers are constructed for the whole merge list."""
10193 __slots__ = ("_root", "_allocate", "_deallocate")
10194 def __init__(self, root, allocate, deallocate):
10196 self._allocate = allocate
10197 self._deallocate = deallocate
10198 def allocate(self):
10199 return self._allocate(self._root)
10200 def deallocate(self, settings):
10201 self._deallocate(settings)
10203 class _unknown_internal_error(portage.exception.PortageException):
10205 Used internally to terminate scheduling. The specific reason for
10206 the failure should have been dumped to stderr.
10208 def __init__(self, value=""):
10209 portage.exception.PortageException.__init__(self, value)
10211 def __init__(self, settings, trees, mtimedb, myopts,
10212 spinner, mergelist, favorites, digraph):
10213 PollScheduler.__init__(self)
10214 self.settings = settings
10215 self.target_root = settings["ROOT"]
10217 self.myopts = myopts
10218 self._spinner = spinner
10219 self._mtimedb = mtimedb
10220 self._mergelist = mergelist
10221 self._favorites = favorites
10222 self._args_set = InternalPackageSet(favorites)
10223 self._build_opts = self._build_opts_class()
10224 for k in self._build_opts.__slots__:
10225 setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10226 self._binpkg_opts = self._binpkg_opts_class()
10227 for k in self._binpkg_opts.__slots__:
10228 setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10231 self._logger = self._emerge_log_class()
10232 self._task_queues = self._task_queues_class()
10233 for k in self._task_queues.allowed_keys:
10234 setattr(self._task_queues, k,
10235 SequentialTaskQueue())
10237 # Holds merges that will wait to be executed when no builds are
10238 # executing. This is useful for system packages since dependencies
10239 # on system packages are frequently unspecified.
10240 self._merge_wait_queue = []
10241 # Holds merges that have been transfered from the merge_wait_queue to
10242 # the actual merge queue. They are removed from this list upon
10243 # completion. Other packages can start building only when this list is
10245 self._merge_wait_scheduled = []
10247 # Holds system packages and their deep runtime dependencies. Before
10248 # being merged, these packages go to merge_wait_queue, to be merged
10249 # when no other packages are building.
10250 self._deep_system_deps = set()
10252 # Holds packages to merge which will satisfy currently unsatisfied
10253 # deep runtime dependencies of system packages. If this is not empty
10254 # then no parallel builds will be spawned until it is empty. This
10255 # minimizes the possibility that a build will fail due to the system
10256 # being in a fragile state. For example, see bug #259954.
10257 self._unsatisfied_system_deps = set()
10259 self._status_display = JobStatusDisplay(
10260 xterm_titles=('notitles' not in settings.features))
10261 self._max_load = myopts.get("--load-average")
10262 max_jobs = myopts.get("--jobs")
10263 if max_jobs is None:
10265 self._set_max_jobs(max_jobs)
10267 # The root where the currently running
10268 # portage instance is installed.
10269 self._running_root = trees["/"]["root_config"]
10271 if settings.get("PORTAGE_DEBUG", "") == "1":
10273 self.pkgsettings = {}
10274 self._config_pool = {}
10275 self._blocker_db = {}
10277 self._config_pool[root] = []
10278 self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10280 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10281 schedule=self._schedule_fetch)
10282 self._sched_iface = self._iface_class(
10283 dblinkEbuildPhase=self._dblink_ebuild_phase,
10284 dblinkDisplayMerge=self._dblink_display_merge,
10285 dblinkElog=self._dblink_elog,
10286 dblinkEmergeLog=self._dblink_emerge_log,
10287 fetch=fetch_iface, register=self._register,
10288 schedule=self._schedule_wait,
10289 scheduleSetup=self._schedule_setup,
10290 scheduleUnpack=self._schedule_unpack,
10291 scheduleYield=self._schedule_yield,
10292 unregister=self._unregister)
10294 self._prefetchers = weakref.WeakValueDictionary()
10295 self._pkg_queue = []
10296 self._completed_tasks = set()
10298 self._failed_pkgs = []
10299 self._failed_pkgs_all = []
10300 self._failed_pkgs_die_msgs = []
10301 self._post_mod_echo_msgs = []
10302 self._parallel_fetch = False
10303 merge_count = len([x for x in mergelist \
10304 if isinstance(x, Package) and x.operation == "merge"])
10305 self._pkg_count = self._pkg_count_class(
10306 curval=0, maxval=merge_count)
10307 self._status_display.maxval = self._pkg_count.maxval
10309 # The load average takes some time to respond when new
10310 # jobs are added, so we need to limit the rate of adding
10312 self._job_delay_max = 10
10313 self._job_delay_factor = 1.0
10314 self._job_delay_exp = 1.5
10315 self._previous_job_start_time = None
10317 self._set_digraph(digraph)
10319 # This is used to memoize the _choose_pkg() result when
10320 # no packages can be chosen until one of the existing
10322 self._choose_pkg_return_early = False
10324 features = self.settings.features
10325 if "parallel-fetch" in features and \
10326 not ("--pretend" in self.myopts or \
10327 "--fetch-all-uri" in self.myopts or \
10328 "--fetchonly" in self.myopts):
10329 if "distlocks" not in features:
10330 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10331 portage.writemsg(red("!!!")+" parallel-fetching " + \
10332 "requires the distlocks feature enabled"+"\n",
10334 portage.writemsg(red("!!!")+" you have it disabled, " + \
10335 "thus parallel-fetching is being disabled"+"\n",
10337 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10338 elif len(mergelist) > 1:
10339 self._parallel_fetch = True
10341 if self._parallel_fetch:
10342 # clear out existing fetch log if it exists
10344 open(self._fetch_log, 'w')
10345 except EnvironmentError:
10348 self._running_portage = None
10349 portage_match = self._running_root.trees["vartree"].dbapi.match(
10350 portage.const.PORTAGE_PACKAGE_ATOM)
10352 cpv = portage_match.pop()
10353 self._running_portage = self._pkg(cpv, "installed",
10354 self._running_root, installed=True)
# Run a single poll cycle by delegating to the PollScheduler base class.
# NOTE(review): the embedded original line numbers jump (10356 -> 10358);
# lines are missing from this excerpt, so it is not runnable as-is.
10356 def _poll(self, timeout=None):
10358 PollScheduler._poll(self, timeout=timeout)
10360 def _set_max_jobs(self, max_jobs):
10361 self._max_jobs = max_jobs
10362 self._task_queues.jobs.max_jobs = max_jobs
# Decide whether emerge should run with "background" (quiet, logged) output:
# true when many jobs or --quiet are requested and no option from
# _opts_no_background is present. Forces --jobs=1 when interactive
# packages are scheduled, and configures status-display quietness and
# xterm titles accordingly.
# NOTE(review): embedded line numbers show gaps (e.g. 10382, where the
# `msg` list used below would be initialized, and the final
# `return background`); this excerpt is incomplete and not runnable as-is.
10364 def _background_mode(self):
10366 Check if background mode is enabled and adjust states as necessary.
10369 @returns: True if background mode is enabled, False otherwise.
10371 background = (self._max_jobs is True or \
10372 self._max_jobs > 1 or "--quiet" in self.myopts) and \
10373 not bool(self._opts_no_background.intersection(self.myopts))
10376 interactive_tasks = self._get_interactive_tasks()
10377 if interactive_tasks:
10379 writemsg_level(">>> Sending package output to stdio due " + \
10380 "to interactive package(s):\n",
10381 level=logging.INFO, noiselevel=-1)
10383 for pkg in interactive_tasks:
10384 pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
10385 if pkg.root != "/":
10386 pkg_str += " for " + pkg.root
10387 msg.append(pkg_str)
10389 writemsg_level("".join("%s\n" % (l,) for l in msg),
10390 level=logging.INFO, noiselevel=-1)
10391 if self._max_jobs is True or self._max_jobs > 1:
10392 self._set_max_jobs(1)
10393 writemsg_level(">>> Setting --jobs=1 due " + \
10394 "to the above interactive package(s)\n",
10395 level=logging.INFO, noiselevel=-1)
10397 self._status_display.quiet = \
10398 not background or \
10399 ("--quiet" in self.myopts and \
10400 "--verbose" not in self.myopts)
10402 self._logger.xterm_titles = \
10403 "notitles" not in self.settings.features and \
10404 self._status_display.quiet
# Collect the merge-operation Packages whose PROPERTIES metadata contains
# "interactive". Uses Python 2 `except X, e` syntax.
# NOTE(review): embedded line numbers show gaps (10415 `continue` and
# 10416 `try:` appear to be missing); excerpt is incomplete.
10408 def _get_interactive_tasks(self):
10409 from portage import flatten
10410 from portage.dep import use_reduce, paren_reduce
10411 interactive_tasks = []
10412 for task in self._mergelist:
10413 if not (isinstance(task, Package) and \
10414 task.operation == "merge"):
10417 properties = flatten(use_reduce(paren_reduce(
10418 task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10419 except portage.exception.InvalidDependString, e:
10420 show_invalid_depstring_notice(task,
10421 task.metadata["PROPERTIES"], str(e))
10422 raise self._unknown_internal_error()
10423 if "interactive" in properties:
10424 interactive_tasks.append(task)
10425 return interactive_tasks
# Store the dependency digraph and derive scheduling state from it.
# The digraph is discarded (set to None) for --nodeps or single-job runs.
# NOTE(review): embedded line numbers show gaps (e.g. the `return` after
# setting self._digraph = None); excerpt is incomplete.
10427 def _set_digraph(self, digraph):
10428 if "--nodeps" in self.myopts or \
10429 (self._max_jobs is not True and self._max_jobs < 2):
10431 self._digraph = None
10434 self._digraph = digraph
10435 self._find_system_deps()
10436 self._prune_digraph()
10437 self._prevent_builddir_collisions()
# Populate self._deep_system_deps with merge-operation packages found by
# _find_deep_system_runtime_deps() over the digraph.
# NOTE(review): docstring delimiter lines appear to be missing (gaps in
# the embedded line numbers).
10439 def _find_system_deps(self):
10441 Find system packages and their deep runtime dependencies. Before being
10442 merged, these packages go to merge_wait_queue, to be merged when no
10443 other packages are building.
10445 deep_system_deps = self._deep_system_deps
10446 deep_system_deps.clear()
10447 deep_system_deps.update(
10448 _find_deep_system_runtime_deps(self._digraph))
10449 deep_system_deps.difference_update([pkg for pkg in \
10450 deep_system_deps if pkg.operation != "merge"])
# Remove root nodes from the digraph that no longer matter for scheduling
# (non-Packages, installed nomerge nodes, completed tasks).
# NOTE(review): embedded line numbers show gaps; the enclosing loop
# construct and its exit (e.g. a `break` after 10469) are missing here.
10452 def _prune_digraph(self):
10454 Prune any root nodes that are irrelevant.
10457 graph = self._digraph
10458 completed_tasks = self._completed_tasks
10459 removed_nodes = set()
10461 for node in graph.root_nodes():
10462 if not isinstance(node, Package) or \
10463 (node.installed and node.operation == "nomerge") or \
10465 node in completed_tasks:
10466 removed_nodes.add(node)
10468 graph.difference_update(removed_nodes)
10469 if not removed_nodes:
10471 removed_nodes.clear()
# Add buildtime-priority edges between packages sharing the same cpv so
# their builds are serialized and cannot collide in the build directory.
# NOTE(review): embedded line numbers show gaps (e.g. the initialization
# of `cpv_map` and `continue`/`else` lines are missing from this excerpt).
10473 def _prevent_builddir_collisions(self):
10475 When building stages, sometimes the same exact cpv needs to be merged
10476 to both $ROOTs. Add edges to the digraph in order to avoid collisions
10477 in the builddir. Currently, normal file locks would be inappropriate
10478 for this purpose since emerge holds all of it's build dir locks from
10482 for pkg in self._mergelist:
10483 if not isinstance(pkg, Package):
10484 # a satisfied blocker
10488 if pkg.cpv not in cpv_map:
10489 cpv_map[pkg.cpv] = [pkg]
10491 for earlier_pkg in cpv_map[pkg.cpv]:
10492 self._digraph.add(earlier_pkg, pkg,
10493 priority=DepPriority(buildtime=True))
10494 cpv_map[pkg.cpv].append(pkg)
# Exception carrying an unmerge failure status; per its docstring text it
# is raised by unmerge() when an uninstallation fails.
# NOTE(review): embedded line numbers show gaps (docstring delimiters and
# what is presumably an `if pargs:` guard before 10505 are missing).
10496 class _pkg_failure(portage.exception.PortageException):
10498 An instance of this class is raised by unmerge() when
10499 an uninstallation fails.
10502 def __init__(self, *pargs):
10503 portage.exception.PortageException.__init__(self, pargs)
10505 self.status = pargs[0]
# Put a fetcher at the front of the fetch queue; per the docstring text,
# this serializes access to the fetch log.
10507 def _schedule_fetch(self, fetcher):
10509 Schedule a fetcher on the fetch queue, in order to
10510 serialize access to the fetch log.
10512 self._task_queues.fetch.addFront(fetcher)
# Put a setup phase at the front of the merge queue; per the docstring
# text, this serializes unsandboxed access to the live filesystem.
10514 def _schedule_setup(self, setup_phase):
10516 Schedule a setup phase on the merge queue, in order to
10517 serialize unsandboxed access to the live filesystem.
10519 self._task_queues.merge.addFront(setup_phase)
# Add an unpack phase to the unpack queue; per the docstring text, this
# serializes $DISTDIR access for live ebuilds.
10522 def _schedule_unpack(self, unpack_phase):
10524 Schedule an unpack phase on the unpack queue, in order
10525 to serialize $DISTDIR access for live ebuilds.
10527 self._task_queues.unpack.add(unpack_phase)
# Return a zero-argument closure that performs the blocker lookup for
# new_pkg; per the docstring text it must only be called once the vdb
# lock has been acquired.
10529 def _find_blockers(self, new_pkg):
10531 Returns a callable which should be called only when
10532 the vdb lock has been acquired.
10534 def get_blockers():
10535 return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10536 return get_blockers
# Build a list of portage.dblink objects for installed packages that
# block new_pkg, skipping packages in the same slot/cpv (those are
# replaced rather than blocked).
# NOTE(review): embedded line numbers show gaps (the early `return`,
# the gc.collect() call referenced by the comment, and `continue`
# statements appear to be missing from this excerpt).
10538 def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
10539 if self._opts_ignore_blockers.intersection(self.myopts):
10542 # Call gc.collect() here to avoid heap overflow that
10543 # triggers 'Cannot allocate memory' errors (reported
10544 # with python-2.5).
10548 blocker_db = self._blocker_db[new_pkg.root]
10550 blocker_dblinks = []
10551 for blocking_pkg in blocker_db.findInstalledBlockers(
10552 new_pkg, acquire_lock=acquire_lock):
10553 if new_pkg.slot_atom == blocking_pkg.slot_atom:
10555 if new_pkg.cpv == blocking_pkg.cpv:
10557 blocker_dblinks.append(portage.dblink(
10558 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
10559 self.pkgsettings[blocking_pkg.root], treetype="vartree",
10560 vartree=self.trees[blocking_pkg.root]["vartree"]))
10564 return blocker_dblinks
def _dblink_pkg(self, pkg_dblink):
	"""Build the Package instance corresponding to a dblink object."""
	# Map the dblink's tree type ("vartree", ...) to a package type name.
	tree_type = RootConfig.tree_pkg_map[pkg_dblink.treetype]
	return self._pkg(
		pkg_dblink.mycpv,
		tree_type,
		self.trees[pkg_dblink.myroot]["root_config"],
		installed=(tree_type == "installed"))
# Append msg to the file at log_path (opened in append mode).
# NOTE(review): the body is truncated in this excerpt — the embedded line
# numbers jump from 10574 to 10580; the write/close lines are missing.
10573 def _append_to_log_path(self, log_path, msg):
10574 f = open(log_path, 'a')
# Route an elog callback from a dblink: in background mode with a log
# file configured, messages go to the log file instead of the terminal.
# NOTE(review): embedded line numbers show large gaps (the `out` buffer
# setup, message loop, and log_file close are missing from this excerpt).
10580 def _dblink_elog(self, pkg_dblink, phase, func, msgs):
10582 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10585 background = self._background
10587 if background and log_path is not None:
10588 log_file = open(log_path, 'a')
10593 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
10595 if log_file is not None:
10598 def _dblink_emerge_log(self, msg):
10599 self._logger.log(msg)
# Display a merge message from a dblink: write to the terminal unless in
# background mode below WARN level; when a log file is configured, also
# append the message there.
# NOTE(review): embedded line numbers show gaps (a `return` and the
# `if background:` / `else:` structure around 10609-10610 are missing).
10601 def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
10602 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10603 background = self._background
10605 if log_path is None:
10606 if not (background and level < logging.WARN):
10607 portage.util.writemsg_level(msg,
10608 level=level, noiselevel=noiselevel)
10611 portage.util.writemsg_level(msg,
10612 level=level, noiselevel=noiselevel)
10613 self._append_to_log_path(log_path, msg)
# Run one ebuild phase for a dblink via an EbuildPhase task on the
# scheduler interface, returning the phase's exit code. Per the docstring
# text, this lets the scheduler keep running while phases execute.
# NOTE(review): embedded line numbers show gaps (docstring delimiters).
10615 def _dblink_ebuild_phase(self,
10616 pkg_dblink, pkg_dbapi, ebuild_path, phase):
10618 Using this callback for merge phases allows the scheduler
10619 to run while these phases execute asynchronously, and allows
10620 the scheduler control output handling.
10623 scheduler = self._sched_iface
10624 settings = pkg_dblink.settings
10625 pkg = self._dblink_pkg(pkg_dblink)
10626 background = self._background
10627 log_path = settings.get("PORTAGE_LOG_FILE")
10629 ebuild_phase = EbuildPhase(background=background,
10630 pkg=pkg, phase=phase, scheduler=scheduler,
10631 settings=settings, tree=pkg_dblink.treetype)
10632 ebuild_phase.start()
10633 ebuild_phase.wait()
10635 return ebuild_phase.returncode
# Generate Manifest digests before parallel work starts, for ebuild merge
# items when --digest is given or FEATURES=digest is set.
# NOTE(review): embedded line numbers show many gaps (early returns,
# `continue` statements, and the writemsg_level(...) call heads before
# 10670 and 10676 are missing); excerpt is incomplete.
10637 def _generate_digests(self):
10639 Generate digests if necessary for --digests or FEATURES=digest.
10640 In order to avoid interference, this must done before parallel
10644 if '--fetchonly' in self.myopts:
10647 digest = '--digest' in self.myopts
10649 for pkgsettings in self.pkgsettings.itervalues():
10650 if 'digest' in pkgsettings.features:
10657 for x in self._mergelist:
10658 if not isinstance(x, Package) or \
10659 x.type_name != 'ebuild' or \
10660 x.operation != 'merge':
10662 pkgsettings = self.pkgsettings[x.root]
10663 if '--digest' not in self.myopts and \
10664 'digest' not in pkgsettings.features:
10666 portdb = x.root_config.trees['porttree'].dbapi
10667 ebuild_path = portdb.findname(x.cpv)
10668 if not ebuild_path:
10670 "!!! Could not locate ebuild for '%s'.\n" \
10671 % x.cpv, level=logging.ERROR, noiselevel=-1)
10673 pkgsettings['O'] = os.path.dirname(ebuild_path)
10674 if not portage.digestgen([], pkgsettings, myportdb=portdb):
10676 "!!! Unable to generate manifest for '%s'.\n" \
10677 % x.cpv, level=logging.ERROR, noiselevel=-1)
# Verify Manifest digests for all ebuild items up front (FEATURES=strict
# only, skipped for fetch-only modes) using quiet clones of each root's
# config so digestcheck output stays terse.
# NOTE(review): embedded line numbers show gaps (early `return os.EX_OK`,
# `continue`, and the failure-return lines after 10712 are missing).
10682 def _check_manifests(self):
10683 # Verify all the manifests now so that the user is notified of failure
10684 # as soon as possible.
10685 if "strict" not in self.settings.features or \
10686 "--fetchonly" in self.myopts or \
10687 "--fetch-all-uri" in self.myopts:
10690 shown_verifying_msg = False
10691 quiet_settings = {}
10692 for myroot, pkgsettings in self.pkgsettings.iteritems():
10693 quiet_config = portage.config(clone=pkgsettings)
10694 quiet_config["PORTAGE_QUIET"] = "1"
10695 quiet_config.backup_changes("PORTAGE_QUIET")
10696 quiet_settings[myroot] = quiet_config
10699 for x in self._mergelist:
10700 if not isinstance(x, Package) or \
10701 x.type_name != "ebuild":
10704 if not shown_verifying_msg:
10705 shown_verifying_msg = True
10706 self._status_msg("Verifying ebuild manifests")
10708 root_config = x.root_config
10709 portdb = root_config.trees["porttree"].dbapi
10710 quiet_config = quiet_settings[root_config.root]
10711 quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
10712 if not portage.digestcheck([], quiet_config, strict=True):
# Queue background fetch tasks for every mergelist entry except the first
# (to avoid "waiting for lock" noise), when parallel-fetch is enabled.
# NOTE(review): embedded line numbers show gaps (the early `return` after
# 10719 is missing); note `getbinpkg` is computed but unused in the
# visible lines.
10717 def _add_prefetchers(self):
10719 if not self._parallel_fetch:
10722 if self._parallel_fetch:
10723 self._status_msg("Starting parallel fetch")
10725 prefetchers = self._prefetchers
10726 getbinpkg = "--getbinpkg" in self.myopts
10728 # In order to avoid "waiting for lock" messages
10729 # at the beginning, which annoy users, never
10730 # spawn a prefetcher for the first package.
10731 for pkg in self._mergelist[1:]:
10732 prefetcher = self._create_prefetcher(pkg)
10733 if prefetcher is not None:
10734 self._task_queues.fetch.add(prefetcher)
10735 prefetchers[pkg] = prefetcher
# Build a background prefetch task for a package: EbuildFetcher for
# ebuilds, BinpkgPrefetcher for remote binary packages with --getbinpkg.
# NOTE(review): embedded line numbers show gaps (the `prefetcher = None`
# initialization and the final `return prefetcher` are missing).
10737 def _create_prefetcher(self, pkg):
10739 @return: a prefetcher, or None if not applicable
10743 if not isinstance(pkg, Package):
10746 elif pkg.type_name == "ebuild":
10748 prefetcher = EbuildFetcher(background=True,
10749 config_pool=self._ConfigPool(pkg.root,
10750 self._allocate_config, self._deallocate_config),
10751 fetchonly=1, logfile=self._fetch_log,
10752 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
10754 elif pkg.type_name == "binary" and \
10755 "--getbinpkg" in self.myopts and \
10756 pkg.root_config.trees["bintree"].isremote(pkg.cpv):
10758 prefetcher = BinpkgPrefetcher(background=True,
10759 pkg=pkg, scheduler=self._sched_iface)
# Report whether the mergelist replaces the running portage instance at a
# position other than last, which would force a restart mid-run.
# NOTE(review): embedded line numbers show gaps (the `return True` /
# `return False` lines are missing from this excerpt).
10763 def _is_restart_scheduled(self):
10765 Check if the merge list contains a replacement
10766 for the current running instance, that will result
10767 in restart after merge.
10769 @returns: True if a restart is scheduled, False otherwise.
10771 if self._opts_no_restart.intersection(self.myopts):
10774 mergelist = self._mergelist
10776 for i, pkg in enumerate(mergelist):
10777 if self._is_restart_necessary(pkg) and \
10778 i != len(mergelist) - 1:
# True when pkg is a different version of portage being merged to the
# running root, i.e. merging it changes the running portage instance.
# NOTE(review): embedded line numbers show gaps (the fallback `return`
# lines are missing from this excerpt).
10783 def _is_restart_necessary(self, pkg):
10785 @return: True if merging the given package
10786 requires restart, False otherwise.
10789 # Figure out if we need a restart.
10790 if pkg.root == self._running_root.root and \
10791 portage.match_from_list(
10792 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10793 if self._running_portage:
10794 return pkg.cpv != self._running_portage.cpv
# After portage itself was merged, re-exec emerge with --resume (and
# --ignore-default-opts to keep bad resume options from leaking back in
# via EMERGE_DEFAULT_OPTS). Never returns when the execv() is reached.
# NOTE(review): embedded line numbers show gaps (early `return`s after
# the guards and an `else:` around 10841 are missing from this excerpt).
10798 def _restart_if_necessary(self, pkg):
10800 Use execv() to restart emerge. This happens
10801 if portage upgrades itself and there are
10802 remaining packages in the list.
10805 if self._opts_no_restart.intersection(self.myopts):
10808 if not self._is_restart_necessary(pkg):
10811 if pkg == self._mergelist[-1]:
10814 self._main_loop_cleanup()
10816 logger = self._logger
10817 pkg_count = self._pkg_count
10818 mtimedb = self._mtimedb
10819 bad_resume_opts = self._bad_resume_opts
10821 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
10822 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
10824 logger.log(" *** RESTARTING " + \
10825 "emerge via exec() after change of " + \
10826 "portage version.")
10828 mtimedb["resume"]["mergelist"].remove(list(pkg))
10830 portage.run_exitfuncs()
10831 mynewargv = [sys.argv[0], "--resume"]
10832 resume_opts = self.myopts.copy()
10833 # For automatic resume, we need to prevent
10834 # any of bad_resume_opts from leaking in
10835 # via EMERGE_DEFAULT_OPTS.
10836 resume_opts["--ignore-default-opts"] = True
10837 for myopt, myarg in resume_opts.iteritems():
10838 if myopt not in bad_resume_opts:
10840 mynewargv.append(myopt)
10842 mynewargv.append(myopt +"="+ str(myarg))
10843 # priority only needs to be adjusted on the first run
10844 os.environ["PORTAGE_NICENESS"] = "0"
10845 os.execv(mynewargv[0], mynewargv)
# NOTE(review): this region appears to be the body of the scheduler's
# top-level merge() entry point — it validates PORTAGE_TMPDIR per root,
# switches settings to background mode, generates/checks digests, runs
# self._merge() in a --keep-going retry loop, and finally reports failed
# packages. Its `def` line and many interior lines are absent from this
# excerpt (the embedded line numbers jump repeatedly), so none of the
# control flow here can be assumed complete or runnable as-is.
10849 if "--resume" in self.myopts:
10851 portage.writemsg_stdout(
10852 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10853 self._logger.log(" *** Resuming merge...")
10855 self._save_resume_list()
10858 self._background = self._background_mode()
10859 except self._unknown_internal_error:
10862 for root in self.trees:
10863 root_config = self.trees[root]["root_config"]
10865 # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10866 # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10867 # for ensuring sane $PWD (bug #239560) and storing elog messages.
10868 tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10869 if not tmpdir or not os.path.isdir(tmpdir):
10870 msg = "The directory specified in your " + \
10871 "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10872 "does not exist. Please create this " + \
10873 "directory or correct your PORTAGE_TMPDIR setting."
10874 msg = textwrap.wrap(msg, 70)
10875 out = portage.output.EOutput()
10880 if self._background:
10881 root_config.settings.unlock()
10882 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10883 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10884 root_config.settings.lock()
10886 self.pkgsettings[root] = portage.config(
10887 clone=root_config.settings)
10889 rval = self._generate_digests()
10890 if rval != os.EX_OK:
10893 rval = self._check_manifests()
10894 if rval != os.EX_OK:
10897 keep_going = "--keep-going" in self.myopts
10898 fetchonly = self._build_opts.fetchonly
10899 mtimedb = self._mtimedb
10900 failed_pkgs = self._failed_pkgs
10903 rval = self._merge()
10904 if rval == os.EX_OK or fetchonly or not keep_going:
10906 if "resume" not in mtimedb:
10908 mergelist = self._mtimedb["resume"].get("mergelist")
10912 if not failed_pkgs:
10915 for failed_pkg in failed_pkgs:
10916 mergelist.remove(list(failed_pkg.pkg))
10918 self._failed_pkgs_all.extend(failed_pkgs)
10924 if not self._calc_resume_list():
10927 clear_caches(self.trees)
10928 if not self._mergelist:
10931 self._save_resume_list()
10932 self._pkg_count.curval = 0
10933 self._pkg_count.maxval = len([x for x in self._mergelist \
10934 if isinstance(x, Package) and x.operation == "merge"])
10935 self._status_display.maxval = self._pkg_count.maxval
10937 self._logger.log(" *** Finished. Cleaning up...")
10940 self._failed_pkgs_all.extend(failed_pkgs)
10943 background = self._background
10944 failure_log_shown = False
10945 if background and len(self._failed_pkgs_all) == 1:
10946 # If only one package failed then just show it's
10947 # whole log for easy viewing.
10948 failed_pkg = self._failed_pkgs_all[-1]
10949 build_dir = failed_pkg.build_dir
10952 log_paths = [failed_pkg.build_log]
10954 log_path = self._locate_failure_log(failed_pkg)
10955 if log_path is not None:
10957 log_file = open(log_path)
10961 if log_file is not None:
10963 for line in log_file:
10964 writemsg_level(line, noiselevel=-1)
10967 failure_log_shown = True
10969 # Dump mod_echo output now since it tends to flood the terminal.
10970 # This allows us to avoid having more important output, generated
10971 # later, from being swept away by the mod_echo output.
10972 mod_echo_output = _flush_elog_mod_echo()
10974 if background and not failure_log_shown and \
10975 self._failed_pkgs_all and \
10976 self._failed_pkgs_die_msgs and \
10977 not mod_echo_output:
10979 printer = portage.output.EOutput()
10980 for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10982 if mysettings["ROOT"] != "/":
10983 root_msg = " merged to %s" % mysettings["ROOT"]
10985 printer.einfo("Error messages for package %s%s:" % \
10986 (colorize("INFORM", key), root_msg))
10988 for phase in portage.const.EBUILD_PHASES:
10989 if phase not in logentries:
10991 for msgtype, msgcontent in logentries[phase]:
10992 if isinstance(msgcontent, basestring):
10993 msgcontent = [msgcontent]
10994 for line in msgcontent:
10995 printer.eerror(line.strip("\n"))
10997 if self._post_mod_echo_msgs:
10998 for msg in self._post_mod_echo_msgs:
11001 if len(self._failed_pkgs_all) > 1 or \
11002 (self._failed_pkgs_all and "--keep-going" in self.myopts):
11003 if len(self._failed_pkgs_all) > 1:
11004 msg = "The following %d packages have " % \
11005 len(self._failed_pkgs_all) + \
11006 "failed to build or install:"
11008 msg = "The following package has " + \
11009 "failed to build or install:"
11010 prefix = bad(" * ")
11011 writemsg(prefix + "\n", noiselevel=-1)
11012 from textwrap import wrap
11013 for line in wrap(msg, 72):
11014 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
11015 writemsg(prefix + "\n", noiselevel=-1)
11016 for failed_pkg in self._failed_pkgs_all:
11017 writemsg("%s\t%s\n" % (prefix,
11018 colorize("INFORM", str(failed_pkg.pkg))),
11020 writemsg(prefix + "\n", noiselevel=-1)
# elog listener: record ERROR-level log entries for a package so they can
# be replayed when the run finishes.
# NOTE(review): embedded line numbers show a gap at 11026 (presumably an
# `if errors:` guard before the append); excerpt is incomplete.
11024 def _elog_listener(self, mysettings, key, logentries, fulltext):
11025 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
11027 self._failed_pkgs_die_msgs.append(
11028 (mysettings, key, errors))
# Find a usable (non-empty) build log for a failed package.
# NOTE(review): this excerpt is heavily truncated — the embedded line
# numbers jump repeatedly (11031, 11033-34, 11036, 11038-41, 11043+ are
# missing), including the existence checks and the return statements.
11030 def _locate_failure_log(self, failed_pkg):
11032 build_dir = failed_pkg.build_dir
11035 log_paths = [failed_pkg.build_log]
11037 for log_path in log_paths:
11042 log_size = os.stat(log_path).st_size
# Seed the scheduler's package queue from the mergelist; Blocker entries
# are handled separately.
# NOTE(review): the `elif` branch body (embedded lines 11059-11060) is
# missing from this excerpt.
11053 def _add_packages(self):
11054 pkg_queue = self._pkg_queue
11055 for pkg in self._mergelist:
11056 if isinstance(pkg, Package):
11057 pkg_queue.append(pkg)
11058 elif isinstance(pkg, Blocker):
# Start listener for a system-package merge: record its direct,
# unsatisfied runtime dependencies (merge-operation children not yet
# completed) in self._unsatisfied_system_deps. Only applies to ROOT=/.
# NOTE(review): embedded line numbers show gaps (early `return`s, the
# nested function's return values, and `continue` lines are missing).
11061 def _system_merge_started(self, merge):
11063 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
11065 graph = self._digraph
11068 pkg = merge.merge.pkg
11070 # Skip this if $ROOT != / since it shouldn't matter if there
11071 # are unsatisfied system runtime deps in this case.
11072 if pkg.root != '/':
11075 completed_tasks = self._completed_tasks
11076 unsatisfied = self._unsatisfied_system_deps
11078 def ignore_non_runtime_or_satisfied(priority):
11080 Ignore non-runtime and satisfied runtime priorities.
11082 if isinstance(priority, DepPriority) and \
11083 not priority.satisfied and \
11084 (priority.runtime or priority.runtime_post):
11088 # When checking for unsatisfied runtime deps, only check
11089 # direct deps since indirect deps are checked when the
11090 # corresponding parent is merged.
11091 for child in graph.child_nodes(pkg,
11092 ignore_priority=ignore_non_runtime_or_satisfied):
11093 if not isinstance(child, Package) or \
11094 child.operation == 'uninstall':
11098 if child.operation == 'merge' and \
11099 child not in completed_tasks:
11100 unsatisfied.add(child)
11102 def _merge_wait_exit_handler(self, task):
11103 self._merge_wait_scheduled.remove(task)
11104 self._merge_exit(task)
# Exit listener for merge tasks: run the failure/bookkeeping logic,
# return the settings clone to the pool, and update the status display
# (the counter only advances for successful new merges).
# NOTE(review): the method's trailing line(s) (embedded 11113+) are
# missing from this excerpt.
11106 def _merge_exit(self, merge):
11107 self._do_merge_exit(merge)
11108 self._deallocate_config(merge.merge.settings)
11109 if merge.returncode == os.EX_OK and \
11110 not merge.merge.pkg.installed:
11111 self._status_display.curval += 1
11112 self._status_display.merges = len(self._task_queues.merge)
# Post-merge bookkeeping: on failure, record a _failed_pkg entry and
# notify the user; on success, mark the task (and any replaced package's
# uninstall task) complete, maybe restart emerge, and prune the resume
# mergelist so --resume survives interruption.
# NOTE(review): embedded line numbers show gaps (e.g. a `pkg=pkg`
# argument line around 11124, an early `return`, and the final
# mtimedb.commit() are missing from this excerpt).
11115 def _do_merge_exit(self, merge):
11116 pkg = merge.merge.pkg
11117 if merge.returncode != os.EX_OK:
11118 settings = merge.merge.settings
11119 build_dir = settings.get("PORTAGE_BUILDDIR")
11120 build_log = settings.get("PORTAGE_LOG_FILE")
11122 self._failed_pkgs.append(self._failed_pkg(
11123 build_dir=build_dir, build_log=build_log,
11125 returncode=merge.returncode))
11126 self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
11128 self._status_display.failed = len(self._failed_pkgs)
11131 self._task_complete(pkg)
11132 pkg_to_replace = merge.merge.pkg_to_replace
11133 if pkg_to_replace is not None:
11134 # When a package is replaced, mark it's uninstall
11135 # task complete (if any).
11136 uninst_hash_key = \
11137 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
11138 self._task_complete(uninst_hash_key)
11143 self._restart_if_necessary(pkg)
11145 # Call mtimedb.commit() after each merge so that
11146 # --resume still works after being interrupted
11147 # by reboot, sigkill or similar.
11148 mtimedb = self._mtimedb
11149 mtimedb["resume"]["mergelist"].remove(list(pkg))
11150 if not mtimedb["resume"]["mergelist"]:
11151 del mtimedb["resume"]
# Exit listener for build tasks: on success, wrap the build in a
# PackageMerge — deferring it to the merge-wait queue for deep system
# deps — otherwise record the failure. Always updates job/display state.
# NOTE(review): embedded line numbers show gaps (`else:` branches, a
# `pkg=build.pkg` argument line around 11175, and return/continue lines
# are missing from this excerpt).
11154 def _build_exit(self, build):
11155 if build.returncode == os.EX_OK:
11157 merge = PackageMerge(merge=build)
11158 if not build.build_opts.buildpkgonly and \
11159 build.pkg in self._deep_system_deps:
11160 # Since dependencies on system packages are frequently
11161 # unspecified, merge them only when no builds are executing.
11162 self._merge_wait_queue.append(merge)
11163 merge.addStartListener(self._system_merge_started)
11165 merge.addExitListener(self._merge_exit)
11166 self._task_queues.merge.add(merge)
11167 self._status_display.merges = len(self._task_queues.merge)
11169 settings = build.settings
11170 build_dir = settings.get("PORTAGE_BUILDDIR")
11171 build_log = settings.get("PORTAGE_LOG_FILE")
11173 self._failed_pkgs.append(self._failed_pkg(
11174 build_dir=build_dir, build_log=build_log,
11176 returncode=build.returncode))
11177 self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
11179 self._status_display.failed = len(self._failed_pkgs)
11180 self._deallocate_config(build.settings)
11182 self._status_display.running = self._jobs
11185 def _extract_exit(self, build):
11186 self._build_exit(build)
11188 def _task_complete(self, pkg):
11189 self._completed_tasks.add(pkg)
11190 self._unsatisfied_system_deps.discard(pkg)
11191 self._choose_pkg_return_early = False
# NOTE(review): this region appears to be the body of the scheduler's
# _merge() method — its `def` line (embedded 11193) and several interior
# lines (the try/finally around the main loop, the success return path)
# are absent from this excerpt. It installs the elog listener and quiet
# lock mode for the duration of the main loop, then restores both and
# derives the return value from the last failed package.
11195 self._add_prefetchers()
11196 self._add_packages()
11197 pkg_queue = self._pkg_queue
11198 failed_pkgs = self._failed_pkgs
11199 portage.locks._quiet = self._background
11200 portage.elog._emerge_elog_listener = self._elog_listener
11206 self._main_loop_cleanup()
11207 portage.locks._quiet = False
11208 portage.elog._emerge_elog_listener = None
11210 rval = failed_pkgs[-1].returncode
11214 def _main_loop_cleanup(self):
11215 del self._pkg_queue[:]
11216 self._completed_tasks.clear()
11217 self._deep_system_deps.clear()
11218 self._unsatisfied_system_deps.clear()
11219 self._choose_pkg_return_early = False
11220 self._status_display.reset()
11221 self._digraph = None
11222 self._task_queues.fetch.clear()
# Pick the next package whose dependencies are all satisfied. Without a
# digraph, packages are taken FIFO; with one, the first queue entry not
# dependent on a scheduled merge is chosen. Memoizes a negative result in
# _choose_pkg_return_early until some job completes.
# NOTE(review): embedded line numbers show gaps (`return None` lines, the
# `chosen_pkg = None` initialization, and loop break lines are missing).
11224 def _choose_pkg(self):
11226 Choose a task that has all it's dependencies satisfied.
11229 if self._choose_pkg_return_early:
11232 if self._digraph is None:
11233 if (self._jobs or self._task_queues.merge) and \
11234 not ("--nodeps" in self.myopts and \
11235 (self._max_jobs is True or self._max_jobs > 1)):
11236 self._choose_pkg_return_early = True
11238 return self._pkg_queue.pop(0)
11240 if not (self._jobs or self._task_queues.merge):
11241 return self._pkg_queue.pop(0)
11243 self._prune_digraph()
11246 later = set(self._pkg_queue)
11247 for pkg in self._pkg_queue:
11249 if not self._dependent_on_scheduled_merges(pkg, later):
11253 if chosen_pkg is not None:
11254 self._pkg_queue.remove(chosen_pkg)
11256 if chosen_pkg is None:
11257 # There's no point in searching for a package to
11258 # choose until at least one of the existing jobs
11260 self._choose_pkg_return_early = True
# Depth-first search of pkg's dependency subgraph for any node that still
# represents a scheduled (incomplete) merge, ignoring nodes listed in
# `later` since they will merge after pkg anyway.
# NOTE(review): embedded line numbers show gaps (the `dependent = False`
# initialization, the `while node_stack:` loop head, `continue`, the
# dependent-found break, and the final return are missing).
11264 def _dependent_on_scheduled_merges(self, pkg, later):
11266 Traverse the subgraph of the given packages deep dependencies
11267 to see if it contains any scheduled merges.
11268 @param pkg: a package to check dependencies for
11270 @param later: packages for which dependence should be ignored
11271 since they will be merged later than pkg anyway and therefore
11272 delaying the merge of pkg will not result in a more optimal
11276 @returns: True if the package is dependent, False otherwise.
11279 graph = self._digraph
11280 completed_tasks = self._completed_tasks
11283 traversed_nodes = set([pkg])
11284 direct_deps = graph.child_nodes(pkg)
11285 node_stack = direct_deps
11286 direct_deps = frozenset(direct_deps)
11288 node = node_stack.pop()
11289 if node in traversed_nodes:
11291 traversed_nodes.add(node)
11292 if not ((node.installed and node.operation == "nomerge") or \
11293 (node.operation == "uninstall" and \
11294 node not in direct_deps) or \
11295 node in completed_tasks or \
11299 node_stack.extend(graph.child_nodes(node))
# Hand out a per-task config instance, reusing one from the pool when
# available; reload()/reset() flush state left over from the previous
# package (per the inline comment, e.g. PORTAGE_LOG_FILE).
# NOTE(review): embedded line numbers show gaps (the `else:` at 11310
# before the clone on 11311 is missing from this excerpt).
11303 def _allocate_config(self, root):
11305 Allocate a unique config instance for a task in order
11306 to prevent interference between parallel tasks.
11308 if self._config_pool[root]:
11309 temp_settings = self._config_pool[root].pop()
11311 temp_settings = portage.config(clone=self.pkgsettings[root])
11312 # Since config.setcpv() isn't guaranteed to call config.reset() due to
11313 # performance reasons, call it here to make sure all settings from the
11314 # previous package get flushed out (such as PORTAGE_LOG_FILE).
11315 temp_settings.reload()
11316 temp_settings.reset()
11317 return temp_settings
11319 def _deallocate_config(self, settings):
11320 self._config_pool[settings["ROOT"]].append(settings)
# Drive the scheduling loop: cap jobs at 1 when a restart is pending or a
# no-background option is set, then alternate between scheduling and
# polling until no jobs or merges remain.
# NOTE(review): embedded line numbers show gaps (the _poll() calls inside
# the loops, the drain loop head, and the final break are missing).
11322 def _main_loop(self):
11324 # Only allow 1 job max if a restart is scheduled
11325 # due to portage update.
11326 if self._is_restart_scheduled() or \
11327 self._opts_no_background.intersection(self.myopts):
11328 self._set_max_jobs(1)
11330 merge_queue = self._task_queues.merge
11332 while self._schedule():
11333 if self._poll_event_handlers:
11338 if not (self._jobs or merge_queue):
11340 if self._poll_event_handlers:
11343 def _keep_scheduling(self):
11344 return bool(self._pkg_queue and \
11345 not (self._failed_pkgs and not self._build_opts.fetchonly))
# One scheduling pass: when no builds are running, flush the merge-wait
# queue into the merge queue; run the scheduling implementation; cancel
# prefetchers when failures mean they are the only thing keeping the poll
# loop alive. Returns whether scheduling should continue.
# NOTE(review): embedded line numbers show gaps (the schedule() calls on
# the queues around 11362 and loop/condition lines are missing).
11347 def _schedule_tasks(self):
11349 # When the number of jobs drops to zero, process all waiting merges.
11350 if not self._jobs and self._merge_wait_queue:
11351 for task in self._merge_wait_queue:
11352 task.addExitListener(self._merge_wait_exit_handler)
11353 self._task_queues.merge.add(task)
11354 self._status_display.merges = len(self._task_queues.merge)
11355 self._merge_wait_scheduled.extend(self._merge_wait_queue)
11356 del self._merge_wait_queue[:]
11358 self._schedule_tasks_imp()
11359 self._status_display.display()
11362 for q in self._task_queues.values():
11366 # Cancel prefetchers if they're the only reason
11367 # the main poll loop is still running.
11368 if self._failed_pkgs and not self._build_opts.fetchonly and \
11369 not (self._jobs or self._task_queues.merge) and \
11370 self._task_queues.fetch:
11371 self._task_queues.fetch.clear()
11375 self._schedule_tasks_imp()
11376 self._status_display.display()
11378 return self._keep_scheduling()
# Rate-limit job spawning when --load-average is in effect: delay grows
# as _job_delay_factor * jobs ** _job_delay_exp, capped at _job_delay_max
# seconds since the previous job start.
# NOTE(review): embedded line numbers show gaps (the `return True` /
# `return False` lines are missing from this excerpt).
11380 def _job_delay(self):
11383 @returns: True if job scheduling should be delayed, False otherwise.
11386 if self._jobs and self._max_load is not None:
11388 current_time = time.time()
11390 delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
11391 if delay > self._job_delay_max:
11392 delay = self._job_delay_max
11393 if (current_time - self._previous_job_start_time) < delay:
# Core of one scheduling step: bail out early when scheduling is blocked
# (memoized early return, pending wait-queue merges, unsatisfied system
# deps, job/load limits), otherwise pick a package, build its task, and
# dispatch it to the merge or jobs queue with the right exit listener.
# NOTE(review): embedded line numbers show large gaps (the enclosing
# `while True:` loop, `state_change` updates, the uninstall/installed
# branches, and `continue` lines are missing from this excerpt).
11398 def _schedule_tasks_imp(self):
11401 @returns: True if state changed, False otherwise.
11408 if not self._keep_scheduling():
11409 return bool(state_change)
11411 if self._choose_pkg_return_early or \
11412 self._merge_wait_scheduled or \
11413 (self._jobs and self._unsatisfied_system_deps) or \
11414 not self._can_add_job() or \
11416 return bool(state_change)
11418 pkg = self._choose_pkg()
11420 return bool(state_change)
11424 if not pkg.installed:
11425 self._pkg_count.curval += 1
11427 task = self._task(pkg)
11430 merge = PackageMerge(merge=task)
11431 merge.addExitListener(self._merge_exit)
11432 self._task_queues.merge.add(merge)
11436 self._previous_job_start_time = time.time()
11437 self._status_display.running = self._jobs
11438 task.addExitListener(self._extract_exit)
11439 self._task_queues.jobs.add(task)
11443 self._previous_job_start_time = time.time()
11444 self._status_display.running = self._jobs
11445 task.addExitListener(self._build_exit)
11446 self._task_queues.jobs.add(task)
11448 return bool(state_change)
# Build the MergeListItem task for a package, resolving the installed
# package in the same slot (if any) as pkg_to_replace.
# NOTE(review): embedded line numbers show gaps (the `if previous_cpv:`
# guard around 11456 and the final `return task` are missing from this
# excerpt).
11450 def _task(self, pkg):
11452 pkg_to_replace = None
11453 if pkg.operation != "uninstall":
11454 vardb = pkg.root_config.trees["vartree"].dbapi
11455 previous_cpv = vardb.match(pkg.slot_atom)
11457 previous_cpv = previous_cpv.pop()
11458 pkg_to_replace = self._pkg(previous_cpv,
11459 "installed", pkg.root_config, installed=True)
11461 task = MergeListItem(args_set=self._args_set,
11462 background=self._background, binpkg_opts=self._binpkg_opts,
11463 build_opts=self._build_opts,
11464 config_pool=self._ConfigPool(pkg.root,
11465 self._allocate_config, self._deallocate_config),
11466 emerge_opts=self.myopts,
11467 find_blockers=self._find_blockers(pkg), logger=self._logger,
11468 mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
11469 pkg_to_replace=pkg_to_replace,
11470 prefetcher=self._prefetchers.get(pkg),
11471 scheduler=self._sched_iface,
11472 settings=self._allocate_config(pkg.root),
11473 statusMessage=self._status_msg,
11474 world_atom=self._world_atom)
11478 def _failed_pkg_msg(self, failed_pkg, action, preposition):
11479 pkg = failed_pkg.pkg
11480 msg = "%s to %s %s" % \
11481 (bad("Failed"), action, colorize("INFORM", pkg.cpv))
11482 if pkg.root != "/":
11483 msg += " %s %s" % (preposition, pkg.root)
11485 log_path = self._locate_failure_log(failed_pkg)
11486 if log_path is not None:
11487 msg += ", Log file:"
11488 self._status_msg(msg)
11490 if log_path is not None:
11491 self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
    def _status_msg(self, msg):
        """
        Display a brief status message (no newlines) in the status display.
        This is called by tasks to provide feedback to the user. This
        delegates the responsibility of generating \r and \n control characters,
        to guarantee that lines are created or erased when necessary and
        appropriate.

        @param msg: a brief status message (no newlines allowed)
        """
        # In the foreground, emit a blank line first so the message is
        # visually separated from surrounding build output.
        if not self._background:
            writemsg_level("\n")
        self._status_display.displayMessage(msg)
    def _save_resume_list(self):
        """
        Store the pending merge list in the mtimedb so an interrupted
        operation can be continued with --resume.

        Do this before verifying the ebuild Manifests since it might
        be possible for the user to use --resume --skipfirst get past
        a non-essential package with a broken digest.
        """
        mtimedb = self._mtimedb
        # Only real merges are resumable; uninstall/nomerge entries are
        # excluded from the saved list.
        mtimedb["resume"]["mergelist"] = [list(x) \
            for x in self._mergelist \
            if isinstance(x, Package) and x.operation == "merge"]
    def _calc_resume_list(self):
        """
        Use the current resume list to calculate a new one,
        dropping any packages with unsatisfied deps.

        @returns: True if successful, False otherwise.
        """
        # NOTE(review): many lines of this method are elided from this
        # chunk (try/except/else scaffolding, some guards); comments below
        # annotate only the visible code.
        print colorize("GOOD", "*** Resuming merge...")

        if self._show_list():
            if "--tree" in self.myopts:
                portage.writemsg_stdout("\n" + \
                    darkgreen("These are the packages that " + \
                    "would be merged, in reverse order:\n\n"))
            # (else-branch header elided)
                portage.writemsg_stdout("\n" + \
                    darkgreen("These are the packages that " + \
                    "would be merged, in order:\n\n"))

        show_spinner = "--quiet" not in self.myopts and \
            "--nodeps" not in self.myopts

        # (spinner guard elided)
        print "Calculating dependencies  ",

        myparams = create_depgraph_params(self.myopts, None)
        # (try: elided) -- recompute the dependency graph for the resume list
        success, mydepgraph, dropped_tasks = resume_depgraph(
            self.settings, self.trees, self._mtimedb, self.myopts,
            myparams, self._spinner)
        except depgraph.UnsatisfiedResumeDep, exc:
            # rename variable to avoid python-3.0 error:
            # SyntaxError: can not delete variable 'e' referenced in nested
            # scope
            mydepgraph = e.depgraph
            dropped_tasks = set()

        print "\b\b... done!"

        def unsatisfied_resume_dep_msg():
            # Deferred error report; queued so it prints after elog output.
            mydepgraph.display_problems()
            out = portage.output.EOutput()
            out.eerror("One or more packages are either masked or " + \
                "have missing dependencies:")
            show_parents = set()
            for dep in e.value:
                # Each parent is reported only once.
                if dep.parent in show_parents:
                show_parents.add(dep.parent)
                if dep.atom is None:
                    out.eerror(indent + "Masked package:")
                    out.eerror(2 * indent + str(dep.parent))
                # (else-branch header elided)
                    out.eerror(indent + str(dep.atom) + " pulled in by:")
                    out.eerror(2 * indent + str(dep.parent))
            msg = "The resume list contains packages " + \
                "that are either masked or have " + \
                "unsatisfied dependencies. " + \
                "Please restart/continue " + \
                "the operation manually, or use --skipfirst " + \
                "to skip the first package in the list and " + \
                "any other packages that may be " + \
                "masked or have missing dependencies."
            for line in textwrap.wrap(msg, 72):
        self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)

        if success and self._show_list():
            mylist = mydepgraph.altlist()
            if "--tree" in self.myopts:
                mydepgraph.display(mylist, favorites=self._favorites)
            self._post_mod_echo_msgs.append(mydepgraph.display_problems)
        # (else-branch header elided)
            mydepgraph.display_problems()

        # break_refs() drops references that would otherwise keep the
        # depgraph alive after we extract the new merge list from it.
        mylist = mydepgraph.altlist()
        mydepgraph.break_refs(mylist)
        mydepgraph.break_refs(dropped_tasks)
        self._mergelist = mylist
        self._set_digraph(mydepgraph.schedulerGraph())

        # Record each dropped merge task as a failed package so that
        # --keep-going reporting mentions it.
        for task in dropped_tasks:
            if not (isinstance(task, Package) and task.operation == "merge"):
            msg = "emerge --keep-going:" + \
            if pkg.root != "/":
                msg += " for %s" % (pkg.root,)
            msg += " dropped due to unsatisfied dependency."
            for line in textwrap.wrap(msg, msg_width):
                eerror(line, phase="other", key=pkg.cpv)
            settings = self.pkgsettings[pkg.root]
            # Ensure that log collection from $T is disabled inside
            # elog_process(), since any logs that might exist are
            # not valid here.
            settings.pop("T", None)
            portage.elog.elog_process(pkg.cpv, settings)
            self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
    def _show_list(self):
        """
        Return whether the merge list should be displayed: --ask, --tree
        or --verbose was given without --quiet.
        """
        # NOTE(review): the return statements are elided from this chunk.
        myopts = self.myopts
        if "--quiet" not in myopts and \
            ("--ask" in myopts or "--tree" in myopts or \
            "--verbose" in myopts):
    def _world_atom(self, pkg):
        """
        Add the package to the world file, but only if
        it's supposed to be added. Otherwise, do nothing.
        """
        # NOTE(review): early returns and the try/finally around the world
        # file update are elided from this chunk.

        # These options imply no permanent change to the world file.
        if set(("--buildpkgonly", "--fetchonly",
            "--oneshot", "--onlydeps",
            "--pretend")).intersection(self.myopts):

        # Only packages merged to the target root belong in its world file.
        if pkg.root != self.target_root:

        # Only packages explicitly requested on the command line qualify.
        args_set = self._args_set
        if not args_set.findAtomForPackage(pkg):

        logger = self._logger
        pkg_count = self._pkg_count
        root_config = pkg.root_config
        world_set = root_config.sets["world"]
        world_locked = False
        if hasattr(world_set, "lock"):
            world_locked = True

        if hasattr(world_set, "load"):
            world_set.load() # maybe it's changed on disk

        atom = create_world_atom(pkg, args_set, root_config)
        # (guard on a non-None atom elided)
        if hasattr(world_set, "add"):
            self._status_msg(('Recording %s in "world" ' + \
                'favorites file...') % atom)
            logger.log(" === (%s of %s) Updating world file (%s)" % \
                (pkg_count.curval, pkg_count.maxval, pkg.cpv))
            world_set.add(atom)
        # (else-branch header elided) -- world set has no add() method
        writemsg_level('\n!!! Unable to record %s in "world"\n' % \
            (atom,), level=logging.WARN, noiselevel=-1)
    def _pkg(self, cpv, type_name, root_config, installed=False):
        """
        Get a package instance from the cache, or create a new
        one if necessary. Raises KeyError from aux_get if it
        failures for some reason (package does not exist or is
        corrupt).
        """
        # NOTE(review): the "installed" guard and return statements are
        # elided from this chunk.
        operation = "merge"
        operation = "nomerge"

        if self._digraph is not None:
            # Reuse existing instance when available.
            pkg = self._digraph.get(
                (type_name, root_config.root, cpv, operation))
            if pkg is not None:

        tree_type = depgraph.pkg_tree_map[type_name]
        db = root_config.trees[tree_type].dbapi
        db_keys = list(self.trees[root_config.root][
            tree_type].dbapi._aux_cache_keys)
        metadata = izip(db_keys, db.aux_get(cpv, db_keys))
        pkg = Package(cpv=cpv, metadata=metadata,
            root_config=root_config, installed=installed)
        if type_name == "ebuild":
            # Ebuilds need USE/CHOST calculated from the current config.
            settings = self.pkgsettings[root_config.root]
            settings.setcpv(pkg)
            pkg.metadata["USE"] = settings["PORTAGE_USE"]
            pkg.metadata['CHOST'] = settings.get('CHOST', '')
class MetadataRegen(PollScheduler):
    """
    Poll-based scheduler that regenerates the ebuild metadata cache by
    spawning EbuildMetadataPhase processes in parallel, then cleanses
    stale cache entries afterwards.
    """
    # NOTE(review): numerous lines of this class are elided from this
    # chunk (loop headers, try/except scaffolding, returns); comments
    # annotate only visible code.

    def __init__(self, portdb, cp_iter=None, consumer=None,
        max_jobs=None, max_load=None):
        PollScheduler.__init__(self)
        self._portdb = portdb
        self._global_cleanse = False
        if cp_iter is None:
            cp_iter = self._iter_every_cp()
            # We can globally cleanse stale cache only if we
            # iterate over every single cp.
            self._global_cleanse = True
        self._cp_iter = cp_iter
        self._consumer = consumer

        if max_jobs is None:
            # (default job count assignment elided)
        self._max_jobs = max_jobs
        self._max_load = max_load
        self._sched_iface = self._sched_iface_class(
            register=self._register,
            schedule=self._schedule_wait,
            unregister=self._unregister)

        self._valid_pkgs = set()
        self._cp_set = set()
        self._process_iter = self._iter_metadata_processes()
        self.returncode = os.EX_OK
        self._error_count = 0

    def _iter_every_cp(self):
        # Pop from the end of a reverse-sorted list so cp values are
        # yielded in ascending order without holding extra copies.
        every_cp = self._portdb.cp_all()
        every_cp.sort(reverse=True)
        # (loop header elided)
            yield every_cp.pop()

    def _iter_metadata_processes(self):
        # Yields an EbuildMetadataPhase for each cpv whose cached
        # metadata is missing or stale.
        portdb = self._portdb
        valid_pkgs = self._valid_pkgs
        cp_set = self._cp_set
        consumer = self._consumer

        for cp in self._cp_iter:
            portage.writemsg_stdout("Processing %s\n" % cp)
            cpv_list = portdb.cp_list(cp)
            for cpv in cpv_list:
                valid_pkgs.add(cpv)
                ebuild_path, repo_path = portdb.findname2(cpv)
                metadata, st, emtime = portdb._pull_valid_cache(
                    cpv, ebuild_path, repo_path)
                # Valid cache hit: hand metadata to the consumer directly,
                # no subprocess needed.
                if metadata is not None:
                    if consumer is not None:
                        consumer(cpv, ebuild_path,
                            repo_path, metadata)

                yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
                    ebuild_mtime=emtime,
                    metadata_callback=portdb._metadata_callback,
                    portdb=portdb, repo_path=repo_path,
                    settings=portdb.doebuild_settings)

    # NOTE(review): the "def run(self):" header is elided from this chunk;
    # the statements below are its body.
        portdb = self._portdb
        from portage.cache.cache_errors import CacheError

        # Drive the event loop until all metadata processes finish.
        while self._schedule():

        # Collect candidate dead cache keys, either for every cp
        # (global cleanse) or only for the cps that were processed.
        if self._global_cleanse:
            for mytree in portdb.porttrees:
                dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
                except CacheError, e:
                    portage.writemsg("Error listing cache entries for " + \
                        "'%s': %s, continuing...\n" % (mytree, e),

            cp_set = self._cp_set
            cpv_getkey = portage.cpv_getkey
            for mytree in portdb.porttrees:
                dead_nodes[mytree] = set(cpv for cpv in \
                    portdb.auxdb[mytree].iterkeys() \
                    if cpv_getkey(cpv) in cp_set)
                except CacheError, e:
                    portage.writemsg("Error listing cache entries for " + \
                        "'%s': %s, continuing...\n" % (mytree, e),

        # Keep cache entries that still have a corresponding ebuild.
        for y in self._valid_pkgs:
            for mytree in portdb.porttrees:
                if portdb.findname2(y, mytree=mytree)[0]:
                    dead_nodes[mytree].discard(y)

        # Delete whatever remains as stale.
        for mytree, nodes in dead_nodes.iteritems():
            auxdb = portdb.auxdb[mytree]
            except (KeyError, CacheError):

    def _schedule_tasks(self):
        """
        @returns: True if there may be remaining tasks to schedule,
            False otherwise.
        """
        while self._can_add_job():
            metadata_process = self._process_iter.next()
            except StopIteration:

            metadata_process.scheduler = self._sched_iface
            metadata_process.addExitListener(self._metadata_exit)
            metadata_process.start()

    def _metadata_exit(self, metadata_process):
        # Exit handler: record errors, then notify the consumer.
        if metadata_process.returncode != os.EX_OK:
            self.returncode = 1
            self._error_count += 1
            self._valid_pkgs.discard(metadata_process.cpv)
            portage.writemsg("Error processing %s, continuing...\n" % \
                (metadata_process.cpv,), noiselevel=-1)

        if self._consumer is not None:
            # On failure, still notify the consumer (in this case the metadata
            # argument is None).
            self._consumer(metadata_process.cpv,
                metadata_process.ebuild_path,
                metadata_process.repo_path,
                metadata_process.metadata)
class UninstallFailure(portage.exception.PortageException):
    """
    An instance of this class is raised by unmerge() when
    an uninstallation fails.
    """

    def __init__(self, *pargs):
        portage.exception.PortageException.__init__(self, pargs)
        # Exit status of the failed unmerge; NOTE(review): the guard for
        # an empty pargs tuple is elided from this chunk.
        self.status = pargs[0]
def unmerge(root_config, myopts, unmerge_action,
    unmerge_files, ldpath_mtimes, autoclean=0,
    clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
    scheduler=None, writemsg_level=portage.util.writemsg_level):
    """
    Uninstall packages for the "unmerge", "prune", "clean" and related
    actions: resolve the arguments to installed packages, classify each
    candidate as selected/protected/omitted, display the plan, then
    perform the actual unmerges.

    NOTE(review): a large number of lines are elided from this chunk
    (loop/try headers, returns, some assignments); comments annotate
    only the visible code.
    """
    # --deselect controls whether unmerged packages are removed from world.
    clean_world = myopts.get('--deselect') != 'n'
    quiet = "--quiet" in myopts
    settings = root_config.settings
    sets = root_config.sets
    vartree = root_config.trees["vartree"]
    candidate_catpkgs=[]
    xterm_titles = "notitles" not in settings.features
    out = portage.output.EOutput()
    db_keys = list(vartree.dbapi._aux_cache_keys)

    # Body of a nested package-cache helper (its "def" line is elided):
    # fetch from pkg_cache or construct an installed Package instance.
        pkg = pkg_cache.get(cpv)
        pkg = Package(cpv=cpv, installed=True,
            metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
            root_config=root_config,
            type_name="installed")
        pkg_cache[cpv] = pkg

    vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
    # At least the parent needs to exist for the lock file.
    portage.util.ensure_dirs(vdb_path)
    except portage.exception.PortageException:
    # Lock the vdb while selection is computed, when it is writable.
    if os.access(vdb_path, os.W_OK):
        vdb_lock = portage.locks.lockdir(vdb_path)
    realsyslist = sets["system"].getAtoms()

    # Expand virtuals in the system set to their single installed
    # provider, so system-package warnings match real packages.
    for x in realsyslist:
        mycp = portage.dep_getkey(x)
        if mycp in settings.getvirtuals():
            for provider in settings.getvirtuals()[mycp]:
                if vartree.dbapi.match(provider):
                    providers.append(provider)
            if len(providers) == 1:
                syslist.extend(providers)
        # (else-branch header elided)
            syslist.append(mycp)

    mysettings = portage.config(clone=settings)

    if not unmerge_files:
        if unmerge_action == "unmerge":
            print bold("emerge unmerge") + " can only be used with specific package names"

    localtree = vartree
    # process all arguments and add all
    # valid db entries to candidate_catpkgs
    if not unmerge_files:
        candidate_catpkgs.extend(vartree.dbapi.cp_all())
    # (else-branch header elided)
        #we've got command-line arguments
        if not unmerge_files:
            print "\nNo packages to unmerge have been provided.\n"
        for x in unmerge_files:
            arg_parts = x.split('/')
            if x[0] not in [".","/"] and \
                arg_parts[-1][-7:] != ".ebuild":
                #possible cat/pkg or dep; treat as such
                candidate_catpkgs.append(x)
            elif unmerge_action in ["prune","clean"]:
                print "\n!!! Prune and clean do not accept individual" + \
                    " ebuilds as arguments;\n skipping.\n"
            # (else-branch header elided)
                # it appears that the user is specifying an installed
                # ebuild and we're in "unmerge" mode, so it's ok.
                if not os.path.exists(x):
                    print "\n!!! The path '"+x+"' doesn't exist.\n"

                absx = os.path.abspath(x)
                sp_absx = absx.split("/")
                if sp_absx[-1][-7:] == ".ebuild":
                    absx = "/".join(sp_absx)

                sp_absx_len = len(sp_absx)

                vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
                vdb_len = len(vdb_path)

                sp_vdb = vdb_path.split("/")
                sp_vdb_len = len(sp_vdb)

                if not os.path.exists(absx+"/CONTENTS"):
                    print "!!! Not a valid db dir: "+str(absx)

                if sp_absx_len <= sp_vdb_len:
                    # The Path is shorter... so it can't be inside the vdb.
                    print "\n!!!",x,"cannot be inside "+ \
                        vdb_path+"; aborting.\n"

                # Verify every path component matches the vdb prefix.
                for idx in range(0,sp_vdb_len):
                    if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
                        print "\n!!!", x, "is not inside "+\
                            vdb_path+"; aborting.\n"

                # Convert the vdb sub-path into an "=cat/pkg-ver" atom.
                print "="+"/".join(sp_absx[sp_vdb_len:])
                candidate_catpkgs.append(
                    "="+"/".join(sp_absx[sp_vdb_len:]))

    if (not "--quiet" in myopts):
        # (newline assignment elided)
    if settings["ROOT"] != "/":
        writemsg_level(darkgreen(newline+ \
            ">>> Using system located in ROOT tree %s\n" % \

    if (("--pretend" in myopts) or ("--ask" in myopts)) and \
        not ("--quiet" in myopts):
        writemsg_level(darkgreen(newline+\
            ">>> These are the packages that would be unmerged:\n"))

    # Preservation of order is required for --depclean and --prune so
    # that dependencies are respected. Use all_selected to eliminate
    # duplicate packages since the same package may be selected by
    # multiple atoms.
    all_selected = set()
    for x in candidate_catpkgs:
        # cycle through all our candidate deps and determine
        # what will and will not get unmerged
        mymatch = vartree.dbapi.match(x)
        except portage.exception.AmbiguousPackageName, errpkgs:
            print "\n\n!!! The short ebuild name \"" + \
                x + "\" is ambiguous. Please specify"
            print "!!! one of the following fully-qualified " + \
                "ebuild names instead:\n"
            for i in errpkgs[0]:
                print " " + green(i)

        # Fall back to a name-only match for plain package names.
        if not mymatch and x[0] not in "<>=~":
            mymatch = localtree.dep_match(x)
            portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
                (x, unmerge_action), noiselevel=-1)

        # Each argument gets its own protected/selected/omitted record
        # (the pkgmap.append( line is elided).
            {"protected": set(), "selected": set(), "omitted": set()})

        mykey = len(pkgmap) - 1
        if unmerge_action=="unmerge":
            # All matches are selected for a plain unmerge.
                if y not in all_selected:
                    pkgmap[mykey]["selected"].add(y)
                    all_selected.add(y)
        elif unmerge_action == "prune":
            if len(mymatch) == 1:
            # Keep the best version per slot; select the rest.
            best_version = mymatch[0]
            best_slot = vartree.getslot(best_version)
            best_counter = vartree.dbapi.cpv_counter(best_version)
            for mypkg in mymatch[1:]:
                myslot = vartree.getslot(mypkg)
                mycounter = vartree.dbapi.cpv_counter(mypkg)
                if (myslot == best_slot and mycounter > best_counter) or \
                    mypkg == portage.best([mypkg, best_version]):
                    if myslot == best_slot:
                        if mycounter < best_counter:
                            # On slot collision, keep the one with the
                            # highest counter since it is the most
                            # recently installed.
                    best_version = mypkg
                    best_counter = mycounter
            pkgmap[mykey]["protected"].add(best_version)
            pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
                if mypkg != best_version and mypkg not in all_selected)
            all_selected.update(pkgmap[mykey]["selected"])
        # (else-branch header elided)
            # unmerge_action == "clean"
            for mypkg in mymatch:
                if unmerge_action == "clean":
                    myslot = localtree.getslot(mypkg)
                # (else-branch header elided)
                    # since we're pruning, we don't care about slots
                    # and put all the pkgs in together
                if myslot not in slotmap:
                    slotmap[myslot] = {}
                slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg

            # Also index every installed version of this cp by slot.
            for mypkg in vartree.dbapi.cp_list(
                portage.dep_getkey(mymatch[0])):
                myslot = vartree.getslot(mypkg)
                if myslot not in slotmap:
                    slotmap[myslot] = {}
                slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg

            for myslot in slotmap:
                counterkeys = slotmap[myslot].keys()
                if not counterkeys:
                # Protect the highest-counter (most recent) pkg per slot.
                pkgmap[mykey]["protected"].add(
                    slotmap[myslot][counterkeys[-1]])
                del counterkeys[-1]

                # Protect versions that weren't matched by the argument.
                for counter in counterkeys[:]:
                    mypkg = slotmap[myslot][counter]
                    if mypkg not in mymatch:
                        counterkeys.remove(counter)
                        pkgmap[mykey]["protected"].add(
                            slotmap[myslot][counter])

                #be pretty and get them in order of merge:
                for ckey in counterkeys:
                    mypkg = slotmap[myslot][ckey]
                    if mypkg not in all_selected:
                        pkgmap[mykey]["selected"].add(mypkg)
                        all_selected.add(mypkg)
                # ok, now the last-merged package
                # is protected, and the rest are selected
    numselected = len(all_selected)
    if global_unmerge and not numselected:
        portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")

    if not numselected:
        portage.writemsg_stdout(
            "\n>>> No packages selected for removal by " + \
            unmerge_action + "\n")
    # (finally: header elided) -- release the vdb lock taken above
        vartree.dbapi.flush_cache()
        portage.locks.unlockdir(vdb_lock)

    from portage.sets.base import EditablePackageSet

    # generate a list of package sets that are directly or indirectly listed in "world",
    # as there is no persistent list of "installed" sets
    installed_sets = ["world"]
        pos = len(installed_sets)
        for s in installed_sets[pos - 1:]:
            candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
            installed_sets += candidates
    installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]

    # we don't want to unmerge packages that are still listed in user-editable package sets
    # listed in "world" as they would be remerged on the next update of "world" or the
    # relevant package sets.
    unknown_sets = set()
    for cp in xrange(len(pkgmap)):
        for cpv in pkgmap[cp]["selected"].copy():
                # It could have been uninstalled
                # by a concurrent process.

            # Never let portage unmerge itself outside of "clean".
            if unmerge_action != "clean" and \
                root_config.root == "/" and \
                portage.match_from_list(
                portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
                msg = ("Not unmerging package %s since there is no valid " + \
                    "reason for portage to unmerge itself.") % (pkg.cpv,)
                for line in textwrap.wrap(msg, 75):
                # adjust pkgmap so the display output is correct
                pkgmap[cp]["selected"].remove(cpv)
                all_selected.remove(cpv)
                pkgmap[cp]["protected"].add(cpv)

            for s in installed_sets:
                # skip sets that the user requested to unmerge, and skip world
                # unless we're unmerging a package set (as the package would be
                # removed from "world" later on)
                if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):

                if s in unknown_sets:
                    unknown_sets.add(s)
                    out = portage.output.EOutput()
                    out.eerror(("Unknown set '@%s' in " + \
                        "%svar/lib/portage/world_sets") % \
                        (s, root_config.root))

                # only check instances of EditablePackageSet as other classes are generally used for
                # special purposes and can be ignored here (and are usually generated dynamically, so the
                # user can't do much about them anyway)
                if isinstance(sets[s], EditablePackageSet):

                    # This is derived from a snippet of code in the
                    # depgraph._iter_atoms_for_pkg() method.
                    for atom in sets[s].iterAtomsForPackage(pkg):
                        inst_matches = vartree.dbapi.match(atom)
                        inst_matches.reverse() # descending order
                        for inst_cpv in inst_matches:
                            inst_pkg = _pkg(inst_cpv)
                                # It could have been uninstalled
                                # by a concurrent process.

                            if inst_pkg.cp != atom.cp:
                            if pkg >= inst_pkg:
                                # This is descending order, and we're not
                                # interested in any versions <= pkg given.
                            if pkg.slot_atom != inst_pkg.slot_atom:
                                higher_slot = inst_pkg
                        if higher_slot is None:
                #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
                #print colorize("WARN", "but still listed in the following package sets:")
                #print " %s\n" % ", ".join(parents)
                print colorize("WARN", "Not unmerging package %s as it is" % cpv)
                print colorize("WARN", "still referenced by the following package sets:")
                print " %s\n" % ", ".join(parents)
                # adjust pkgmap so the display output is correct
                pkgmap[cp]["selected"].remove(cpv)
                all_selected.remove(cpv)
                pkgmap[cp]["protected"].add(cpv)

    numselected = len(all_selected)
    if not numselected:
            "\n>>> No packages selected for removal by " + \
            unmerge_action + "\n")

    # Unmerge order only matters in some cases
    # Group selections by cp when order doesn't matter (headers elided).
        selected = d["selected"]
        cp = portage.cpv_getkey(iter(selected).next())
        cp_dict = unordered.get(cp)
        if cp_dict is None:
            unordered[cp] = cp_dict
        for k, v in d.iteritems():
            cp_dict[k].update(v)
        pkgmap = [unordered[cp] for cp in sorted(unordered)]

    # Display the plan, one record per cp.
    for x in xrange(len(pkgmap)):
        selected = pkgmap[x]["selected"]
        for mytype, mylist in pkgmap[x].iteritems():
            if mytype == "selected":
            mylist.difference_update(all_selected)
        cp = portage.cpv_getkey(iter(selected).next())
        # Versions that match the cp but aren't selected/protected are
        # reported as "omitted".
        for y in localtree.dep_match(cp):
            if y not in pkgmap[x]["omitted"] and \
                y not in pkgmap[x]["selected"] and \
                y not in pkgmap[x]["protected"] and \
                y not in all_selected:
                pkgmap[x]["omitted"].add(y)
        if global_unmerge and not pkgmap[x]["selected"]:
            #avoid cluttering the preview printout with stuff that isn't getting unmerged
        if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
            writemsg_level(colorize("BAD","\a\n\n!!! " + \
                "'%s' is part of your system profile.\n" % cp),
                level=logging.WARNING, noiselevel=-1)
            writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
                "be damaging to your system.\n\n"),
                level=logging.WARNING, noiselevel=-1)
            if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
                countdown(int(settings["EMERGE_WARNING_DELAY"]),
                    colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
        # Verbose vs quiet layout (guards elided).
            writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
            writemsg_level(bold(cp) + ": ", noiselevel=-1)
        for mytype in ["selected","protected","omitted"]:
                writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
            if pkgmap[x][mytype]:
                sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
                sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
                for pn, ver, rev in sorted_pkgs:
                    myversion = ver + "-" + rev
                    if mytype == "selected":
                        colorize("UNMERGE_WARN", myversion + " "),
                        colorize("GOOD", myversion + " "), noiselevel=-1)
                writemsg_level("none ", noiselevel=-1)
                writemsg_level("\n", noiselevel=-1)
            writemsg_level("\n", noiselevel=-1)

    writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
        " packages are slated for removal.\n")
    writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
        " and " + colorize("GOOD", "'omitted'") + \
        " packages will not be removed.\n\n")

    if "--pretend" in myopts:
        #we're done... return
    if "--ask" in myopts:
        if userquery("Would you like to unmerge these packages?")=="No":
            # enter pretend mode for correct formatting of results
            myopts["--pretend"] = True

    #the real unmerging begins, after a short delay....
    if clean_delay and not autoclean:
        countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")

    for x in xrange(len(pkgmap)):
        for y in pkgmap[x]["selected"]:
            writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
            emergelog(xterm_titles, "=== Unmerging... ("+y+")")
            mysplit = y.split("/")
            retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
                mysettings, unmerge_action not in ["clean","prune"],
                vartree=vartree, ldpath_mtimes=ldpath_mtimes,
                scheduler=scheduler)

            if retval != os.EX_OK:
                emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
                raise UninstallFailure(retval)
            # On success, drop the package from the world file too.
            if clean_world and hasattr(sets["world"], "cleanPackage"):
                sets["world"].cleanPackage(vartree.dbapi, y)
            emergelog(xterm_titles, " >>> unmerge success: "+y)

    # Remove any sets the user asked to unmerge from the world sets file.
    if clean_world and hasattr(sets["world"], "remove"):
        for s in root_config.setconfig.active:
            sets["world"].remove(SETPREFIX+s)
def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
    """
    Regenerate the GNU info directory index (via /usr/bin/install-info)
    for each info directory whose mtime changed since the last run, and
    report how many files were processed.

    NOTE(review): loop/try headers and several guards are elided from
    this chunk; comments annotate visible code only.
    """
    if os.path.exists("/usr/bin/install-info"):
        out = portage.output.EOutput()
        # Collect info dirs whose recorded mtime is stale.
            inforoot=normpath(root+z)
            if os.path.isdir(inforoot):
                infomtime = long(os.stat(inforoot).st_mtime)
                if inforoot not in prev_mtimes or \
                    prev_mtimes[inforoot] != infomtime:
                    regen_infodirs.append(inforoot)

        if not regen_infodirs:
            portage.writemsg_stdout("\n")
            out.einfo("GNU info directory index is up-to-date.")
        # (else-branch header elided)
            portage.writemsg_stdout("\n")
            out.einfo("Regenerating GNU info directory index...")

            dir_extensions = ("", ".gz", ".bz2")
            for inforoot in regen_infodirs:
                if not os.path.isdir(inforoot) or \
                    not os.access(inforoot, os.W_OK):

                file_list = os.listdir(inforoot)
                dir_file = os.path.join(inforoot, "dir")
                moved_old_dir = False
                processed_count = 0
                for x in file_list:
                    # Skip hidden entries and subdirectories.
                    if x.startswith(".") or \
                        os.path.isdir(os.path.join(inforoot, x)):
                    # Skip the "dir" index files themselves.
                    if x.startswith("dir"):
                        for ext in dir_extensions:
                            if x == "dir" + ext or \
                                x == "dir" + ext + ".old":

                    # Before the first real file, stash any existing
                    # "dir" index aside so install-info builds a new one.
                    if processed_count == 0:
                        for ext in dir_extensions:
                            os.rename(dir_file + ext, dir_file + ext + ".old")
                            moved_old_dir = True
                            except EnvironmentError, e:
                                if e.errno != errno.ENOENT:

                    processed_count += 1

                    myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
                    existsstr="already exists, for file `"
                    if re.search(existsstr,myso):
                        # Already exists... Don't increment the count for this.
                    elif myso[:44]=="install-info: warning: no info dir entry in ":
                        # This info file doesn't contain a DIR-header: install-info produces this
                        # (harmless) warning (the --quiet switch doesn't seem to work).
                        # Don't increment the count for this.
                    # (else-branch header elided) -- real error output
                        badcount=badcount+1
                        errmsg += myso + "\n"

                if moved_old_dir and not os.path.exists(dir_file):
                    # We didn't generate a new dir file, so put the old file
                    # back where it was originally found.
                    for ext in dir_extensions:
                        os.rename(dir_file + ext + ".old", dir_file + ext)
                        except EnvironmentError, e:
                            if e.errno != errno.ENOENT:

                # Clean dir.old cruft so that they don't prevent
                # unmerge of otherwise empty directories.
                for ext in dir_extensions:
                    os.unlink(dir_file + ext + ".old")
                    except EnvironmentError, e:
                        if e.errno != errno.ENOENT:

                #update mtime so we can potentially avoid regenerating.
                prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)

        # Final summary: errors are reported via eerror, success via einfo
        # (the badcount guard is elided).
            out.eerror("Processed %d info files; %d errors." % \
                (icount, badcount))
            writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
            out.einfo("Processed %d info files." % (icount,))
def display_news_notification(root_config, myopts):
    """
    Print a notice for each repository that has unread news items and,
    if any were found, suggest "eselect news" for reading them.
    """
    target_root = root_config.root
    trees = root_config.trees
    settings = trees["vartree"].settings
    portdb = trees["porttree"].dbapi
    vardb = trees["vartree"].dbapi
    NEWS_PATH = os.path.join("metadata", "news")
    UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
    newsReaderDisplay = False
    # Unread counters are only updated when not in --pretend mode.
    update = "--pretend" not in myopts

    for repo in portdb.getRepositories():
        unreadItems = checkUpdatedNewsItems(
            portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
        # (guard on a non-zero unreadItems count is elided in this chunk)
            if not newsReaderDisplay:
                newsReaderDisplay = True
            print colorize("WARN", " * IMPORTANT:"),
            print "%s news items need reading for repository '%s'." % (unreadItems, repo)

    if newsReaderDisplay:
        print colorize("WARN", " *"),
        print "Use " + colorize("GOOD", "eselect news") + " to read news items."
def display_preserved_libs(vardbapi):
    """
    Warn about preserved libraries recorded in the plib registry and
    show which installed files still consume each of them.

    NOTE(review): several lines (MAX_DISPLAY and local dict/list
    initializations, try/else headers) are elided from this chunk;
    comments annotate visible code only.
    """
    # Ensure the registry is consistent with existing files.
    vardbapi.plib_registry.pruneNonExisting()

    if vardbapi.plib_registry.hasEntries():
        print colorize("WARN", "!!!") + " existing preserved libs:"
        plibdata = vardbapi.plib_registry.getPreservedLibs()
        linkmap = vardbapi.linkmap
        linkmap_broken = False

        # Rebuilding the linkmap requires external tools; when the tool
        # is missing, consumer info is simply unavailable.
        except portage.exception.CommandNotFound, e:
            writemsg_level("!!! Command Not Found: %s\n" % (e,),
                level=logging.ERROR, noiselevel=-1)
            linkmap_broken = True
            # Map each preserved file to its external consumers.
            search_for_owners = set()
            for cpv in plibdata:
                internal_plib_keys = set(linkmap._obj_key(f) \
                    for f in plibdata[cpv])
                for f in plibdata[cpv]:
                    if f in consumer_map:
                    for c in linkmap.findConsumers(f):
                        # Filter out any consumers that are also preserved libs
                        # belonging to the same package as the provider.
                        if linkmap._obj_key(c) not in internal_plib_keys:
                            consumers.append(c)
                    consumer_map[f] = consumers
                    search_for_owners.update(consumers[:MAX_DISPLAY+1])

            owners = vardbapi._owners.getFileOwnerMap(search_for_owners)

        for cpv in plibdata:
            print colorize("WARN", ">>>") + " package: %s" % cpv
            # Group alternate paths that refer to the same object.
            for f in plibdata[cpv]:
                obj_key = linkmap._obj_key(f)
                alt_paths = samefile_map.get(obj_key)
                if alt_paths is None:
                    samefile_map[obj_key] = alt_paths

            for alt_paths in samefile_map.itervalues():
                alt_paths = sorted(alt_paths)
                for p in alt_paths:
                    print colorize("WARN", " * ") + " - %s" % (p,)
                # Show at most MAX_DISPLAY consumers, then a summary line.
                consumers = consumer_map.get(f, [])
                for c in consumers[:MAX_DISPLAY]:
                    print colorize("WARN", " * ") + " used by %s (%s)" % \
                        (c, ", ".join(x.mycpv for x in owners.get(c, [])))
                if len(consumers) == MAX_DISPLAY + 1:
                    print colorize("WARN", " * ") + " used by %s (%s)" % \
                        (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
                        for x in owners.get(consumers[MAX_DISPLAY], [])))
                elif len(consumers) > MAX_DISPLAY:
                    print colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY)
        print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12604 def _flush_elog_mod_echo():
12606 Dump the mod_echo output now so that our other
12607 notifications are shown last.
12609 @returns: True if messages were shown, False otherwise.
12611 messages_shown = False
12613 from portage.elog import mod_echo
12614 except ImportError:
12615 pass # happens during downgrade to a version without the module
12617 messages_shown = bool(mod_echo._items)
12618 mod_echo.finalize()
12619 return messages_shown
def post_emerge(root_config, myopts, mtimedb, retval):
	"""
	Misc. things to run at the end of a merge session.

	Update Config Files
	Display preserved libs warnings

	@param trees: A dictionary mapping each ROOT to it's package databases
	@param mtimedb: The mtimeDB to store data needed across merge invocations
	@type mtimedb: MtimeDB class instance
	@param retval: Emerge's return value
	1. Calls sys.exit(retval)

	NOTE(review): this chunk is elided upstream -- several guard/else
	lines (and an early return) are not visible; indentation below is
	reconstructed.
	"""
	target_root = root_config.root
	trees = { target_root : root_config.trees }
	vardbapi = trees[target_root]["vartree"].dbapi
	settings = vardbapi.settings
	info_mtimes = mtimedb["info"]

	# Load the most current variables from ${ROOT}/etc/profile.env
	settings.regenerate()

	config_protect = settings.get("CONFIG_PROTECT","").split()
	infodirs = settings.get("INFOPATH","").split(":") + \
		settings.get("INFODIR","").split(":")

	if retval == os.EX_OK:
		exit_msg = " *** exiting successfully."
		# NOTE(review): the else: that selects this failure message is
		# not visible in this chunk.
		exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
	emergelog("notitles" not in settings.features, exit_msg)

	_flush_elog_mod_echo()

	counter_hash = settings.get("PORTAGE_COUNTER_HASH")
	if "--pretend" in myopts or (counter_hash is not None and \
		counter_hash == vardbapi._counter_hash()):
		display_news_notification(root_config, myopts)
		# If vdb state has not changed then there's nothing else to do.

	vdb_path = os.path.join(target_root, portage.VDB_PATH)
	portage.util.ensure_dirs(vdb_path)
	# Lock the vdb only when it is writable and this is not a dry run.
	if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
		vdb_lock = portage.locks.lockdir(vdb_path)

	if "noinfo" not in settings.features:
		chk_updated_info_files(target_root,
			infodirs, info_mtimes, retval)

	portage.locks.unlockdir(vdb_lock)

	chk_updated_cfg_files(target_root, config_protect)

	display_news_notification(root_config, myopts)
	if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
		display_preserved_libs(vardbapi)
def chk_updated_cfg_files(target_root, config_protect):
	"""Scan CONFIG_PROTECT paths for unmerged ._cfg????_* files and warn.

	Uses `find` (via commands.getstatusoutput) to locate pending config
	updates under each protected path and prints a summary plus a
	pointer to the emerge man page.

	NOTE(review): this chunk is elided upstream -- continue/else lines
	and some counters are not visible; indentation is reconstructed.

	@param target_root: root prefix joined onto each CONFIG_PROTECT entry
	@param config_protect: iterable of protected paths (files or dirs)
	"""
	#number of directories with some protect files in them
	for x in config_protect:
		x = os.path.join(target_root, x.lstrip(os.path.sep))
		if not os.access(x, os.W_OK):
			# Avoid Permission denied errors generated
			# later by the find/stat calls below.
		mymode = os.lstat(x).st_mode
		if stat.S_ISLNK(mymode):
			# We want to treat it like a directory if it
			# is a symlink to an existing directory.
			real_mode = os.stat(x).st_mode
			if stat.S_ISDIR(real_mode):
		if stat.S_ISDIR(mymode):
			# Directory: scan recursively, pruning hidden directories.
			mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
			# Single file: look only for updates to that file's name.
			mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
				os.path.split(x.rstrip(os.path.sep))
		mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
		a = commands.getstatusoutput(mycommand)
		sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
		# Show the error message alone, sending stdout to /dev/null.
		os.system(mycommand + " 1>/dev/null")
		files = a[1].split('\0')
		# split always produces an empty string as the last element
		if files and not files[-1]:
		print "\n"+colorize("WARN", " * IMPORTANT:"),
		if stat.S_ISDIR(mymode):
			print "%d config files in '%s' need updating." % \
			print "config file '%s' needs updating." % x
	print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
		" section of the " + bold("emerge")
	print " "+yellow("*")+" man page to learn how to update config files."
def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
	update=False):
	"""
	Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
	Returns the number of unread (yet relevent) items.

	@param portdb: a portage tree database
	@type portdb: pordbapi
	@param vardb: an installed package database
	@type vardb: vardbapi
	@param NEWS_PATH: path (relative to the repository) of the news items
	@type NEWS_PATH: string
	@param UNREAD_PATH: path of the unread-news state files
	@type UNREAD_PATH: string
	@param repo_id: the repository to examine
	@type repo_id: string
	@param update: rescan for new items before counting; the caller
		disables this for --pretend runs
	@type update: bool
	@rtype: Integer
	@returns:
	1. The number of unread but relevant news items.
	"""
	# Deferred import keeps the news subsystem optional at load time.
	from portage.news import NewsManager
	manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
	return manager.getUnreadItems( repo_id, update=update )
def insert_category_into_atom(atom, category):
	"""Splice ``category`` in front of the package name of ``atom``.

	The category is inserted just before the first word character, so
	operator/version prefixes such as ">=" are preserved.

	@param atom: a package atom, possibly with an operator prefix
	@type atom: string
	@param category: category name to insert (no trailing slash)
	@type category: string
	@rtype: string or None
	@returns: atom with "<category>/" inserted, or None when the atom
		contains no word character to anchor the insertion
	"""
	alphanum = re.search(r'\w', atom)
	if alphanum:
		ret = atom[:alphanum.start()] + "%s/" % category + \
			atom[alphanum.start():]
	else:
		# Nothing to anchor on -- caller must handle the None result.
		ret = None
	return ret
def is_valid_package_atom(x):
	"""Return True if ``x`` is a valid package atom.

	A category-less atom (e.g. "foo" or ">=foo-1.0") is given a dummy
	"cat/" category before validation, since portage.isvalidatom()
	requires a category.

	@param x: candidate package atom from the command line
	@type x: string
	@rtype: bool
	"""
	if "/" not in x:
		alphanum = re.search(r'\w', x)
		if alphanum:
			x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
	return portage.isvalidatom(x)
def show_blocker_docs_link():
	"""Print a pointer to the handbook section that explains blockers."""
	print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
	print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
	print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12804 def show_mask_docs():
12805 print "For more information, see the MASKED PACKAGES section in the emerge"
12806 print "man page or refer to the Gentoo Handbook."
def action_sync(settings, trees, mtimedb, myopts, myaction):
	"""Handle "emerge --sync" and "emerge --metadata".

	Chooses a sync method from the working copy / SYNC URI (git pull,
	rsync with server-timestamp probing and retries, or cvs), then
	updates the metadata cache, applies global updates, reloads the
	emerge configuration and suggests a portage upgrade when one is
	available.

	NOTE(review): this chunk is elided upstream -- many guard, try:,
	except and else lines are not visible, and indentation below is
	reconstructed; verify control flow against the full file.
	"""
	xterm_titles = "notitles" not in settings.features
	emergelog(xterm_titles, " === sync")
	portdb = trees[settings["ROOT"]]["porttree"].dbapi
	myportdir = portdb.porttree_root
	out = portage.output.EOutput()
	# (guard for an unset PORTDIR not visible in this chunk)
	sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
	if myportdir[-1]=="/":
		myportdir=myportdir[:-1]
	st = os.stat(myportdir)
	# Create PORTDIR when it does not exist yet.
	print ">>>",myportdir,"not found, creating it."
	os.makedirs(myportdir,0755)
	st = os.stat(myportdir)

	spawn_kwargs["env"] = settings.environ()
	# Sync as the on-disk owner of PORTDIR when usersync is enabled and
	# the tree is not accessible to the invoking uid/gid.
	if 'usersync' in settings.features and \
		portage.data.secpass >= 2 and \
		(st.st_uid != os.getuid() and st.st_mode & 0700 or \
		st.st_gid != os.getgid() and st.st_mode & 0070):
		homedir = pwd.getpwuid(st.st_uid).pw_dir
		# Drop privileges when syncing, in order to match
		# existing uid/gid settings.
		spawn_kwargs["uid"] = st.st_uid
		spawn_kwargs["gid"] = st.st_gid
		spawn_kwargs["groups"] = [st.st_gid]
		spawn_kwargs["env"]["HOME"] = homedir
		if not st.st_mode & 0020:
			umask = umask | 0020
		spawn_kwargs["umask"] = umask

	syncuri = settings.get("SYNC", "").strip()
	# (guard for an unset SYNC not visible in this chunk)
	writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
		noiselevel=-1, level=logging.ERROR)

	vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
	vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))

	dosyncuri = syncuri
	updatecache_flg = False
	if myaction == "metadata":
		print "skipping sync"
		updatecache_flg = True
	elif ".git" in vcs_dirs:
		# Update existing git repository, and ignore the syncuri. We are
		# going to trust the user and assume that the user is in the branch
		# that he/she wants updated. We'll let the user manage branches with
		# git directly.
		if portage.process.find_binary("git") is None:
			msg = ["Command not found: git",
			"Type \"emerge dev-util/git\" to enable git support."]
			writemsg_level("!!! %s\n" % l,
				level=logging.ERROR, noiselevel=-1)
		msg = ">>> Starting git pull in %s..." % myportdir
		emergelog(xterm_titles, msg )
		writemsg_level(msg + "\n")
		exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
			(portage._shell_quote(myportdir),), **spawn_kwargs)
		if exitcode != os.EX_OK:
			msg = "!!! git pull error in %s." % myportdir
			emergelog(xterm_titles, msg)
			writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
		msg = ">>> Git pull in %s successful" % myportdir
		emergelog(xterm_titles, msg)
		writemsg_level(msg + "\n")
		# git discards mtimes, so resynchronize them from the cache.
		exitcode = git_sync_timestamps(settings, myportdir)
		if exitcode == os.EX_OK:
			updatecache_flg = True
	elif syncuri[:8]=="rsync://":
		# Refuse to rsync over a tree managed by a VCS.
		for vcs_dir in vcs_dirs:
			writemsg_level(("!!! %s appears to be under revision " + \
				"control (contains %s).\n!!! Aborting rsync sync.\n") % \
				(myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
		if not os.path.exists("/usr/bin/rsync"):
			print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
			print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
		if settings["PORTAGE_RSYNC_OPTS"] == "":
			portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
			rsync_opts.extend([
				"--recursive", # Recurse directories
				"--links", # Consider symlinks
				"--safe-links", # Ignore links outside of tree
				"--perms", # Preserve permissions
				"--times", # Preserive mod times
				"--compress", # Compress the data transmitted
				"--force", # Force deletion on non-empty dirs
				"--whole-file", # Don't do block transfers, only entire files
				"--delete", # Delete files that aren't in the master tree
				"--stats", # Show final statistics about what was transfered
				"--timeout="+str(mytimeout), # IO timeout if not done in X seconds
				"--exclude=/distfiles", # Exclude distfiles from consideration
				"--exclude=/local", # Exclude local from consideration
				"--exclude=/packages", # Exclude packages from consideration
		# The below validation is not needed when using the above hardcoded
		# defaults.
		portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
		shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
		# Force the options that emerge depends on.
		for opt in ("--recursive", "--times"):
			if opt not in rsync_opts:
				portage.writemsg(yellow("WARNING:") + " adding required option " + \
					"%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
				rsync_opts.append(opt)

		for exclude in ("distfiles", "local", "packages"):
			opt = "--exclude=/%s" % exclude
			if opt not in rsync_opts:
				portage.writemsg(yellow("WARNING:") + \
					" adding required option %s not included in " % opt + \
					"PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
				rsync_opts.append(opt)

		# Official mirrors require some extra options.
		if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
			def rsync_opt_startswith(opt_prefix):
				for x in rsync_opts:
					if x.startswith(opt_prefix):

			if not rsync_opt_startswith("--timeout="):
				rsync_opts.append("--timeout=%d" % mytimeout)

			for opt in ("--compress", "--whole-file"):
				if opt not in rsync_opts:
					portage.writemsg(yellow("WARNING:") + " adding required option " + \
						"%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
					rsync_opts.append(opt)

		if "--quiet" in myopts:
			rsync_opts.append("--quiet") # Shut up a lot
			rsync_opts.append("--verbose") # Print filelist

		if "--verbose" in myopts:
			rsync_opts.append("--progress") # Progress meter for each file

		if "--debug" in myopts:
			rsync_opts.append("--checksum") # Force checksum on all files

		# Real local timestamp file.
		servertimestampfile = os.path.join(
			myportdir, "metadata", "timestamp.chk")

		content = portage.util.grabfile(servertimestampfile)
		mytimestamp = time.mktime(time.strptime(content[0],
			"%a, %d %b %Y %H:%M:%S +0000"))
		except (OverflowError, ValueError):

		rsync_initial_timeout = \
			int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
		rsync_initial_timeout = 15

		maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
		except SystemExit, e:
			raise # Needed else can't exit
		maxretries=3 #default number of retries

		# Split the URI into optional user, hostname and optional port.
		user_name, hostname, port = re.split(
			"rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
		if user_name is None:
		updatecache_flg=True
		all_rsync_opts = set(rsync_opts)
		extra_rsync_opts = shlex.split(
			settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
		all_rsync_opts.update(extra_rsync_opts)

		# Select the address family from explicit -4/-6 style options.
		family = socket.AF_INET
		if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
			family = socket.AF_INET
		elif socket.has_ipv6 and \
			("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
			family = socket.AF_INET6

		SERVER_OUT_OF_DATE = -1
		EXCEEDED_MAX_RETRIES = -2
		# Resolve the mirror hostname once; a random address is used so
		# load spreads over round-robin mirrors.
		for addrinfo in socket.getaddrinfo(
			hostname, None, family, socket.SOCK_STREAM):
			if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
				# IPv6 addresses need to be enclosed in square brackets
				ips.append("[%s]" % addrinfo[4][0])
				ips.append(addrinfo[4][0])
		from random import shuffle
		except SystemExit, e:
			raise # Needed else can't exit
		except Exception, e:
			print "Notice:",str(e)

		dosyncuri = syncuri.replace(
			"//" + user_name + hostname + port + "/",
			"//" + user_name + ips[0] + port + "/", 1)
		except SystemExit, e:
			raise # Needed else can't exit
		except Exception, e:
			print "Notice:",str(e)

		if "--ask" in myopts:
			if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":

		emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
		if "--quiet" not in myopts:
			print ">>> Starting rsync with "+dosyncuri+"..."
		# Retry path (first-attempt guard not visible in this chunk).
		emergelog(xterm_titles,
			">>> Starting retry %d of %d with %s" % \
			(retries,maxretries,dosyncuri))
		print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)

		if mytimestamp != 0 and "--quiet" not in myopts:
			print ">>> Checking server timestamp ..."

		rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts

		if "--debug" in myopts:

		exitcode = os.EX_OK
		servertimestamp = 0
		# Even if there's no timestamp available locally, fetch the
		# timestamp anyway as an initial probe to verify that the server is
		# responsive. This protects us from hanging indefinitely on a
		# connection attempt to an unresponsive server which rsync's
		# --timeout option does not prevent.

		# Temporary file for remote server timestamp comparison.
		from tempfile import mkstemp
		fd, tmpservertimestampfile = mkstemp()
		mycommand = rsynccommand[:]
		mycommand.append(dosyncuri.rstrip("/") + \
			"/metadata/timestamp.chk")
		mycommand.append(tmpservertimestampfile)
		def timeout_handler(signum, frame):
			raise portage.exception.PortageException("timed out")
		signal.signal(signal.SIGALRM, timeout_handler)
		# Timeout here in case the server is unresponsive. The
		# --timeout rsync option doesn't apply to the initial
		# connection attempt.
		if rsync_initial_timeout:
			signal.alarm(rsync_initial_timeout)
		mypids.extend(portage.process.spawn(
			mycommand, env=settings.environ(), returnpid=True))
		exitcode = os.waitpid(mypids[0], 0)[1]
		content = portage.grabfile(tmpservertimestampfile)
		if rsync_initial_timeout:
		os.unlink(tmpservertimestampfile)
		except portage.exception.PortageException, e:
			# The probe timed out: kill the child if still running.
			if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
				os.kill(mypids[0], signal.SIGTERM)
				os.waitpid(mypids[0], 0)
			# This is the same code rsync uses for timeout.
		if exitcode != os.EX_OK:
			# Convert the waitpid status into a plain exit code.
			if exitcode & 0xff:
				exitcode = (exitcode & 0xff) << 8
			exitcode = exitcode >> 8
		portage.process.spawned_pids.remove(mypids[0])
		servertimestamp = time.mktime(time.strptime(
			content[0], "%a, %d %b %Y %H:%M:%S +0000"))
		except (OverflowError, ValueError):
		del mycommand, mypids, content
		if exitcode == os.EX_OK:
			if (servertimestamp != 0) and (servertimestamp == mytimestamp):
				emergelog(xterm_titles,
					">>> Cancelling sync -- Already current.")
				print ">>> Timestamps on the server and in the local repository are the same."
				print ">>> Cancelling all further sync action. You are already up to date."
				print ">>> In order to force sync, remove '%s'." % servertimestampfile
			elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
				emergelog(xterm_titles,
					">>> Server out of date: %s" % dosyncuri)
				print ">>> SERVER OUT OF DATE: %s" % dosyncuri
				print ">>> In order to force sync, remove '%s'." % servertimestampfile
				exitcode = SERVER_OUT_OF_DATE
			elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
				# Timestamp probe passed -- run the real rsync transfer.
				mycommand = rsynccommand + [dosyncuri+"/", myportdir]
				exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
				if exitcode in [0,1,3,4,11,14,20,21]:
		elif exitcode in [1,3,4,11,14,20,21]:
		# Code 2 indicates protocol incompatibility, which is expected
		# for servers with protocol < 29 that don't support
		# --prune-empty-directories. Retry for a server that supports
		# at least rsync protocol version 29 (>=rsync-2.6.4).

		if retries<=maxretries:
			print ">>> Retrying..."
		updatecache_flg=False
		exitcode = EXCEEDED_MAX_RETRIES

		emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
		elif exitcode == SERVER_OUT_OF_DATE:
		elif exitcode == EXCEEDED_MAX_RETRIES:
			">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
		# Explain well-known rsync failure codes to the user.
		msg.append("Rsync has reported that there is a syntax error. Please ensure")
		msg.append("that your SYNC statement is proper.")
		msg.append("SYNC=" + settings["SYNC"])
		msg.append("Rsync has reported that there is a File IO error. Normally")
		msg.append("this means your disk is full, but can be caused by corruption")
		msg.append("on the filesystem that contains PORTDIR. Please investigate")
		msg.append("and try again after the problem has been fixed.")
		msg.append("PORTDIR=" + settings["PORTDIR"])
		msg.append("Rsync was killed before it finished.")
		msg.append("Rsync has not successfully finished. It is recommended that you keep")
		msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
		msg.append("to use rsync due to firewall or other restrictions. This should be a")
		msg.append("temporary problem unless complications exist with your network")
		msg.append("(and possibly your system's filesystem) configuration.")
	elif syncuri[:6]=="cvs://":
		if not os.path.exists("/usr/bin/cvs"):
			print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
			print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
		cvsroot=syncuri[6:]
		cvsdir=os.path.dirname(myportdir)
		if not os.path.exists(myportdir+"/CVS"):
			# Fresh checkout into the parent of PORTDIR.
			print ">>> Starting initial cvs checkout with "+syncuri+"..."
			if os.path.exists(cvsdir+"/gentoo-x86"):
				print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
			os.rmdir(myportdir)
			if e.errno != errno.ENOENT:
				"!!! existing '%s' directory; exiting.\n" % myportdir)
			if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
				print "!!! cvs checkout error; exiting."
			os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
			# Update the existing checkout in place.
			print ">>> Starting cvs update with "+syncuri+"..."
			retval = portage.process.spawn_bash(
				"cd %s; cvs -z0 -q update -dP" % \
				(portage._shell_quote(myportdir),), **spawn_kwargs)
			if retval != os.EX_OK:
		dosyncuri = syncuri
		# Unrecognized SYNC protocol (final else branch).
		writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
			noiselevel=-1, level=logging.ERROR)

	if updatecache_flg and \
		myaction != "metadata" and \
		"metadata-transfer" not in settings.features:
		updatecache_flg = False

	# Reload the whole config from scratch.
	settings, trees, mtimedb = load_emerge_config(trees=trees)
	root_config = trees[settings["ROOT"]]["root_config"]
	portdb = trees[settings["ROOT"]]["porttree"].dbapi

	if updatecache_flg and \
		os.path.exists(os.path.join(myportdir, 'metadata', 'cache')):
		# Only update cache for myportdir since that's
		# the only one that's been synced here.
		action_metadata(settings, portdb, myopts, porttrees=[myportdir])

	if portage._global_updates(trees, mtimedb["updates"]):
		# Reload the whole config from scratch.
		settings, trees, mtimedb = load_emerge_config(trees=trees)
		portdb = trees[settings["ROOT"]]["porttree"].dbapi
		root_config = trees[settings["ROOT"]]["root_config"]

	mybestpv = portdb.xmatch("bestmatch-visible",
		portage.const.PORTAGE_PACKAGE_ATOM)
	mypvs = portage.best(
		trees[settings["ROOT"]]["vartree"].dbapi.match(
		portage.const.PORTAGE_PACKAGE_ATOM))

	chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())

	if myaction != "metadata":
		# Run the user's post_sync hook, if present and executable.
		if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
			retval = portage.process.spawn(
				[os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
				dosyncuri], env=settings.environ())
			if retval != os.EX_OK:
				print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")

	# Recommend a portage upgrade when a newer version is visible.
	if(mybestpv != mypvs) and not "--quiet" in myopts:
		print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
		print red(" * ")+"that you update portage now, before any other packages are updated."
		print red(" * ")+"To update portage, run 'emerge portage' now."

	display_news_notification(root_config, myopts)
def git_sync_timestamps(settings, portdir):
	"""
	Since git doesn't preserve timestamps, synchronize timestamps between
	entries and ebuilds/eclasses. Assume the cache has the correct timestamp
	for a given file as long as the file in the working tree is not modified
	(relative to HEAD).

	NOTE(review): this chunk is elided upstream -- return/continue lines
	and several try: headers are not visible; indentation is
	reconstructed.
	"""
	cache_dir = os.path.join(portdir, "metadata", "cache")
	if not os.path.isdir(cache_dir):
	writemsg_level(">>> Synchronizing timestamps...\n")

	from portage.cache.cache_errors import CacheError
	cache_db = settings.load_best_module("portdbapi.metadbmodule")(
		portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
	except CacheError, e:
		writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
			level=logging.ERROR, noiselevel=-1)

	ec_dir = os.path.join(portdir, "eclass")
	ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
		if f.endswith(".eclass"))
	writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
		level=logging.ERROR, noiselevel=-1)

	# Ask git which tracked files are modified relative to HEAD; their
	# cache timestamps can no longer be trusted.
	args = [portage.const.BASH_BINARY, "-c",
		"cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
		portage._shell_quote(portdir)]
	proc = subprocess.Popen(args, stdout=subprocess.PIPE)
	modified_files = set(l.rstrip("\n") for l in proc.stdout)
	if rval != os.EX_OK:

	modified_eclasses = set(ec for ec in ec_names \
		if os.path.join("eclass", ec + ".eclass") in modified_files)

	updated_ec_mtimes = {}

	for cpv in cache_db:
		cpv_split = portage.catpkgsplit(cpv)
		if cpv_split is None:
			writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
				level=logging.ERROR, noiselevel=-1)

		cat, pn, ver, rev = cpv_split
		cat, pf = portage.catsplit(cpv)
		relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
		if relative_eb_path in modified_files:

		cache_entry = cache_db[cpv]
		eb_mtime = cache_entry.get("_mtime_")
		ec_mtimes = cache_entry.get("_eclasses_")
		writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
			level=logging.ERROR, noiselevel=-1)
		except CacheError, e:
			writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
				(cpv, e), level=logging.ERROR, noiselevel=-1)

		if eb_mtime is None:
			writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
				level=logging.ERROR, noiselevel=-1)

		eb_mtime = long(eb_mtime)
		writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
			(cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)

		if ec_mtimes is None:
			writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
				level=logging.ERROR, noiselevel=-1)

		if modified_eclasses.intersection(ec_mtimes):

		missing_eclasses = set(ec_mtimes).difference(ec_names)
		if missing_eclasses:
			writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
				(cpv, sorted(missing_eclasses)), level=logging.ERROR,

		eb_path = os.path.join(portdir, relative_eb_path)
		# NOTE(review): current_eb_mtime is assigned the whole stat
		# result here, yet it is compared below against the integer
		# eb_mtime -- that comparison can never be equal, so the utime
		# is applied unconditionally; confirm whether ".st_mtime" was
		# intended.
		current_eb_mtime = os.stat(eb_path)
		writemsg_level("!!! Missing ebuild: %s\n" % \
			(cpv,), level=logging.ERROR, noiselevel=-1)

		# Refuse to sync if two cache entries disagree on an eclass mtime.
		inconsistent = False
		for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
			updated_mtime = updated_ec_mtimes.get(ec)
			if updated_mtime is not None and updated_mtime != ec_mtime:
				writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
					(cpv, ec), level=logging.ERROR, noiselevel=-1)
				inconsistent = True

		if current_eb_mtime != eb_mtime:
			os.utime(eb_path, (eb_mtime, eb_mtime))

		for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
			if ec in updated_ec_mtimes:
			ec_path = os.path.join(ec_dir, ec + ".eclass")
			current_mtime = long(os.stat(ec_path).st_mtime)
			if current_mtime != ec_mtime:
				os.utime(ec_path, (ec_mtime, ec_mtime))
			updated_ec_mtimes[ec] = ec_mtime
def action_metadata(settings, portdb, myopts, porttrees=None):
	"""Transfer pregenerated metadata cache into the local depcache.

	For each porttree that has a metadata/cache source, copies valid
	entries into portdb.auxdb, validating mtimes/eclasses, then prunes
	dead cache nodes; shows a progress bar on a tty.

	NOTE(review): this chunk is elided upstream -- try: headers, the
	outer "for cp in cp_all" loop header and several continue/else lines
	are not visible; indentation below is reconstructed.

	@param porttrees: restrict the update to these trees; defaults to
		all of portdb.porttrees
	"""
	if porttrees is None:
		porttrees = portdb.porttrees
	portage.writemsg_stdout("\n>>> Updating Portage cache\n")
	old_umask = os.umask(0002)
	cachedir = os.path.normpath(settings.depcachedir)
	# Refuse to operate directly on a primary system directory.
	if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
		"/lib", "/opt", "/proc", "/root", "/sbin",
		"/sys", "/tmp", "/usr", "/var"]:
		print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
			"ROOT DIRECTORY ON YOUR SYSTEM."
		print >> sys.stderr, \
			"!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
	if not os.path.exists(cachedir):
		os.makedirs(cachedir)

	auxdbkeys = [x for x in portage.auxdbkeys if not x.startswith("UNUSED_0")]
	auxdbkeys = tuple(auxdbkeys)

	# Per-tree bundle of source cache, destination cache and eclass db.
	class TreeData(object):
		__slots__ = ('dest_db', 'eclass_db', 'path', 'src_db', 'valid_nodes')
		def __init__(self, dest_db, eclass_db, path, src_db):
			self.dest_db = dest_db
			self.eclass_db = eclass_db
			# NOTE(review): no "self.path = path" is visible in this
			# chunk although "path" is in __slots__ and read below --
			# confirm against the full file.
			self.src_db = src_db
			self.valid_nodes = set()

	porttrees_data = []
	for path in porttrees:
		src_db = portdb._pregen_auxdb.get(path)
		if src_db is None and \
			os.path.isdir(os.path.join(path, 'metadata', 'cache')):
			src_db = portdb.metadbmodule(
				path, 'metadata/cache', auxdbkeys, readonly=True)
			src_db.ec = portdb._repo_info[path].eclass_db
			except AttributeError:

		if src_db is not None:
			porttrees_data.append(TreeData(portdb.auxdb[path],
				portdb._repo_info[path].eclass_db, path, src_db))

	porttrees = [tree_data.path for tree_data in porttrees_data]

	isatty = sys.stdout.isatty()
	quiet = not isatty or '--quiet' in myopts
	progressBar = portage.output.TermProgressBar()
	progressHandler = ProgressHandler()
	onProgress = progressHandler.onProgress
	progressBar.set(progressHandler.curval, progressHandler.maxval)
	progressHandler.display = display
	def sigwinch_handler(signum, frame):
		lines, progressBar.term_columns = \
			portage.output.get_term_size()
	signal.signal(signal.SIGWINCH, sigwinch_handler)

	# Temporarily override portdb.porttrees so portdb.cp_all()
	# will only return the relevant subset.
	portdb_porttrees = portdb.porttrees
	portdb.porttrees = porttrees
	cp_all = portdb.cp_all()
	portdb.porttrees = portdb_porttrees

	maxval = len(cp_all)
	if onProgress is not None:
		onProgress(maxval, curval)
	from portage.cache.util import quiet_mirroring
	from portage import eapi_is_supported, \
		_validate_cache_for_unsupported_eapis

	# TODO: Display error messages, but do not interfere with the progress bar.
	# Here's how:
	#  1) erase the progress bar
	#  2) show the error message
	#  3) redraw the progress bar on a new line
	noise = quiet_mirroring()

	for tree_data in porttrees_data:
		for cpv in portdb.cp_list(cp, mytree=tree_data.path):
			tree_data.valid_nodes.add(cpv)
			src = tree_data.src_db[cpv]
			except KeyError, e:
				noise.missing_entry(cpv)
			except CacheError, ce:
				noise.exception(cpv, ce)

			eapi = src.get('EAPI')
			eapi = eapi.lstrip('-')
			eapi_supported = eapi_is_supported(eapi)
			if not eapi_supported:
				if not _validate_cache_for_unsupported_eapis:
					noise.misc(cpv, "unable to validate " + \
						"cache for EAPI='%s'" % eapi)

			dest = tree_data.dest_db[cpv]
			except (KeyError, CacheError):

			for d in (src, dest):
				if d is not None and d.get('EAPI') in ('', '0'):

			if dest is not None:
				if not (dest['_mtime_'] == src['_mtime_'] and \
					tree_data.eclass_db.is_eclass_data_valid(
					dest['_eclasses_']) and \
					set(dest['_eclasses_']) == set(src['_eclasses_'])):
				# We don't want to skip the write unless we're really
				# sure that the existing cache is identical, so don't
				# trust _mtime_ and _eclasses_ alone.
				for k in set(chain(src, dest)).difference(
					('_mtime_', '_eclasses_')):
					if dest.get(k, '') != src.get(k, ''):

				if dest is not None:
					# The existing data is valid and identical,
					# so there's no need to overwrite it.

			inherited = src.get('INHERITED', '')
			eclasses = src.get('_eclasses_')
			except CacheError, ce:
				noise.exception(cpv, ce)

			if eclasses is not None:
				if not tree_data.eclass_db.is_eclass_data_valid(
					src['_eclasses_']):
					noise.eclass_stale(cpv)
				inherited = eclasses

			inherited = inherited.split()

			if tree_data.src_db.complete_eclass_entries and \
				noise.corruption(cpv, "missing _eclasses_ field")

			# Even if _eclasses_ already exists, replace it with data from
			# eclass_cache, in order to insert local eclass paths.
			eclasses = tree_data.eclass_db.get_eclass_data(inherited)
			# INHERITED contains a non-existent eclass.
			noise.eclass_stale(cpv)
			if eclasses is None:
				noise.eclass_stale(cpv)
			src['_eclasses_'] = eclasses
			src['_eclasses_'] = {}

			# Store unsupported-EAPI entries in a minimal, marked form.
			if not eapi_supported:
				'EAPI' : '-' + eapi,
				'_mtime_' : src['_mtime_'],
				'_eclasses_' : src['_eclasses_'],

			tree_data.dest_db[cpv] = src
			except CacheError, ce:
				noise.exception(cpv, ce)

		if onProgress is not None:
			onProgress(maxval, curval)

	if onProgress is not None:
		onProgress(maxval, curval)

	# Prune cache nodes that no longer correspond to any ebuild.
	for tree_data in porttrees_data:
		dead_nodes = set(tree_data.dest_db.iterkeys())
		except CacheError, e:
			writemsg_level("Error listing cache entries for " + \
				"'%s': %s, continuing...\n" % (tree_data.path, e),
				level=logging.ERROR, noiselevel=-1)
		dead_nodes.difference_update(tree_data.valid_nodes)
		for cpv in dead_nodes:
			del tree_data.dest_db[cpv]
			except (KeyError, CacheError):

	# make sure the final progress is displayed
	progressHandler.display()
	signal.signal(signal.SIGWINCH, signal.SIG_DFL)

	os.umask(old_umask)
# NOTE(review): this chunk is a line-numbered extraction with interior lines
# missing (the leading integers are original file line numbers); code lines
# are left byte-identical, comments only.
#
# Regenerate the ebuild metadata cache for the portage tree and return the
# MetadataRegen job's exit code.  max_jobs / max_load bound the parallelism.
13671 def action_regen(settings, portdb, max_jobs, max_load):
13672 xterm_titles = "notitles" not in settings.features
13673 emergelog(xterm_titles, " === regen")
13674 #regenerate cache entries
13675 portage.writemsg_stdout("Regenerating cache entries...\n")
# Detach stdin so the long-running regen cannot block on terminal input
# (wrapped in a try whose remaining handlers are not visible here).
13677 os.close(sys.stdin.fileno())
13678 except SystemExit, e:
13679 raise # Needed else can't exit
# Run the parallel metadata regeneration; its returncode is this action's
# exit status.
13684 regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13687 portage.writemsg_stdout("done!\n")
13688 return regen.returncode
# NOTE(review): line-numbered extraction with interior lines missing; code
# left byte-identical, comments only.
#
# Run the pkg_config() phase for a single installed package selected by a
# package atom in myfiles[0].  Prompts the user when the atom is ambiguous
# and --ask is given.
13690 def action_config(settings, trees, myopts, myfiles):
# Exactly one atom is accepted; the early-exit path after this print is not
# visible in this extraction.
13691 if len(myfiles) != 1:
13692 print red("!!! config can only take a single package atom at this time\n")
13694 if not is_valid_package_atom(myfiles[0]):
13695 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13697 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13698 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
# Match the atom against installed packages (vartree).
13702 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13703 except portage.exception.AmbiguousPackageName, e:
13704 # Multiple matches thrown from cpv_expand
13707 print "No packages found.\n"
# Multiple matches: with --ask, present a numbered menu; otherwise require
# a more specific atom.
13709 elif len(pkgs) > 1:
13710 if "--ask" in myopts:
13712 print "Please select a package to configure:"
13716 options.append(str(idx))
13717 print options[-1]+") "+pkg
13719 options.append("X")
13720 idx = userquery("Selection?", options)
13723 pkg = pkgs[int(idx)-1]
13725 print "The following packages available:"
13728 print "\nPlease use a specific atom or the --ask option."
13734 if "--ask" in myopts:
13735 if userquery("Ready to configure "+pkg+"?") == "No":
13738 print "Configuring pkg..."
13740 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13741 mysettings = portage.config(clone=settings)
13742 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13743 debug = mysettings.get("PORTAGE_DEBUG") == "1"
# Run the "config" ebuild phase for the installed package.
# NOTE(review): the debug= expression below compares a string to the int 1,
# so it is always False, and it ignores the `debug` local computed above —
# looks like a bug (should probably be debug=debug); confirm before fixing.
13744 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13746 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
13747 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
# On success, run the "clean" phase to remove the temporary build dir.
13748 if retval == os.EX_OK:
13749 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13750 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
# NOTE(review): line-numbered extraction with interior lines missing; code
# left byte-identical, comments only.
#
# Implement `emerge --info`: print portage/system versions, tool versions,
# key configuration variables, and (when package atoms are given) the
# per-package build-time settings plus an optional pkg_info() phase run.
13753 def action_info(settings, trees, myopts, myfiles):
13754 print getportageversion(settings["PORTDIR"], settings["ROOT"],
13755 settings.profile_path, settings["CHOST"],
13756 trees[settings["ROOT"]]["vartree"].dbapi)
# "System Settings" banner, centered between '=' rules.
13758 header_title = "System Settings"
13760 print header_width * "="
13761 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13762 print header_width * "="
13763 print "System uname: "+platform.platform(aliased=1)
# Portage tree sync timestamp from metadata/timestamp.chk.
13765 lastSync = portage.grabfile(os.path.join(
13766 settings["PORTDIR"], "metadata", "timestamp.chk"))
13767 print "Timestamp of tree:",
# Report distcc / ccache versions via the external commands (Python 2
# `commands` module); whether the line is printed appears to depend on
# FEATURES membership, but the surrounding branches are not visible here.
13773 output=commands.getstatusoutput("distcc --version")
13775 print str(output[1].split("\n",1)[0]),
13776 if "distcc" in settings.features:
13781 output=commands.getstatusoutput("ccache -V")
13783 print str(output[1].split("\n",1)[0]),
13784 if "ccache" in settings.features:
# Versions of toolchain-critical packages, extended by the profile's
# info_pkgs list, deduplicated.
13789 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13790 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
13791 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13792 myvars = portage.util.unique_array(myvars)
13796 if portage.isvalidatom(x):
13797 pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13798 pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13799 pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13801 for pn, ver, rev in pkg_matches:
13803 pkgs.append(ver + "-" + rev)
13807 pkgs = ", ".join(pkgs)
13808 print "%-20s %s" % (x+":", pkgs)
13810 print "%-20s %s" % (x+":", "[NOT VALID]")
13812 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
# Which configuration variables to dump: everything with --verbose,
# otherwise a curated list plus the profile's info_vars additions.
13814 if "--verbose" in myopts:
13815 myvars=settings.keys()
13817 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13818 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13819 'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13820 'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13822 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13824 myvars = portage.util.unique_array(myvars)
13825 use_expand = settings.get('USE_EXPAND', '').split()
13827 use_expand_hidden = set(
13828 settings.get('USE_EXPAND_HIDDEN', '').upper().split())
13829 alphabetical_use = '--alphabetical' in myopts
13830 root_config = trees[settings["ROOT"]]['root_config']
13836 print '%s="%s"' % (x, settings[x])
# USE is printed specially: flags belonging to USE_EXPAND variables are
# removed from the plain USE set and shown under their own variable.
13838 use = set(settings["USE"].split())
13839 for varname in use_expand:
13840 flag_prefix = varname.lower() + "_"
13841 for f in list(use):
13842 if f.startswith(flag_prefix):
13846 print 'USE="%s"' % " ".join(use),
13847 for varname in use_expand:
13848 myval = settings.get(varname)
13850 print '%s="%s"' % (varname, myval),
13853 unset_vars.append(x)
13855 print "Unset: "+", ".join(unset_vars)
# --debug: dump cvs_id_string of every portage submodule that has one.
13858 if "--debug" in myopts:
13859 for x in dir(portage):
13860 module = getattr(portage, x)
13861 if "cvs_id_string" in dir(module):
13862 print "%s: %s" % (str(x), str(module.cvs_id_string))
13864 # See if we can find any packages installed matching the strings
13865 # passed on the command line
13867 vardb = trees[settings["ROOT"]]["vartree"].dbapi
13868 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13870 mypkgs.extend(vardb.match(x))
13872 # If some packages were found...
13874 # Get our global settings (we only print stuff if it varies from
13875 # the current config)
13876 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13877 auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
13878 auxkeys.append('DEFINED_PHASES')
13880 pkgsettings = portage.config(clone=settings)
13882 # Loop through each package
13883 # Only print settings if they differ from global settings
13884 header_title = "Package Settings"
13885 print header_width * "="
13886 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13887 print header_width * "="
13888 from portage.output import EOutput
13891 # Get all package specific variables
13892 metadata = dict(izip(auxkeys, vardb.aux_get(cpv, auxkeys)))
13893 pkg = Package(built=True, cpv=cpv,
13894 installed=True, metadata=izip(Package.metadata_keys,
13895 (metadata.get(x, '') for x in Package.metadata_keys)),
13896 root_config=root_config, type_name='installed')
13898 print "\n%s was built with the following:" % \
13899 colorize("INFORM", str(pkg.cpv))
# Reconstruct the package's effective USE at build time, separating
# enabled/disabled flags and grouping USE_EXPAND flags by variable.
13901 pkgsettings.setcpv(pkg)
13902 forced_flags = set(chain(pkgsettings.useforce,
13903 pkgsettings.usemask))
13904 use = set(pkg.use.enabled)
13905 use.discard(pkgsettings.get('ARCH'))
13906 use_expand_flags = set()
13909 for varname in use_expand:
13910 flag_prefix = varname.lower() + "_"
13912 if f.startswith(flag_prefix):
13913 use_expand_flags.add(f)
13914 use_enabled.setdefault(
13915 varname.upper(), []).append(f[len(flag_prefix):])
13917 for f in pkg.iuse.all:
13918 if f.startswith(flag_prefix):
13919 use_expand_flags.add(f)
13921 use_disabled.setdefault(
13922 varname.upper(), []).append(f[len(flag_prefix):])
13924 var_order = set(use_enabled)
13925 var_order.update(use_disabled)
13926 var_order = sorted(var_order)
13927 var_order.insert(0, 'USE')
13928 use.difference_update(use_expand_flags)
13929 use_enabled['USE'] = list(use)
13930 use_disabled['USE'] = []
13932 for f in pkg.iuse.all:
13933 if f not in use and \
13934 f not in use_expand_flags:
13935 use_disabled['USE'].append(f)
# Print each variable's flags, skipping USE_EXPAND_HIDDEN entries;
# sort order depends on --alphabetical.
13937 for varname in var_order:
13938 if varname in use_expand_hidden:
13941 for f in use_enabled.get(varname, []):
13942 flags.append(UseFlagDisplay(f, True, f in forced_flags))
13943 for f in use_disabled.get(varname, []):
13944 flags.append(UseFlagDisplay(f, False, f in forced_flags))
13945 if alphabetical_use:
13946 flags.sort(key=UseFlagDisplay.sort_combined)
13948 flags.sort(key=UseFlagDisplay.sort_separated)
13949 print '%s="%s"' % (varname, ' '.join(str(f) for f in flags)),
# Show CHOST/CFLAGS/... only when the recorded build value differs from
# the current configuration.
13952 for myvar in mydesiredvars:
13953 if metadata[myvar].split() != settings.get(myvar, '').split():
13954 print "%s=\"%s\"" % (myvar, metadata[myvar])
# Run the pkg_info() ebuild phase when the package defines it.
13957 if metadata['DEFINED_PHASES']:
13958 if 'info' not in metadata['DEFINED_PHASES'].split():
13961 print ">>> Attempting to run pkg_info() for '%s'" % pkg.cpv
13962 ebuildpath = vardb.findname(pkg.cpv)
13963 if not ebuildpath or not os.path.exists(ebuildpath):
13964 out.ewarn("No ebuild found for '%s'" % pkg.cpv)
# NOTE(review): debug= compares a string to the int 1 (always False) —
# likely should test against "1"; confirm before fixing.
13966 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13967 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
13968 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
# NOTE(review): line-numbered extraction with interior lines missing; code
# left byte-identical, comments only.
#
# Implement `emerge --search`: run each search term through a `search`
# instance configured from the command-line options and print the results.
13971 def action_search(root_config, myopts, myfiles, spinner):
13973 print "emerge: no search terms provided."
13975 searchinstance = search(root_config,
13976 spinner, "--searchdesc" in myopts,
13977 "--quiet" not in myopts, "--usepkg" in myopts,
13978 "--usepkgonly" in myopts)
13979 for mysearch in myfiles:
# execute() is wrapped so a malformed user regex reports an error instead
# of crashing (the try line itself is not visible in this extraction).
13981 searchinstance.execute(mysearch)
13982 except re.error, comment:
13983 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13985 searchinstance.output()
# NOTE(review): line-numbered extraction with interior lines missing; code
# left byte-identical, comments only.
#
# Common entry point for the removal actions (clean / unmerge / prune /
# depclean / deselect): validate the command-line arguments into atoms
# (accepting absolute file paths, which are resolved to their owning
# packages), then dispatch to unmerge()/action_deselect()/action_depclean().
13987 def action_uninstall(settings, trees, ldpath_mtimes,
13988 opts, action, files, spinner):
13990 # For backward compat, some actions do not require leading '='.
13991 ignore_missing_eq = action in ('clean', 'unmerge')
13992 root = settings['ROOT']
13993 vardb = trees[root]['vartree'].dbapi
13997 # Ensure atoms are valid before calling unmerge().
13998 # For backward compat, leading '=' is not required.
14000 if is_valid_package_atom(x) or \
14001 (ignore_missing_eq and is_valid_package_atom('=' + x)):
# dep_expand resolves a short name (e.g. "foo") to a full cat/pkg atom and
# raises AmbiguousPackageName when several categories match.
14004 valid_atoms.append(
14005 portage.dep_expand(x, mydb=vardb, settings=settings))
14006 except portage.exception.AmbiguousPackageName, e:
14007 msg = "The short ebuild name \"" + x + \
14008 "\" is ambiguous. Please specify " + \
14009 "one of the following " + \
14010 "fully-qualified ebuild names instead:"
14011 for line in textwrap.wrap(msg, 70):
14012 writemsg_level("!!! %s\n" % (line,),
14013 level=logging.ERROR, noiselevel=-1)
14015 writemsg_level(" %s\n" % colorize("INFORM", i),
14016 level=logging.ERROR, noiselevel=-1)
14017 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
# Absolute paths are looked up against installed-package contents; they
# must live under $ROOT so they can be made root-relative below.
14020 elif x.startswith(os.sep):
14021 if not x.startswith(root):
14022 writemsg_level(("!!! '%s' does not start with" + \
14023 " $ROOT.\n") % x, level=logging.ERROR, noiselevel=-1)
14025 # Queue these up since it's most efficient to handle
14026 # multiple files in a single iter_owners() call.
14027 lookup_owners.append(x)
14031 msg.append("'%s' is not a valid package atom." % (x,))
14032 msg.append("Please check ebuild(5) for full details.")
14033 writemsg_level("".join("!!! %s\n" % line for line in msg),
14034 level=logging.ERROR, noiselevel=-1)
# Resolve the queued file paths to owning packages in one pass.
14038 relative_paths = []
14039 search_for_multiple = False
14040 if len(lookup_owners) > 1:
14041 search_for_multiple = True
14043 for x in lookup_owners:
14044 if not search_for_multiple and os.path.isdir(x):
14045 search_for_multiple = True
14046 relative_paths.append(x[len(root):])
14049 for pkg, relative_path in \
14050 vardb._owners.iter_owners(relative_paths):
14051 owners.add(pkg.mycpv)
14052 if not search_for_multiple:
# Build a slot-qualified atom per owner; fall back to the bare cat/pkg key
# when SLOT is missing (possible for packages installed by old portage).
14057 slot = vardb.aux_get(cpv, ['SLOT'])[0]
14059 # portage now masks packages with missing slot, but it's
14060 # possible that one was installed by an older version
14061 atom = portage.cpv_getkey(cpv)
14063 atom = '%s:%s' % (portage.cpv_getkey(cpv), slot)
14064 valid_atoms.append(portage.dep.Atom(atom))
14066 writemsg_level(("!!! '%s' is not claimed " + \
14067 "by any package.\n") % lookup_owners[0],
14068 level=logging.WARNING, noiselevel=-1)
14070 if files and not valid_atoms:
# Dispatch: clean/unmerge (and prune with --nodeps) go straight to
# unmerge(); deselect and the remaining depclean/prune cases go to their
# dedicated action functions.
14073 if action in ('clean', 'unmerge') or \
14074 (action == 'prune' and "--nodeps" in opts):
14075 # When given a list of atoms, unmerge them in the order given.
14076 ordered = action == 'unmerge'
14077 unmerge(trees[settings["ROOT"]]['root_config'], opts, action,
14078 valid_atoms, ldpath_mtimes, ordered=ordered)
14080 elif action == 'deselect':
14081 rval = action_deselect(settings, trees, opts, valid_atoms)
14083 rval = action_depclean(settings, trees, ldpath_mtimes,
14084 opts, action, valid_atoms, spinner)
# NOTE(review): line-numbered extraction with interior lines missing; code
# left byte-identical, comments only.
#
# Remove the given atoms from the "world" favorites set.  Atoms are
# expanded with slot qualifiers from installed packages so that a plain
# atom also matches slot-qualified world entries.  Honors --pretend and
# --ask; locks the world set around the modification when supported.
14088 def action_deselect(settings, trees, opts, atoms):
14089 root_config = trees[settings['ROOT']]['root_config']
14090 world_set = root_config.sets['world']
# A world set without update() can't be modified (e.g. read-only backend).
14091 if not hasattr(world_set, 'update'):
14092 writemsg_level("World set does not appear to be mutable.\n",
14093 level=logging.ERROR, noiselevel=-1)
14096 vardb = root_config.trees['vartree'].dbapi
14097 expanded_atoms = set(atoms)
14098 from portage.dep import Atom
# Add a slot-qualified variant for every installed match of each atom.
14100 for cpv in vardb.match(atom):
14101 slot, = vardb.aux_get(cpv, ['SLOT'])
14104 expanded_atoms.add(Atom('%s:%s' % (portage.cpv_getkey(cpv), slot)))
14106 pretend = '--pretend' in opts
14108 if not pretend and hasattr(world_set, 'lock'):
# Collect world atoms intersecting any argument atom; a slotted argument
# does not discard an unslotted world entry.
14112 discard_atoms = set()
14114 for atom in world_set:
14115 if not isinstance(atom, Atom):
14118 for arg_atom in expanded_atoms:
14119 if arg_atom.intersects(atom) and \
14120 not (arg_atom.slot and not atom.slot):
14121 discard_atoms.add(atom)
14124 for atom in sorted(discard_atoms):
14125 print ">>> Removing %s from \"world\" favorites file..." % \
14126 colorize("INFORM", str(atom))
14128 if '--ask' in opts:
14129 prompt = "Would you like to remove these " + \
14130 "packages from your world favorites?"
14131 if userquery(prompt) == 'No':
# Rewrite the world set without the discarded atoms.
14134 remaining = set(world_set)
14135 remaining.difference_update(discard_atoms)
14137 world_set.replace(remaining)
14139 print ">>> No matching atoms found in \"world\" favorites file..."
# NOTE(review): line-numbered extraction with many interior lines missing
# (leading integers are original file line numbers); the logic is too
# order-sensitive to restyle from this view, so code is left byte-identical
# and only comments are added.
#
# Implement `emerge --depclean` / `--prune`: build a dependency graph
# rooted at the system and world sets, compute the installed packages not
# required by that graph, protect sole providers of shared libraries that
# still have consumers, then unmerge the remainder in reverse-dependency
# order.
14145 def action_depclean(settings, trees, ldpath_mtimes,
14146 myopts, action, myfiles, spinner):
14147 # Kill packages that aren't explicitly merged or are required as a
14148 # dependency of another package. World file is explicit.
14150 # Global depclean or prune operations are not very safe when there are
14151 # missing dependencies since it's unknown how badly incomplete
14152 # the dependency graph is, and we might accidentally remove packages
14153 # that should have been pulled into the graph. On the other hand, it's
14154 # relatively safe to ignore missing deps when only asked to remove
14155 # specific packages.
14156 allow_missing_deps = len(myfiles) > 0
# Safety notice printed before a full (argument-less) depclean.
14159 msg.append("Always study the list of packages to be cleaned for any obvious\n")
14160 msg.append("mistakes. Packages that are part of the world set will always\n")
14161 msg.append("be kept. They can be manually added to this set with\n")
14162 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
14163 msg.append("package.provided (see portage(5)) will be removed by\n")
14164 msg.append("depclean, even if they are part of the world set.\n")
14166 msg.append("As a safety measure, depclean will not remove any packages\n")
14167 msg.append("unless *all* required dependencies have been resolved. As a\n")
14168 msg.append("consequence, it is often necessary to run %s\n" % \
14169 good("`emerge --update"))
14170 msg.append(good("--newuse --deep @system @world`") + \
14171 " prior to depclean.\n")
14173 if action == "depclean" and "--quiet" not in myopts and not myfiles:
14174 portage.writemsg_stdout("\n")
14176 portage.writemsg_stdout(colorize("WARN", " * ") + x)
14178 xterm_titles = "notitles" not in settings.features
14179 myroot = settings["ROOT"]
14180 root_config = trees[myroot]["root_config"]
14181 getSetAtoms = root_config.setconfig.getSetAtoms
14182 vardb = trees[myroot]["vartree"].dbapi
14183 deselect = myopts.get('--deselect') != 'n'
# Load the system and world sets; everything reachable from them is kept.
14185 required_set_names = ("system", "world")
14189 for s in required_set_names:
14190 required_sets[s] = InternalPackageSet(
14191 initial_atoms=getSetAtoms(s))
14194 # When removing packages, use a temporary version of world
14195 # which excludes packages that are intended to be eligible for
14197 world_temp_set = required_sets["world"]
14198 system_set = required_sets["system"]
# Warn loudly when system/world are empty — proceeding is dangerous.
14200 if not system_set or not world_temp_set:
14203 writemsg_level("!!! You have no system list.\n",
14204 level=logging.ERROR, noiselevel=-1)
14206 if not world_temp_set:
14207 writemsg_level("!!! You have no world file.\n",
14208 level=logging.WARNING, noiselevel=-1)
14210 writemsg_level("!!! Proceeding is likely to " + \
14211 "break your installation.\n",
14212 level=logging.WARNING, noiselevel=-1)
14213 if "--pretend" not in myopts:
14214 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
14216 if action == "depclean":
14217 emergelog(xterm_titles, " >>> depclean")
# The argument atoms select which installed packages are candidates.
14220 args_set = InternalPackageSet()
14222 args_set.update(myfiles)
14223 matched_packages = False
14226 matched_packages = True
14228 if not matched_packages:
14229 writemsg_level(">>> No packages selected for removal by %s\n" % \
# Build the resolver ("remove" mode) that computes the keep-graph.
14233 writemsg_level("\nCalculating dependencies ")
14234 resolver_params = create_depgraph_params(myopts, "remove")
14235 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
14236 vardb = resolver.trees[myroot]["vartree"].dbapi
14238 if action == "depclean":
# depclean with arguments: temporarily treat every installed package NOT
# matched by an argument as a world member, so only the arguments are
# eligible for removal.
14243 world_temp_set.clear()
14245 # Pull in everything that's installed but not matched
14246 # by an argument atom since we don't want to clean any
14247 # package if something depends on it.
14252 if args_set.findAtomForPackage(pkg) is None:
14253 world_temp_set.add("=" + pkg.cpv)
14255 except portage.exception.InvalidDependString, e:
14256 show_invalid_depstring_notice(pkg,
14257 pkg.metadata["PROVIDE"], str(e))
14259 world_temp_set.add("=" + pkg.cpv)
14262 elif action == "prune":
# prune: keep at least one version of everything; only extra slotted
# versions (below the highest) are candidates.
14265 world_temp_set.clear()
14267 # Pull in everything that's installed since we don't
14268 # to prune a package if something depends on it.
14269 world_temp_set.update(vardb.cp_all())
14273 # Try to prune everything that's slotted.
14274 for cp in vardb.cp_all():
14275 if len(vardb.cp_list(cp)) > 1:
14278 # Remove atoms from world that match installed packages
14279 # that are also matched by argument atoms, but do not remove
14280 # them if they match the highest installed version.
14283 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
14284 if not pkgs_for_cp or pkg not in pkgs_for_cp:
14285 raise AssertionError("package expected in matches: " + \
14286 "cp = %s, cpv = %s matches = %s" % \
14287 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
14289 highest_version = pkgs_for_cp[-1]
14290 if pkg == highest_version:
14291 # pkg is the highest version
14292 world_temp_set.add("=" + pkg.cpv)
14295 if len(pkgs_for_cp) <= 1:
14296 raise AssertionError("more packages expected: " + \
14297 "cp = %s, cpv = %s matches = %s" % \
14298 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
14301 if args_set.findAtomForPackage(pkg) is None:
14302 world_temp_set.add("=" + pkg.cpv)
14304 except portage.exception.InvalidDependString, e:
14305 show_invalid_depstring_notice(pkg,
14306 pkg.metadata["PROVIDE"], str(e))
14308 world_temp_set.add("=" + pkg.cpv)
# Seed the resolver's graph with the (possibly rewritten) required sets
# and let it pull in their full dependency closure.
14312 for s, package_set in required_sets.iteritems():
14313 set_atom = SETPREFIX + s
14314 set_arg = SetArg(arg=set_atom, set=package_set,
14315 root_config=resolver.roots[myroot])
14316 set_args[s] = set_arg
14317 for atom in set_arg.set:
14318 resolver._dep_stack.append(
14319 Dependency(atom=atom, root=myroot, parent=set_arg))
14320 resolver.digraph.add(set_arg, None)
14322 success = resolver._complete_graph()
14323 writemsg_level("\b\b... done!\n")
14325 resolver.display_problems()
# Helper: report hard (non-soft-priority) dependencies that could not be
# satisfied; aborts the clean unless specific atoms were requested.
14330 def unresolved_deps():
14332 unresolvable = set()
14333 for dep in resolver._initially_unsatisfied_deps:
14334 if isinstance(dep.parent, Package) and \
14335 (dep.priority > UnmergeDepPriority.SOFT):
14336 unresolvable.add((dep.atom, dep.parent.cpv))
14338 if not unresolvable:
14341 if unresolvable and not allow_missing_deps:
14342 prefix = bad(" * ")
14344 msg.append("Dependencies could not be completely resolved due to")
14345 msg.append("the following required packages not being installed:")
14347 for atom, parent in unresolvable:
14348 msg.append(" %s pulled in by:" % (atom,))
14349 msg.append(" %s" % (parent,))
14351 msg.append("Have you forgotten to run " + \
14352 good("`emerge --update --newuse --deep @system @world`") + " prior")
14353 msg.append(("to %s? It may be necessary to manually " + \
14354 "uninstall packages that no longer") % action)
14355 msg.append("exist in the portage tree since " + \
14356 "it may not be possible to satisfy their")
14357 msg.append("dependencies. Also, be aware of " + \
14358 "the --with-bdeps option that is documented")
14359 msg.append("in " + good("`man emerge`") + ".")
14360 if action == "prune":
14362 msg.append("If you would like to ignore " + \
14363 "dependencies then use %s." % good("--nodeps"))
14364 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
14365 level=logging.ERROR, noiselevel=-1)
14369 if unresolved_deps():
# Snapshot the keep-graph and count the required packages for stats.
14372 graph = resolver.digraph.copy()
14373 required_pkgs_total = 0
14375 if isinstance(node, Package):
14376 required_pkgs_total += 1
# Helper: with --verbose, explain why a package is kept by listing its
# graph parents.
14378 def show_parents(child_node):
14379 parent_nodes = graph.parent_nodes(child_node)
14380 if not parent_nodes:
14381 # With --prune, the highest version can be pulled in without any
14382 # real parent since all installed packages are pulled in. In that
14383 # case there's nothing to show here.
14386 for node in parent_nodes:
14387 parent_strs.append(str(getattr(node, "cpv", node)))
14390 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
14391 for parent_str in parent_strs:
14392 msg.append(" %s\n" % (parent_str,))
14394 portage.writemsg_stdout("".join(msg), noiselevel=-1)
# Helper: cmp-style comparator so vardb packages sort by cpv.
14396 def cmp_pkg_cpv(pkg1, pkg2):
14397 """Sort Package instances by cpv."""
14398 if pkg1.cpv > pkg2.cpv:
14400 elif pkg1.cpv == pkg2.cpv:
# Helper: the removal candidates are the installed packages absent from
# the keep-graph (filtered by the argument atoms where given).
14405 def create_cleanlist():
14406 pkgs_to_remove = []
14408 if action == "depclean":
14411 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
14414 arg_atom = args_set.findAtomForPackage(pkg)
14415 except portage.exception.InvalidDependString:
14416 # this error has already been displayed by now
14420 if pkg not in graph:
14421 pkgs_to_remove.append(pkg)
14422 elif "--verbose" in myopts:
14426 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
14427 if pkg not in graph:
14428 pkgs_to_remove.append(pkg)
14429 elif "--verbose" in myopts:
14432 elif action == "prune":
14433 # Prune really uses all installed instead of world. It's not
14434 # a real reverse dependency so don't display it as such.
14435 graph.remove(set_args["world"])
14437 for atom in args_set:
14438 for pkg in vardb.match_pkgs(atom):
14439 if pkg not in graph:
14440 pkgs_to_remove.append(pkg)
14441 elif "--verbose" in myopts:
14444 if not pkgs_to_remove:
14446 ">>> No packages selected for removal by %s\n" % action)
14447 if "--verbose" not in myopts:
14449 ">>> To see reverse dependencies, use %s\n" % \
14451 if action == "prune":
14453 ">>> To ignore dependencies, use %s\n" % \
14456 return pkgs_to_remove
14458 cleanlist = create_cleanlist()
14461 clean_set = set(cleanlist)
14463 # Check if any of these package are the sole providers of libraries
14464 # with consumers that have not been selected for removal. If so, these
14465 # packages and any dependencies need to be added to the graph.
14466 real_vardb = trees[myroot]["vartree"].dbapi
14467 linkmap = real_vardb.linkmap
14468 liblist = linkmap.listLibraryObjects()
14469 consumer_cache = {}
14470 provider_cache = {}
14474 writemsg_level(">>> Checking for lib consumers...\n")
# Pass 1: for each candidate, find the libraries it owns and their
# consumers (cached per library path).
14476 for pkg in cleanlist:
14477 pkg_dblink = real_vardb._dblink(pkg.cpv)
14478 provided_libs = set()
14480 for lib in liblist:
14481 if pkg_dblink.isowner(lib, myroot):
14482 provided_libs.add(lib)
14484 if not provided_libs:
14488 for lib in provided_libs:
14489 lib_consumers = consumer_cache.get(lib)
14490 if lib_consumers is None:
14491 lib_consumers = linkmap.findConsumers(lib)
14492 consumer_cache[lib] = lib_consumers
14494 consumers[lib] = lib_consumers
# Drop consumers owned by the providing package itself.
14499 for lib, lib_consumers in consumers.items():
14500 for consumer_file in list(lib_consumers):
14501 if pkg_dblink.isowner(consumer_file, myroot):
14502 lib_consumers.remove(consumer_file)
14503 if not lib_consumers:
# Record, per consumer, the alternative providers of the same soname.
14509 for lib, lib_consumers in consumers.iteritems():
14511 soname = soname_cache.get(lib)
14513 soname = linkmap.getSoname(lib)
14514 soname_cache[lib] = soname
14516 consumer_providers = []
14517 for lib_consumer in lib_consumers:
14518 providers = provider_cache.get(lib)
14519 if providers is None:
14520 providers = linkmap.findProviders(lib_consumer)
14521 provider_cache[lib_consumer] = providers
14522 if soname not in providers:
14523 # Why does this happen?
14525 consumer_providers.append(
14526 (lib_consumer, providers[soname]))
14528 consumers[lib] = consumer_providers
14530 consumer_map[pkg] = consumers
# Pass 2: map every consumer/provider file back to its owning package in
# one bulk lookup.
14534 search_files = set()
14535 for consumers in consumer_map.itervalues():
14536 for lib, consumer_providers in consumers.iteritems():
14537 for lib_consumer, providers in consumer_providers:
14538 search_files.add(lib_consumer)
14539 search_files.update(providers)
14541 writemsg_level(">>> Assigning files to packages...\n")
14542 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
14544 for pkg, consumers in consumer_map.items():
14545 for lib, consumer_providers in consumers.items():
14546 lib_consumers = set()
14548 for lib_consumer, providers in consumer_providers:
14549 owner_set = file_owners.get(lib_consumer)
14550 provider_dblinks = set()
14551 provider_pkgs = set()
# If another package (not being removed) also provides the soname, this
# consumer is covered and can be ignored.
14553 if len(providers) > 1:
14554 for provider in providers:
14555 provider_set = file_owners.get(provider)
14556 if provider_set is not None:
14557 provider_dblinks.update(provider_set)
14559 if len(provider_dblinks) > 1:
14560 for provider_dblink in provider_dblinks:
14561 pkg_key = ("installed", myroot,
14562 provider_dblink.mycpv, "nomerge")
14563 if pkg_key not in clean_set:
14564 provider_pkgs.add(vardb.get(pkg_key))
14569 if owner_set is not None:
14570 lib_consumers.update(owner_set)
# Consumers that are themselves scheduled for removal don't count.
14572 for consumer_dblink in list(lib_consumers):
14573 if ("installed", myroot, consumer_dblink.mycpv,
14574 "nomerge") in clean_set:
14575 lib_consumers.remove(consumer_dblink)
14579 consumers[lib] = lib_consumers
14583 del consumer_map[pkg]
# Any package left in consumer_map is a sole library provider with live
# consumers: warn and keep it (plus its deps) in the graph.
14586 # TODO: Implement a package set for rebuilding consumer packages.
14588 msg = "In order to avoid breakage of link level " + \
14589 "dependencies, one or more packages will not be removed. " + \
14590 "This can be solved by rebuilding " + \
14591 "the packages that pulled them in."
14593 prefix = bad(" * ")
14594 from textwrap import wrap
14595 writemsg_level("".join(prefix + "%s\n" % line for \
14596 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
14599 for pkg, consumers in consumer_map.iteritems():
14600 unique_consumers = set(chain(*consumers.values()))
14601 unique_consumers = sorted(consumer.mycpv \
14602 for consumer in unique_consumers)
14604 msg.append(" %s pulled in by:" % (pkg.cpv,))
14605 for consumer in unique_consumers:
14606 msg.append(" %s" % (consumer,))
14608 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
14609 level=logging.WARNING, noiselevel=-1)
14611 # Add lib providers to the graph as children of lib consumers,
14612 # and also add any dependencies pulled in by the provider.
14613 writemsg_level(">>> Adding lib providers to graph...\n")
14615 for pkg, consumers in consumer_map.iteritems():
14616 for consumer_dblink in set(chain(*consumers.values())):
14617 consumer_pkg = vardb.get(("installed", myroot,
14618 consumer_dblink.mycpv, "nomerge"))
14619 if not resolver._add_pkg(pkg,
14620 Dependency(parent=consumer_pkg,
14621 priority=UnmergeDepPriority(runtime=True),
14623 resolver.display_problems()
# Re-run graph completion and recompute the cleanlist now that the
# protected providers are part of the keep-graph.
14626 writemsg_level("\nCalculating dependencies ")
14627 success = resolver._complete_graph()
14628 writemsg_level("\b\b... done!\n")
14629 resolver.display_problems()
14632 if unresolved_deps():
14635 graph = resolver.digraph.copy()
14636 required_pkgs_total = 0
14638 if isinstance(node, Package):
14639 required_pkgs_total += 1
14640 cleanlist = create_cleanlist()
14643 clean_set = set(cleanlist)
14645 # Use a topological sort to create an unmerge order such that
14646 # each package is unmerged before it's dependencies. This is
14647 # necessary to avoid breaking things that may need to run
14648 # during pkg_prerm or pkg_postrm phases.
14650 # Create a new graph to account for dependencies between the
14651 # packages being unmerged.
14655 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
14656 runtime = UnmergeDepPriority(runtime=True)
14657 runtime_post = UnmergeDepPriority(runtime_post=True)
14658 buildtime = UnmergeDepPriority(buildtime=True)
14660 "RDEPEND": runtime,
14661 "PDEPEND": runtime_post,
14662 "DEPEND": buildtime,
# Build edges only between packages that are both being removed.
14665 for node in clean_set:
14666 graph.add(node, None)
14668 node_use = node.metadata["USE"].split()
14669 for dep_type in dep_keys:
14670 depstr = node.metadata[dep_type]
# Temporarily disable strict dep checking: invalid deps of packages being
# uninstalled anyway should not abort the operation.
14674 portage.dep._dep_check_strict = False
14675 success, atoms = portage.dep_check(depstr, None, settings,
14676 myuse=node_use, trees=resolver._graph_trees,
14679 portage.dep._dep_check_strict = True
14681 # Ignore invalid deps of packages that will
14682 # be uninstalled anyway.
14685 priority = priority_map[dep_type]
14687 if not isinstance(atom, portage.dep.Atom):
14688 # Ignore invalid atoms returned from dep_check().
14692 matches = vardb.match_pkgs(atom)
14695 for child_node in matches:
14696 if child_node in clean_set:
14697 graph.add(child_node, node, priority=priority)
14700 if len(graph.order) == len(graph.root_nodes()):
14701 # If there are no dependencies between packages
14702 # let unmerge() group them by cat/pn.
14704 cleanlist = [pkg.cpv for pkg in graph.order]
14706 # Order nodes from lowest to highest overall reference count for
14707 # optimal root node selection.
14708 node_refcounts = {}
14709 for node in graph.order:
14710 node_refcounts[node] = len(graph.parent_nodes(node))
14711 def cmp_reference_count(node1, node2):
14712 return node_refcounts[node1] - node_refcounts[node2]
14713 graph.order.sort(key=cmp_sort_key(cmp_reference_count))
# Pop root nodes repeatedly, progressively ignoring lower dep priorities
# to break circular dependencies.
14715 ignore_priority_range = [None]
14716 ignore_priority_range.extend(
14717 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14718 while not graph.empty():
14719 for ignore_priority in ignore_priority_range:
14720 nodes = graph.root_nodes(ignore_priority=ignore_priority)
14724 raise AssertionError("no root nodes")
14725 if ignore_priority is not None:
14726 # Some deps have been dropped due to circular dependencies,
14727 # so only pop one node in order do minimize the number that
14732 cleanlist.append(node.cpv)
14734 unmerge(root_config, myopts, "unmerge", cleanlist,
14735 ldpath_mtimes, ordered=ordered)
14737 if action == "prune":
# Final statistics summary.
14740 if not cleanlist and "--quiet" in myopts:
14743 print "Packages installed: "+str(len(vardb.cpv_all()))
14744 print "Packages in world: " + \
14745 str(len(root_config.sets["world"].getAtoms()))
14746 print "Packages in system: " + \
14747 str(len(root_config.sets["system"].getAtoms()))
14748 print "Required packages: "+str(required_pkgs_total)
14749 if "--pretend" in myopts:
14750 print "Number to remove: "+str(len(cleanlist))
14752 print "Number removed: "+str(len(cleanlist))
# Build a dependency graph from mtimedb["resume"]["mergelist"].  When the
# resume list contains unsatisfied tasks, drop them (and any merge-scheduled
# parents that depended on them) and retry, accumulating the removed packages
# in dropped_tasks.
# NOTE(review): source is mangled — stray numeric prefixes and missing lines;
# comments below are added in place only.
14754 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14756 Construct a depgraph for the given resume list. This will raise
14757 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14759 @returns: (success, depgraph, dropped_tasks)
14762 skip_unsatisfied = True
14763 mergelist = mtimedb["resume"]["mergelist"]
14764 dropped_tasks = set()
14766 mydepgraph = depgraph(settings, trees,
14767 myopts, myparams, spinner)
14769 success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14770 skip_masked=skip_masked)
14771 except depgraph.UnsatisfiedResumeDep, e:
14772 if not skip_unsatisfied:
# Seed the traversal with every parent whose dependency is unsatisfied.
14775 graph = mydepgraph.digraph
14776 unsatisfied_parents = dict((dep.parent, dep.parent) \
14777 for dep in e.value)
14778 traversed_nodes = set()
14779 unsatisfied_stack = list(unsatisfied_parents)
14780 while unsatisfied_stack:
14781 pkg = unsatisfied_stack.pop()
14782 if pkg in traversed_nodes:
14784 traversed_nodes.add(pkg)
14786 # If this package was pulled in by a parent
14787 # package scheduled for merge, removing this
14788 # package may cause the parent package's
14789 # dependency to become unsatisfied.
14790 for parent_node in graph.parent_nodes(pkg):
14791 if not isinstance(parent_node, Package) \
14792 or parent_node.operation not in ("merge", "nomerge"):
14795 graph.child_nodes(parent_node,
14796 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14797 if pkg in unsatisfied:
14798 unsatisfied_parents[parent_node] = parent_node
14799 unsatisfied_stack.append(parent_node)
# Rebuild the mergelist without the unsatisfied parents.
14801 pruned_mergelist = []
14802 for x in mergelist:
14803 if isinstance(x, list) and \
14804 tuple(x) not in unsatisfied_parents:
14805 pruned_mergelist.append(x)
14807 # If the mergelist doesn't shrink then this loop is infinite.
14808 if len(pruned_mergelist) == len(mergelist):
14809 # This happens if a package can't be dropped because
14810 # it's already installed, but it has unsatisfied PDEPEND.
14812 mergelist[:] = pruned_mergelist
14814 # Exclude installed packages that have been removed from the graph due
14815 # to failure to build/install runtime dependencies after the dependent
14816 # package has already been installed.
14817 dropped_tasks.update(pkg for pkg in \
14818 unsatisfied_parents if pkg.operation != "nomerge")
14819 mydepgraph.break_refs(unsatisfied_parents)
# Release temporary traversal state before the (not visible here) retry.
14821 del e, graph, traversed_nodes, \
14822 unsatisfied_parents, unsatisfied_stack
14826 return (success, mydepgraph, dropped_tasks)
# Top-level driver for "emerge <targets>": validates/loads resume data,
# computes a depgraph, displays/prompts as requested, then hands the merge
# list to the Scheduler and optionally auto-cleans afterwards.
# NOTE(review): source is mangled — stray numeric prefixes and missing lines;
# comments below are added in place only.
14828 def action_build(settings, trees, mtimedb,
14829 myopts, myaction, myfiles, spinner):
14831 # validate the state of the resume data
14832 # so that we can make assumptions later.
14833 for k in ("resume", "resume_backup"):
14834 if k not in mtimedb:
14836 resume_data = mtimedb[k]
14837 if not isinstance(resume_data, dict):
14840 mergelist = resume_data.get("mergelist")
14841 if not isinstance(mergelist, list):
# Each resume entry must be a 4-tuple-shaped list:
# (pkg_type, pkg_root, pkg_key, pkg_action).
14844 for x in mergelist:
14845 if not (isinstance(x, list) and len(x) == 4):
14847 pkg_type, pkg_root, pkg_key, pkg_action = x
14848 if pkg_root not in trees:
14849 # Current $ROOT setting differs,
14850 # so the list must be stale.
14856 resume_opts = resume_data.get("myopts")
14857 if not isinstance(resume_opts, (dict, list)):
14860 favorites = resume_data.get("favorites")
14861 if not isinstance(favorites, list):
# Promote the backup resume list if the primary one is gone.
14866 if "--resume" in myopts and \
14867 ("resume" in mtimedb or
14868 "resume_backup" in mtimedb):
14870 if "resume" not in mtimedb:
14871 mtimedb["resume"] = mtimedb["resume_backup"]
14872 del mtimedb["resume_backup"]
14874 # "myopts" is a list for backward compatibility.
14875 resume_opts = mtimedb["resume"].get("myopts", [])
14876 if isinstance(resume_opts, list):
14877 resume_opts = dict((k,True) for k in resume_opts)
# These options never carry over from a resumed command.
14878 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14879 resume_opts.pop(opt, None)
14881 # Current options always override resume_opts.
14882 resume_opts.update(myopts)
14884 myopts.update(resume_opts)
14886 if "--debug" in myopts:
14887 writemsg_level("myopts %s\n" % (myopts,))
14889 # Adjust config according to options of the command being resumed.
14890 for myroot in trees:
14891 mysettings = trees[myroot]["vartree"].settings
14892 mysettings.unlock()
14893 adjust_config(myopts, mysettings)
14895 del myroot, mysettings
14897 ldpath_mtimes = mtimedb["ldpath"]
# Cache frequently-tested option flags.
14900 buildpkgonly = "--buildpkgonly" in myopts
14901 pretend = "--pretend" in myopts
14902 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14903 ask = "--ask" in myopts
14904 nodeps = "--nodeps" in myopts
14905 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14906 tree = "--tree" in myopts
14907 if nodeps and tree:
14909 del myopts["--tree"]
14910 portage.writemsg(colorize("WARN", " * ") + \
14911 "--tree is broken with --nodeps. Disabling...\n")
14912 debug = "--debug" in myopts
14913 verbose = "--verbose" in myopts
14914 quiet = "--quiet" in myopts
14915 if pretend or fetchonly:
14916 # make the mtimedb readonly
14917 mtimedb.filename = None
# Warn that a plain digest regeneration can mask corruption.
14918 if '--digest' in myopts or 'digest' in settings.features:
14919 if '--digest' in myopts:
14920 msg = "The --digest option"
14922 msg = "The FEATURES=digest setting"
14924 msg += " can prevent corruption from being" + \
14925 " noticed. The `repoman manifest` command is the preferred" + \
14926 " way to generate manifests and it is capable of doing an" + \
14927 " entire repository or category at once."
14928 prefix = bad(" * ")
14929 writemsg(prefix + "\n")
14930 from textwrap import wrap
14931 for line in wrap(msg, 72):
14932 writemsg("%s%s\n" % (prefix, line))
14933 writemsg(prefix + "\n")
# Announce what kind of listing is about to be shown.
14935 if "--quiet" not in myopts and \
14936 ("--pretend" in myopts or "--ask" in myopts or \
14937 "--tree" in myopts or "--verbose" in myopts):
14939 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14941 elif "--buildpkgonly" in myopts:
14945 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14947 print darkgreen("These are the packages that would be %s, in reverse order:") % action
14951 print darkgreen("These are the packages that would be %s, in order:") % action
14954 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14955 if not show_spinner:
14956 spinner.update = spinner.update_quiet
# Resume path: rebuild the depgraph from the stored mergelist.
14959 favorites = mtimedb["resume"].get("favorites")
14960 if not isinstance(favorites, list):
14964 print "Calculating dependencies ",
14965 myparams = create_depgraph_params(myopts, myaction)
14967 resume_data = mtimedb["resume"]
14968 mergelist = resume_data["mergelist"]
# --skipfirst: locate the first "merge" task (removal not visible here).
14969 if mergelist and "--skipfirst" in myopts:
14970 for i, task in enumerate(mergelist):
14971 if isinstance(task, list) and \
14972 task and task[-1] == "merge":
14979 success, mydepgraph, dropped_tasks = resume_depgraph(
14980 settings, trees, mtimedb, myopts, myparams, spinner)
14981 except (portage.exception.PackageNotFound,
14982 depgraph.UnsatisfiedResumeDep), e:
14983 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14984 mydepgraph = e.depgraph
14987 from textwrap import wrap
14988 from portage.output import EOutput
# Show the (possibly invalid) resume list for diagnosis.
14991 resume_data = mtimedb["resume"]
14992 mergelist = resume_data.get("mergelist")
14993 if not isinstance(mergelist, list):
14995 if mergelist and debug or (verbose and not quiet):
14996 out.eerror("Invalid resume list:")
14999 for task in mergelist:
15000 if isinstance(task, list):
15001 out.eerror(indent + str(tuple(task)))
15004 if isinstance(e, depgraph.UnsatisfiedResumeDep):
15005 out.eerror("One or more packages are either masked or " + \
15006 "have missing dependencies:")
15009 for dep in e.value:
15010 if dep.atom is None:
15011 out.eerror(indent + "Masked package:")
15012 out.eerror(2 * indent + str(dep.parent))
15015 out.eerror(indent + str(dep.atom) + " pulled in by:")
15016 out.eerror(2 * indent + str(dep.parent))
15018 msg = "The resume list contains packages " + \
15019 "that are either masked or have " + \
15020 "unsatisfied dependencies. " + \
15021 "Please restart/continue " + \
15022 "the operation manually, or use --skipfirst " + \
15023 "to skip the first package in the list and " + \
15024 "any other packages that may be " + \
15025 "masked or have missing dependencies."
15026 for line in wrap(msg, 72):
15028 elif isinstance(e, portage.exception.PackageNotFound):
15029 out.eerror("An expected package is " + \
15030 "not available: %s" % str(e))
15032 msg = "The resume list contains one or more " + \
15033 "packages that are no longer " + \
15034 "available. Please restart/continue " + \
15035 "the operation manually."
15036 for line in wrap(msg, 72):
15040 print "\b\b... done!"
# Report tasks silently dropped by resume_depgraph().
15044 portage.writemsg("!!! One or more packages have been " + \
15045 "dropped due to\n" + \
15046 "!!! masking or unsatisfied dependencies:\n\n",
15048 for task in dropped_tasks:
15049 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
15050 portage.writemsg("\n", noiselevel=-1)
15053 if mydepgraph is not None:
15054 mydepgraph.display_problems()
15055 if not (ask or pretend):
15056 # delete the current list and also the backup
15057 # since it's probably stale too.
15058 for k in ("resume", "resume_backup"):
15059 mtimedb.pop(k, None)
15064 if ("--resume" in myopts):
15065 print darkgreen("emerge: It seems we have nothing to resume...")
# Non-resume path: compute the depgraph from the command-line targets.
15068 myparams = create_depgraph_params(myopts, myaction)
15069 if "--quiet" not in myopts and "--nodeps" not in myopts:
15070 print "Calculating dependencies ",
15072 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
15074 retval, favorites = mydepgraph.select_files(myfiles)
15075 except portage.exception.PackageNotFound, e:
15076 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
15078 except portage.exception.PackageSetNotFound, e:
15079 root_config = trees[settings["ROOT"]]["root_config"]
15080 display_missing_pkg_set(root_config, e.value)
15083 print "\b\b... done!"
15085 mydepgraph.display_problems()
# Interactive / verbose display branch (--ask/--tree/--verbose).
15088 if "--pretend" not in myopts and \
15089 ("--ask" in myopts or "--tree" in myopts or \
15090 "--verbose" in myopts) and \
15091 not ("--quiet" in myopts and "--ask" not in myopts):
15092 if "--resume" in myopts:
15093 mymergelist = mydepgraph.altlist()
15094 if len(mymergelist) == 0:
15095 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
15097 favorites = mtimedb["resume"]["favorites"]
15098 retval = mydepgraph.display(
15099 mydepgraph.altlist(reversed=tree),
15100 favorites=favorites)
15101 mydepgraph.display_problems()
15102 if retval != os.EX_OK:
15104 prompt="Would you like to resume merging these packages?"
15106 retval = mydepgraph.display(
15107 mydepgraph.altlist(reversed=("--tree" in myopts)),
15108 favorites=favorites)
15109 mydepgraph.display_problems()
15110 if retval != os.EX_OK:
15113 for x in mydepgraph.altlist():
15114 if isinstance(x, Package) and x.operation == "merge":
# Filter favorites down to world-file candidates before prompting.
15118 sets = trees[settings["ROOT"]]["root_config"].sets
15119 world_candidates = None
15120 if "--noreplace" in myopts and \
15121 not oneshot and favorites:
15122 # Sets that are not world candidates are filtered
15123 # out here since the favorites list needs to be
15124 # complete for depgraph.loadResumeCommand() to
15125 # operate correctly.
15126 world_candidates = [x for x in favorites \
15127 if not (x.startswith(SETPREFIX) and \
15128 not sets[x[1:]].world_candidate)]
15129 if "--noreplace" in myopts and \
15130 not oneshot and world_candidates:
15132 for x in world_candidates:
15133 print " %s %s" % (good("*"), x)
15134 prompt="Would you like to add these packages to your world favorites?"
15135 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
15136 prompt="Nothing to merge; would you like to auto-clean packages?"
15139 print "Nothing to merge; quitting."
15142 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
15143 prompt="Would you like to fetch the source files for these packages?"
15145 prompt="Would you like to merge these packages?"
15147 if "--ask" in myopts and userquery(prompt) == "No":
15152 # Don't ask again (e.g. when auto-cleaning packages after merge)
15153 myopts.pop("--ask", None)
# Pure --pretend display branch (no prompting, no merging).
15155 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
15156 if ("--resume" in myopts):
15157 mymergelist = mydepgraph.altlist()
15158 if len(mymergelist) == 0:
15159 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
15161 favorites = mtimedb["resume"]["favorites"]
15162 retval = mydepgraph.display(
15163 mydepgraph.altlist(reversed=tree),
15164 favorites=favorites)
15165 mydepgraph.display_problems()
15166 if retval != os.EX_OK:
15169 retval = mydepgraph.display(
15170 mydepgraph.altlist(reversed=("--tree" in myopts)),
15171 favorites=favorites)
15172 mydepgraph.display_problems()
15173 if retval != os.EX_OK:
# --buildpkgonly sanity check: the graph minus nomerge nodes must have no
# unsatisfied (non-zero priority) edges, since nothing will be installed.
15175 if "--buildpkgonly" in myopts:
15176 graph_copy = mydepgraph.digraph.clone()
15177 removed_nodes = set()
15178 for node in graph_copy:
15179 if not isinstance(node, Package) or \
15180 node.operation == "nomerge":
15181 removed_nodes.add(node)
15182 graph_copy.difference_update(removed_nodes)
15183 if not graph_copy.hasallzeros(ignore_priority = \
15184 DepPrioritySatisfiedRange.ignore_medium):
15185 print "\n!!! --buildpkgonly requires all dependencies to be merged."
15186 print "!!! You have to merge the dependencies before you can build this package.\n"
# Same --buildpkgonly check on the non-pretend path.
15189 if "--buildpkgonly" in myopts:
15190 graph_copy = mydepgraph.digraph.clone()
15191 removed_nodes = set()
15192 for node in graph_copy:
15193 if not isinstance(node, Package) or \
15194 node.operation == "nomerge":
15195 removed_nodes.add(node)
15196 graph_copy.difference_update(removed_nodes)
15197 if not graph_copy.hasallzeros(ignore_priority = \
15198 DepPrioritySatisfiedRange.ignore_medium):
15199 print "\n!!! --buildpkgonly requires all dependencies to be merged."
15200 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
# Hand off to the Scheduler; break_refs/del free the depgraph's memory
# before the (potentially long) merge begins.
15203 if ("--resume" in myopts):
15204 favorites=mtimedb["resume"]["favorites"]
15205 mymergelist = mydepgraph.altlist()
15206 mydepgraph.break_refs(mymergelist)
15207 mergetask = Scheduler(settings, trees, mtimedb, myopts,
15208 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
15209 del mydepgraph, mymergelist
15210 clear_caches(trees)
15212 retval = mergetask.merge()
15213 merge_count = mergetask.curval
# Keep a backup of a multi-package resume list before replacing it.
15215 if "resume" in mtimedb and \
15216 "mergelist" in mtimedb["resume"] and \
15217 len(mtimedb["resume"]["mergelist"]) > 1:
15218 mtimedb["resume_backup"] = mtimedb["resume"]
15219 del mtimedb["resume"]
15221 mtimedb["resume"]={}
15222 # Stored as a dict starting with portage-2.1.6_rc1, and supported
15223 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
15224 # a list type for options.
15225 mtimedb["resume"]["myopts"] = myopts.copy()
15227 # Convert Atom instances to plain str.
15228 mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
15230 pkglist = mydepgraph.altlist()
15231 mydepgraph.saveNomergeFavorites()
15232 mydepgraph.break_refs(pkglist)
15233 mergetask = Scheduler(settings, trees, mtimedb, myopts,
15234 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
15235 del mydepgraph, pkglist
15236 clear_caches(trees)
15238 retval = mergetask.merge()
15239 merge_count = mergetask.curval
# Post-merge auto-clean (only after a real, successful merge).
15241 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
15242 if "yes" == settings.get("AUTOCLEAN"):
15243 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
15244 unmerge(trees[settings["ROOT"]]["root_config"],
15245 myopts, "clean", [],
15246 ldpath_mtimes, autoclean=1)
15248 portage.writemsg_stdout(colorize("WARN", "WARNING:")
15249 + " AUTOCLEAN is disabled. This can cause serious"
15250 + " problems due to overlapping packages.\n")
15251 trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
# Report that two mutually-exclusive emerge actions were requested.
# NOTE(review): the terminating statement (original line ~15258, presumably
# sys.exit(1)) is not visible in this chunk.
15255 def multiple_actions(action1, action2):
15256 sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
15257 sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
# Preprocess argv so that options with optional values (--deselect,
# --root-deps, -j/--jobs) always carry an explicit value, since optparse
# cannot handle optional option-arguments natively.
# NOTE(review): source is mangled — stray numeric prefixes and missing lines;
# comments below are added in place only.
15260 def insert_optional_args(args):
15262 Parse optional arguments and insert a value if one has
15263 not been provided. This is done before feeding the args
15264 to the optparse parser since that parser does not support
15265 this feature natively.
15269 jobs_opts = ("-j", "--jobs")
# Maps each optional-value option to the values a user may legally supply;
# when the next token is not one of these, a default of 'True' is inserted.
15270 default_arg_opts = {
15271 '--deselect' : ('n',),
15272 '--root-deps' : ('rdeps',),
# Process args as a stack (reversed so pop() yields them left-to-right).
15274 arg_stack = args[:]
15275 arg_stack.reverse()
15277 arg = arg_stack.pop()
15279 default_arg_choices = default_arg_opts.get(arg)
15280 if default_arg_choices is not None:
15281 new_args.append(arg)
15282 if arg_stack and arg_stack[-1] in default_arg_choices:
15283 new_args.append(arg_stack.pop())
15285 # insert default argument
15286 new_args.append('True')
# Detect a bundled short jobs option such as "-vj" or "-j4".
15289 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
15290 if not (short_job_opt or arg in jobs_opts):
15291 new_args.append(arg)
15294 # Insert an empty placeholder in order to
15295 # satisfy the requirements of optparse.
15297 new_args.append("--jobs")
# Split a count or sibling short flags fused onto "-j" (e.g. "-j4", "-vj").
15300 if short_job_opt and len(arg) > 2:
15301 if arg[:2] == "-j":
15303 job_count = int(arg[2:])
15305 saved_opts = arg[2:]
15308 saved_opts = arg[1:].replace("j", "")
# A standalone "-j" may take its count from the following token.
15310 if job_count is None and arg_stack:
15312 job_count = int(arg_stack[-1])
15316 # Discard the job count from the stack
15317 # since we're consuming it here.
15320 if job_count is None:
15321 # unlimited number of jobs
15322 new_args.append("True")
15324 new_args.append(str(job_count))
# Re-emit any short flags that were fused with "-j".
15326 if saved_opts is not None:
15327 new_args.append("-" + saved_opts)
# Parse the emerge command line into (action, options dict, file/atom args)
# using optparse, after insert_optional_args() has normalized optional
# option-values.
# NOTE(review): source is mangled — stray numeric prefixes and missing lines;
# comments below are added in place only.
15331 def parse_opts(tmpcmdline, silent=False):
15336 global actions, options, shortmapping
15338 longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
# Options that take an argument, keyed by long option name; each value is a
# kwargs dict passed straight to OptionParser.add_option().
15339 argument_options = {
15341 "help":"specify the location for portage configuration files",
15345 "help":"enable or disable color output",
15347 "choices":("y", "n")
15351 "help" : "remove atoms from the world file",
15353 "choices" : ("True", "n")
15358 "help" : "Specifies the number of packages to build " + \
15364 "--load-average": {
15366 "help" :"Specifies that no new builds should be started " + \
15367 "if there are other builds running and the load average " + \
15368 "is at least LOAD (a floating-point number).",
15374 "help":"include unnecessary build time dependencies",
15376 "choices":("y", "n")
15379 "help":"specify conditions to trigger package reinstallation",
15381 "choices":["changed-use"]
15384 "help" : "specify the target root filesystem for merging packages",
# NOTE(review): "depedencies" below is a typo for "dependencies" in a
# user-visible help string; left untouched here since this edit only
# adds comments.
15389 "help" : "modify interpretation of depedencies",
15391 "choices" :("True", "rdeps")
15395 from optparse import OptionParser
15396 parser = OptionParser()
15397 if parser.has_option("--help"):
15398 parser.remove_option("--help")
# Register boolean action flags, plain options, short aliases and long
# aliases; dest names are derived by stripping dashes.
15400 for action_opt in actions:
15401 parser.add_option("--" + action_opt, action="store_true",
15402 dest=action_opt.replace("-", "_"), default=False)
15403 for myopt in options:
15404 parser.add_option(myopt, action="store_true",
15405 dest=myopt.lstrip("--").replace("-", "_"), default=False)
15406 for shortopt, longopt in shortmapping.iteritems():
15407 parser.add_option("-" + shortopt, action="store_true",
15408 dest=longopt.lstrip("--").replace("-", "_"), default=False)
15409 for myalias, myopt in longopt_aliases.iteritems():
15410 parser.add_option(myalias, action="store_true",
15411 dest=myopt.lstrip("--").replace("-", "_"), default=False)
15413 for myopt, kwargs in argument_options.iteritems():
15414 parser.add_option(myopt,
15415 dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
15417 tmpcmdline = insert_optional_args(tmpcmdline)
15419 myoptions, myargs = parser.parse_args(args=tmpcmdline)
# Convert the "True" placeholder strings inserted by insert_optional_args()
# back into real booleans.
15421 if myoptions.deselect == "True":
15422 myoptions.deselect = True
15424 if myoptions.root_deps == "True":
15425 myoptions.root_deps = True
15429 if myoptions.jobs == "True":
15433 jobs = int(myoptions.jobs)
15437 if jobs is not True and \
15441 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
15442 (myoptions.jobs,), noiselevel=-1)
15444 myoptions.jobs = jobs
# --load-average must parse as a positive float; otherwise it is dropped.
15446 if myoptions.load_average:
15448 load_average = float(myoptions.load_average)
15452 if load_average <= 0.0:
15453 load_average = None
15455 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
15456 (myoptions.load_average,), noiselevel=-1)
15458 myoptions.load_average = load_average
# Collect set flags and argument options into the myopts dict.
15460 for myopt in options:
15461 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
15463 myopts[myopt] = True
15465 for myopt in argument_options:
15466 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
15470 if myoptions.searchdesc:
15471 myoptions.search = True
# Exactly one action may be requested.
15473 for action_opt in actions:
15474 v = getattr(myoptions, action_opt.replace("-", "_"))
15477 multiple_actions(myaction, action_opt)
15479 myaction = action_opt
15481 if myaction is None and myoptions.deselect is True:
15482 myaction = 'deselect'
15486 return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
	"""Run config.validate() on the vartree settings of every root.

	@param trees: mapping of root paths to their tree dictionaries
	"""
	for root in trees:
		root_settings = trees[root]["vartree"].settings
		root_settings.validate()
def clear_caches(trees):
	"""Drop the in-memory metadata caches held by each tree's dbapi
	objects, then clear portage's global directory cache.

	Called before handing control to the Scheduler so that stale cached
	state does not survive into the merge phase.
	"""
	for tree_group in trees.itervalues():
		porttree_db = tree_group["porttree"].dbapi
		porttree_db.melt()
		porttree_db._aux_cache.clear()
		bintree_db = tree_group["bintree"].dbapi
		bintree_db._aux_cache.clear()
		bintree_db._clear_cache()
		tree_group["vartree"].dbapi.linkmap._clear_cache()
	portage.dircache.clear()
# Create the portage configuration: build trees from the environment's
# PORTAGE_CONFIGROOT/ROOT, attach a RootConfig per root, and open the
# mtimedb. Returns (settings, trees, mtimedb).
# NOTE(review): source is mangled — stray numeric prefixes and missing lines;
# comments below are added in place only.
15503 def load_emerge_config(trees=None):
15505 for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
15506 v = os.environ.get(envvar, None)
15507 if v and v.strip():
15509 trees = portage.create_trees(trees=trees, **kwargs)
# Attach set configuration and a RootConfig to every root.
15511 for root, root_trees in trees.iteritems():
15512 settings = root_trees["vartree"].settings
15513 setconfig = load_default_config(settings, root_trees)
15514 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
15516 settings = trees["/"]["vartree"].settings
15518 for myroot in trees:
15520 settings = trees[myroot]["vartree"].settings
# The mtimedb always lives under the host root's cache path.
15523 mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
15524 mtimedb = portage.MtimeDB(mtimedbfile)
15526 return settings, trees, mtimedb
# NOTE(review): source is mangled — stray numeric prefixes and missing lines;
# comments below are added in place only.
15528 def adjust_config(myopts, settings):
15529 """Make emerge specific adjustments to the config."""
15531 # To enhance usability, make some vars case insensitive by forcing them to
15533 for myvar in ("AUTOCLEAN", "NOCOLOR"):
15534 if myvar in settings:
15535 settings[myvar] = settings[myvar].lower()
15536 settings.backup_changes(myvar)
15539 # Kill noauto as it will break merges otherwise.
15540 if "noauto" in settings.features:
15541 settings.features.remove('noauto')
15542 settings['FEATURES'] = ' '.join(sorted(settings.features))
15543 settings.backup_changes("FEATURES")
# Parse CLEAN_DELAY, falling back to the built-in default on bad input.
15547 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
15548 except ValueError, e:
15549 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15550 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
15551 settings["CLEAN_DELAY"], noiselevel=-1)
15552 settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
15553 settings.backup_changes("CLEAN_DELAY")
# Parse EMERGE_WARNING_DELAY the same way (default: 10 seconds).
15555 EMERGE_WARNING_DELAY = 10
15557 EMERGE_WARNING_DELAY = int(settings.get(
15558 "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
15559 except ValueError, e:
15560 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15561 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
15562 settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
15563 settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
15564 settings.backup_changes("EMERGE_WARNING_DELAY")
# Propagate verbosity flags into the environment config.
15566 if "--quiet" in myopts:
15567 settings["PORTAGE_QUIET"]="1"
15568 settings.backup_changes("PORTAGE_QUIET")
15570 if "--verbose" in myopts:
15571 settings["PORTAGE_VERBOSE"] = "1"
15572 settings.backup_changes("PORTAGE_VERBOSE")
15574 # Set so that configs will be merged regardless of remembered status
15575 if ("--noconfmem" in myopts):
15576 settings["NOCONFMEM"]="1"
15577 settings.backup_changes("NOCONFMEM")
15579 # Set various debug markers... They should be merged somehow.
# PORTAGE_DEBUG must be 0 or 1; invalid values are reported, not fatal.
15582 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
15583 if PORTAGE_DEBUG not in (0, 1):
15584 portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
15585 PORTAGE_DEBUG, noiselevel=-1)
15586 portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
15589 except ValueError, e:
15590 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15591 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
15592 settings["PORTAGE_DEBUG"], noiselevel=-1)
15594 if "--debug" in myopts:
15596 settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
15597 settings.backup_changes("PORTAGE_DEBUG")
# Color handling: NOCOLOR, --color and a tty check all interact here.
15599 if settings.get("NOCOLOR") not in ("yes","true"):
15600 portage.output.havecolor = 1
15602 """The explicit --color < y | n > option overrides the NOCOLOR environment
15603 variable and stdout auto-detection."""
15604 if "--color" in myopts:
15605 if "y" == myopts["--color"]:
15606 portage.output.havecolor = 1
15607 settings["NOCOLOR"] = "false"
15609 portage.output.havecolor = 0
15610 settings["NOCOLOR"] = "true"
15611 settings.backup_changes("NOCOLOR")
# Without an explicit --color, disable color when stdout is not a tty
# (unless NOCOLOR is explicitly "no").
15612 elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
15613 portage.output.havecolor = 0
15614 settings["NOCOLOR"] = "true"
15615 settings.backup_changes("NOCOLOR")
# Apply process priority adjustments from the configuration.
# NOTE(review): the body (original lines ~15618-15620) is missing from this
# chunk; presumably it calls nice(settings) and ionice(settings) below —
# TODO confirm against the full source.
15617 def apply_priorities(settings):
# Renice the current process to PORTAGE_NICENESS (default "0"), reporting
# failure without raising.
# NOTE(review): the opening of the try block (original line ~15622) is missing
# from this chunk.
15621 def nice(settings):
15623 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
# OSError: os.nice() refused; ValueError: PORTAGE_NICENESS is not an int.
15624 except (OSError, ValueError), e:
15625 out = portage.output.EOutput()
15626 out.eerror("Failed to change nice value to '%s'" % \
15627 settings["PORTAGE_NICENESS"])
15628 out.eerror("%s\n" % str(e))
# Run PORTAGE_IONICE_COMMAND (if configured) with ${PID} expanded to the
# current process id, to adjust I/O scheduling priority.
# NOTE(review): source is mangled — stray numeric prefixes and missing lines;
# comments below are added in place only.
15630 def ionice(settings):
15632 ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
15634 ionice_cmd = shlex.split(ionice_cmd)
15638 from portage.util import varexpand
# Substitute ${PID} in each argument of the configured command.
15639 variables = {"PID" : str(os.getpid())}
15640 cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
15643 rval = portage.process.spawn(cmd, env=os.environ)
15644 except portage.exception.CommandNotFound:
15645 # The OS kernel probably doesn't support ionice,
15646 # so return silently.
15649 if rval != os.EX_OK:
15650 out = portage.output.EOutput()
15651 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
15652 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
# Print an error naming the unknown set and list every set that does exist
# in root_config, at logging.ERROR level.
# NOTE(review): source is mangled — stray numeric prefixes and missing lines;
# comments below are added in place only.
15654 def display_missing_pkg_set(root_config, set_name):
15657 msg.append(("emerge: There are no sets to satisfy '%s'. " + \
15658 "The following sets exist:") % \
15659 colorize("INFORM", set_name))
15662 for s in sorted(root_config.sets):
15663 msg.append(" %s" % s)
15666 writemsg_level("".join("%s\n" % l for l in msg),
15667 level=logging.ERROR, noiselevel=-1)
# Expand @set arguments on the command line: translate bare "system"/"world",
# apply inline set options in SETPREFIX/name(opt=val,...) form, evaluate the
# simple set algebra operators (@a-@b, @a+@b, intersection), and replace each
# set with its atoms unless the action expands sets itself.
# Returns (newargs, retval).
# NOTE(review): source is mangled — stray numeric prefixes and missing lines;
# comments below are added in place only.
15669 def expand_set_arguments(myfiles, myaction, root_config):
15671 setconfig = root_config.setconfig
15673 sets = setconfig.getSets()
15675 # In order to know exactly which atoms/sets should be added to the
15676 # world file, the depgraph performs set expansion later. It will get
15677 # confused about where the atoms came from if it's not allowed to
15678 # expand them itself.
15679 do_not_expand = (None, )
# Bare "system"/"world" are accepted as a convenience for @system/@world.
15682 if a in ("system", "world"):
15683 newargs.append(SETPREFIX+a)
15690 # separators for set arguments
15694 # WARNING: all operators must be of equal length
15696 DIFF_OPERATOR = "-@"
15697 UNION_OPERATOR = "+@"
# First pass: strip inline option blocks like @name(key=val,flag) and feed
# them to setconfig.update() before the sets are instantiated.
15699 for i in range(0, len(myfiles)):
15700 if myfiles[i].startswith(SETPREFIX):
15703 x = myfiles[i][len(SETPREFIX):]
15706 start = x.find(ARG_START)
15707 end = x.find(ARG_END)
15708 if start > 0 and start < end:
15709 namepart = x[:start]
15710 argpart = x[start+1:end]
15712 # TODO: implement proper quoting
15713 args = argpart.split(",")
15717 k, v = a.split("=", 1)
# A bare option name is treated as the flag option=True.
15720 options[a] = "True"
15721 setconfig.update(namepart, options)
15722 newset += (x[:start-len(namepart)]+namepart)
15723 x = x[end+len(ARG_END):]
15727 myfiles[i] = SETPREFIX+newset
# Re-read the sets now that inline options have been applied.
15729 sets = setconfig.getSets()
15731 # display errors that occurred while loading the SetConfig instance
15732 for e in setconfig.errors:
15733 print colorize("BAD", "Error during set creation: %s" % e)
15735 # emerge relies on the existence of sets with names "world" and "system"
15736 required_sets = ("world", "system")
15739 for s in required_sets:
15741 missing_sets.append(s)
# Compose a human-readable list of the missing required sets.
15743 if len(missing_sets) > 2:
15744 missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
15745 missing_sets_str += ', and "%s"' % missing_sets[-1]
15746 elif len(missing_sets) == 2:
15747 missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
15749 missing_sets_str = '"%s"' % missing_sets[-1]
15750 msg = ["emerge: incomplete set configuration, " + \
15751 "missing set(s): %s" % missing_sets_str]
15753 msg.append(" sets defined: %s" % ", ".join(sets))
15754 msg.append(" This usually means that '%s'" % \
15755 (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
15756 msg.append(" is missing or corrupt.")
15758 writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
15760 unmerge_actions = ("unmerge", "prune", "clean", "depclean")
# Second pass: evaluate each @set argument.
15763 if a.startswith(SETPREFIX):
15764 # support simple set operations (intersection, difference and union)
15765 # on the commandline. Expressions are evaluated strictly left-to-right
15766 if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
15767 expression = a[len(SETPREFIX):]
# Peel operators off the right end until only the leftmost set remains.
15770 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15771 is_pos = expression.rfind(IS_OPERATOR)
15772 diff_pos = expression.rfind(DIFF_OPERATOR)
15773 union_pos = expression.rfind(UNION_OPERATOR)
15774 op_pos = max(is_pos, diff_pos, union_pos)
15775 s1 = expression[:op_pos]
15776 s2 = expression[op_pos+len(IS_OPERATOR):]
15777 op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15779 display_missing_pkg_set(root_config, s2)
15781 expr_sets.insert(0, s2)
15782 expr_ops.insert(0, op)
15784 if not expression in sets:
15785 display_missing_pkg_set(root_config, expression)
# Apply the collected operators left-to-right over the atom sets.
15787 expr_sets.insert(0, expression)
15788 result = set(setconfig.getSetAtoms(expression))
15789 for i in range(0, len(expr_ops)):
15790 s2 = setconfig.getSetAtoms(expr_sets[i+1])
15791 if expr_ops[i] == IS_OPERATOR:
15792 result.intersection_update(s2)
15793 elif expr_ops[i] == DIFF_OPERATOR:
15794 result.difference_update(s2)
15795 elif expr_ops[i] == UNION_OPERATOR:
15798 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15799 newargs.extend(result)
# Plain @set argument (no operators).
15801 s = a[len(SETPREFIX):]
15803 display_missing_pkg_set(root_config, s)
15805 setconfig.active.append(s)
15807 set_atoms = setconfig.getSetAtoms(s)
15808 except portage.exception.PackageSetNotFound, e:
15809 writemsg_level(("emerge: the given set '%s' " + \
15810 "contains a non-existent set named '%s'.\n") % \
15811 (s, e), level=logging.ERROR, noiselevel=-1)
# Unmerge-style actions require the set to support unmerging.
15813 if myaction in unmerge_actions and \
15814 not sets[s].supportsOperation("unmerge"):
15815 sys.stderr.write("emerge: the given set '%s' does " % s + \
15816 "not support unmerge operations\n")
15818 elif not set_atoms:
15819 print "emerge: '%s' is an empty set" % s
15820 elif myaction not in do_not_expand:
15821 newargs.extend(set_atoms)
# Actions in do_not_expand get the set name itself; the depgraph expands it.
15823 newargs.append(SETPREFIX+s)
15824 for e in sets[s].errors:
15828 return (newargs, retval)
# Warn about repositories lacking a profiles/repo_name entry. Returns True
# when at least one repository is missing one.
# NOTE(review): source is mangled — stray numeric prefixes and missing lines;
# comments below are added in place only.
15830 def repo_name_check(trees):
15831 missing_repo_names = set()
15832 for root, root_trees in trees.iteritems():
15833 if "porttree" in root_trees:
15834 portdb = root_trees["porttree"].dbapi
# Start from all porttrees, then discard those with a known repo name.
15835 missing_repo_names.update(portdb.porttrees)
15836 repos = portdb.getRepositories()
15838 missing_repo_names.discard(portdb.getRepositoryPath(r))
15839 if portdb.porttree_root in missing_repo_names and \
15840 not os.path.exists(os.path.join(
15841 portdb.porttree_root, "profiles")):
15842 # This is normal if $PORTDIR happens to be empty,
15843 # so don't warn about it.
15844 missing_repo_names.remove(portdb.porttree_root)
15846 if missing_repo_names:
15848 msg.append("WARNING: One or more repositories " + \
15849 "have missing repo_name entries:")
15851 for p in missing_repo_names:
15852 msg.append("\t%s/profiles/repo_name" % (p,))
15854 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
15855 "should be a plain text file containing a unique " + \
15856 "name for the repository on the first line.", 70))
15857 writemsg_level("".join("%s\n" % l for l in msg),
15858 level=logging.WARNING, noiselevel=-1)
15860 return bool(missing_repo_names)
def repo_name_duplicate_check(trees):
	"""Warn about overlay trees that were ignored because their
	profiles/repo_name duplicates that of another tree.

	The warning can be suppressed by setting
	PORTAGE_REPO_DUPLICATE_WARN="0" in make.conf.

	@param trees: the standard emerge trees dict, keyed by $ROOT
	@rtype: bool
	@return: True if any duplicate repositories were ignored
	"""
	ignored_repos = {}
	for root, root_trees in trees.iteritems():
		if 'porttree' in root_trees:
			portdb = root_trees['porttree'].dbapi
			if portdb.mysettings.get('PORTAGE_REPO_DUPLICATE_WARN') != '0':
				for repo_name, paths in portdb._ignored_repos:
					# Key on (root, name, winning path) so identical
					# conflicts in different roots are reported separately.
					k = (root, repo_name, portdb.getRepositoryPath(repo_name))
					ignored_repos.setdefault(k, []).extend(paths)

	if ignored_repos:
		msg = []
		msg.append('WARNING: One or more repositories ' + \
			'have been ignored due to duplicate')
		msg.append('  profiles/repo_name entries:')
		msg.append('')
		for k in sorted(ignored_repos):
			msg.append('  %s overrides' % (k,))
			for path in ignored_repos[k]:
				msg.append('    %s' % (path,))
			msg.append('')
		msg.extend('  ' + x for x in textwrap.wrap(
			"All profiles/repo_name entries must be unique in order " + \
			"to avoid having duplicates ignored. " + \
			"Set PORTAGE_REPO_DUPLICATE_WARN=\"0\" in " + \
			"/etc/make.conf if you would like to disable this warning."))
		writemsg_level(''.join('%s\n' % l for l in msg),
			level=logging.WARNING, noiselevel=-1)

	return bool(ignored_repos)
def config_protect_check(trees):
	"""Warn when CONFIG_PROTECT is unset for any configured root.

	@param trees: the standard emerge trees dict, keyed by $ROOT
	@rtype: None
	"""
	for root, root_trees in trees.iteritems():
		if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
			msg = "!!! CONFIG_PROTECT is empty"
			# Only name the root explicitly when it isn't the default "/".
			if root != "/":
				msg += " for '%s'" % root
			writemsg_level(msg, level=logging.WARN, noiselevel=-1)
15901 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
15903 if "--quiet" in myopts:
15904 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15905 print "!!! one of the following fully-qualified ebuild names instead:\n"
15906 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15907 print " " + colorize("INFORM", cp)
15910 s = search(root_config, spinner, "--searchdesc" in myopts,
15911 "--quiet" not in myopts, "--usepkg" in myopts,
15912 "--usepkgonly" in myopts)
15913 null_cp = portage.dep_getkey(insert_category_into_atom(
15915 cat, atom_pn = portage.catsplit(null_cp)
15916 s.searchkey = atom_pn
15917 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15920 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15921 print "!!! one of the above fully-qualified ebuild names instead.\n"
def profile_check(trees, myaction, myopts):
	"""Verify that every configured root has a valid profile.

	Actions that must work even with a broken profile ("info", "sync",
	--help, --version) are always permitted.

	@param trees: the standard emerge trees dict, keyed by $ROOT
	@param myaction: the requested emerge action, or None
	@param myopts: parsed emerge options dict
	@rtype: int
	@return: os.EX_OK if all profiles are valid (or the action is exempt),
		1 otherwise
	"""
	if myaction in ("info", "sync"):
		return os.EX_OK
	elif "--version" in myopts or "--help" in myopts:
		return os.EX_OK
	for root, root_trees in trees.iteritems():
		if root_trees["root_config"].settings.profiles:
			continue
		# generate some profile related warning messages
		validate_ebuild_environment(trees)
		msg = "If you have just changed your profile configuration, you " + \
			"should revert back to the previous configuration. Due to " + \
			"your current profile being invalid, allowed actions are " + \
			"limited to --help, --info, --sync, and --version."
		writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
			level=logging.ERROR, noiselevel=-1)
		return 1
	return os.EX_OK
15943 global portage # NFC why this is necessary now - genone
15944 portage._disable_legacy_globals()
15945 # Disable color until we're sure that it should be enabled (after
15946 # EMERGE_DEFAULT_OPTS has been parsed).
15947 portage.output.havecolor = 0
15948 # This first pass is just for options that need to be known as early as
15949 # possible, such as --config-root. They will be parsed again later,
15950 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15951 # the value of --config-root).
15952 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15953 if "--debug" in myopts:
15954 os.environ["PORTAGE_DEBUG"] = "1"
15955 if "--config-root" in myopts:
15956 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15957 if "--root" in myopts:
15958 os.environ["ROOT"] = myopts["--root"]
15960 # Portage needs to ensure a sane umask for the files it creates.
15962 settings, trees, mtimedb = load_emerge_config()
15963 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15964 rval = profile_check(trees, myaction, myopts)
15965 if rval != os.EX_OK:
15968 if portage._global_updates(trees, mtimedb["updates"]):
15970 # Reload the whole config from scratch.
15971 settings, trees, mtimedb = load_emerge_config(trees=trees)
15972 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15974 xterm_titles = "notitles" not in settings.features
15977 if "--ignore-default-opts" not in myopts:
15978 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15979 tmpcmdline.extend(sys.argv[1:])
15980 myaction, myopts, myfiles = parse_opts(tmpcmdline)
15982 if "--digest" in myopts:
15983 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15984 # Reload the whole config from scratch so that the portdbapi internal
15985 # config is updated with new FEATURES.
15986 settings, trees, mtimedb = load_emerge_config(trees=trees)
15987 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15989 for myroot in trees:
15990 mysettings = trees[myroot]["vartree"].settings
15991 mysettings.unlock()
15992 adjust_config(myopts, mysettings)
15993 if '--pretend' not in myopts and myaction in \
15994 (None, 'clean', 'depclean', 'prune', 'unmerge'):
15995 mysettings["PORTAGE_COUNTER_HASH"] = \
15996 trees[myroot]["vartree"].dbapi._counter_hash()
15997 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15999 del myroot, mysettings
16001 apply_priorities(settings)
16003 spinner = stdout_spinner()
16004 if "candy" in settings.features:
16005 spinner.update = spinner.update_scroll
16007 if "--quiet" not in myopts:
16008 portage.deprecated_profile_check(settings=settings)
16009 repo_name_check(trees)
16010 repo_name_duplicate_check(trees)
16011 config_protect_check(trees)
16013 for mytrees in trees.itervalues():
16014 mydb = mytrees["porttree"].dbapi
16015 # Freeze the portdbapi for performance (memoize all xmatch results).
16019 if "moo" in myfiles:
16022 Larry loves Gentoo (""" + platform.system() + """)
16024 _______________________
16025 < Have you mooed today? >
16026 -----------------------
16036 ext = os.path.splitext(x)[1]
16037 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
16038 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
16041 root_config = trees[settings["ROOT"]]["root_config"]
16042 if myaction == "list-sets":
16043 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
16047 # only expand sets for actions taking package arguments
16048 oldargs = myfiles[:]
16049 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
16050 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
16051 if retval != os.EX_OK:
16054 # Need to handle empty sets specially, otherwise emerge will react
16055 # with the help message for empty argument lists
16056 if oldargs and not myfiles:
16057 print "emerge: no targets left after set expansion"
16060 if ("--tree" in myopts) and ("--columns" in myopts):
16061 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
16064 if ("--quiet" in myopts):
16065 spinner.update = spinner.update_quiet
16066 portage.util.noiselimit = -1
16068 # Always create packages if FEATURES=buildpkg
16069 # Imply --buildpkg if --buildpkgonly
16070 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
16071 if "--buildpkg" not in myopts:
16072 myopts["--buildpkg"] = True
16074 # Always try and fetch binary packages if FEATURES=getbinpkg
16075 if ("getbinpkg" in settings.features):
16076 myopts["--getbinpkg"] = True
16078 if "--buildpkgonly" in myopts:
16079 # --buildpkgonly will not merge anything, so
16080 # it cancels all binary package options.
16081 for opt in ("--getbinpkg", "--getbinpkgonly",
16082 "--usepkg", "--usepkgonly"):
16083 myopts.pop(opt, None)
16085 if "--fetch-all-uri" in myopts:
16086 myopts["--fetchonly"] = True
16088 if "--skipfirst" in myopts and "--resume" not in myopts:
16089 myopts["--resume"] = True
16091 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
16092 myopts["--usepkgonly"] = True
16094 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
16095 myopts["--getbinpkg"] = True
16097 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
16098 myopts["--usepkg"] = True
16100 # Also allow -K to apply --usepkg/-k
16101 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
16102 myopts["--usepkg"] = True
16104 # Allow -p to remove --ask
16105 if ("--pretend" in myopts) and ("--ask" in myopts):
16106 print ">>> --pretend disables --ask... removing --ask from options."
16107 del myopts["--ask"]
16109 # forbid --ask when not in a terminal
16110 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
16111 if ("--ask" in myopts) and (not sys.stdin.isatty()):
16112 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
16116 if settings.get("PORTAGE_DEBUG", "") == "1":
16117 spinner.update = spinner.update_quiet
16119 if "python-trace" in settings.features:
16120 import portage.debug
16121 portage.debug.set_trace(True)
16123 if not ("--quiet" in myopts):
16124 if not sys.stdout.isatty() or ("--nospinner" in myopts):
16125 spinner.update = spinner.update_basic
16127 if myaction == 'version':
16128 print getportageversion(settings["PORTDIR"], settings["ROOT"],
16129 settings.profile_path, settings["CHOST"],
16130 trees[settings["ROOT"]]["vartree"].dbapi)
16132 elif "--help" in myopts:
16133 _emerge.help.help(myaction, myopts, portage.output.havecolor)
16136 if "--debug" in myopts:
16137 print "myaction", myaction
16138 print "myopts", myopts
16140 if not myaction and not myfiles and "--resume" not in myopts:
16141 _emerge.help.help(myaction, myopts, portage.output.havecolor)
16144 pretend = "--pretend" in myopts
16145 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
16146 buildpkgonly = "--buildpkgonly" in myopts
16148 # check if root user is the current user for the actions where emerge needs this
16149 if portage.secpass < 2:
16150 # We've already allowed "--version" and "--help" above.
16151 if "--pretend" not in myopts and myaction not in ("search","info"):
16152 need_superuser = myaction in ('clean', 'depclean', 'deselect',
16153 'prune', 'unmerge') or not \
16155 (buildpkgonly and secpass >= 1) or \
16156 myaction in ("metadata", "regen") or \
16157 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
16158 if portage.secpass < 1 or \
16161 access_desc = "superuser"
16163 access_desc = "portage group"
16164 # Always show portage_group_warning() when only portage group
16165 # access is required but the user is not in the portage group.
16166 from portage.data import portage_group_warning
16167 if "--ask" in myopts:
16168 myopts["--pretend"] = True
16169 del myopts["--ask"]
16170 print ("%s access is required... " + \
16171 "adding --pretend to options.\n") % access_desc
16172 if portage.secpass < 1 and not need_superuser:
16173 portage_group_warning()
16175 sys.stderr.write(("emerge: %s access is " + \
16176 "required.\n\n") % access_desc)
16177 if portage.secpass < 1 and not need_superuser:
16178 portage_group_warning()
16181 disable_emergelog = False
16182 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
16184 disable_emergelog = True
16186 if myaction in ("search", "info"):
16187 disable_emergelog = True
16188 if disable_emergelog:
16189 """ Disable emergelog for everything except build or unmerge
16190 operations. This helps minimize parallel emerge.log entries that can
16191 confuse log parsers. We especially want it disabled during
16192 parallel-fetch, which uses --resume --fetchonly."""
16194 def emergelog(*pargs, **kargs):
16197 if not "--pretend" in myopts:
16198 emergelog(xterm_titles, "Started emerge on: "+\
16199 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
16202 myelogstr=" ".join(myopts)
16204 myelogstr+=" "+myaction
16206 myelogstr += " " + " ".join(oldargs)
16207 emergelog(xterm_titles, " *** emerge " + myelogstr)
16210 def emergeexitsig(signum, frame):
16211 signal.signal(signal.SIGINT, signal.SIG_IGN)
16212 signal.signal(signal.SIGTERM, signal.SIG_IGN)
16213 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
16214 sys.exit(100+signum)
16215 signal.signal(signal.SIGINT, emergeexitsig)
16216 signal.signal(signal.SIGTERM, emergeexitsig)
16219 """This gets out final log message in before we quit."""
16220 if "--pretend" not in myopts:
16221 emergelog(xterm_titles, " *** terminating.")
16222 if "notitles" not in settings.features:
16224 portage.atexit_register(emergeexit)
16226 if myaction in ("config", "metadata", "regen", "sync"):
16227 if "--pretend" in myopts:
16228 sys.stderr.write(("emerge: The '%s' action does " + \
16229 "not support '--pretend'.\n") % myaction)
16232 if "sync" == myaction:
16233 return action_sync(settings, trees, mtimedb, myopts, myaction)
16234 elif "metadata" == myaction:
16235 action_metadata(settings, portdb, myopts)
16236 elif myaction=="regen":
16237 validate_ebuild_environment(trees)
16238 return action_regen(settings, portdb, myopts.get("--jobs"),
16239 myopts.get("--load-average"))
16241 elif "config"==myaction:
16242 validate_ebuild_environment(trees)
16243 action_config(settings, trees, myopts, myfiles)
16246 elif "search"==myaction:
16247 validate_ebuild_environment(trees)
16248 action_search(trees[settings["ROOT"]]["root_config"],
16249 myopts, myfiles, spinner)
16251 elif myaction in ('clean', 'depclean', 'deselect', 'prune', 'unmerge'):
16252 validate_ebuild_environment(trees)
16253 rval = action_uninstall(settings, trees, mtimedb["ldpath"],
16254 myopts, myaction, myfiles, spinner)
16255 if not (myaction == 'deselect' or buildpkgonly or fetchonly or pretend):
16256 post_emerge(root_config, myopts, mtimedb, rval)
16259 elif myaction == 'info':
16261 # Ensure atoms are valid before calling unmerge().
16262 vardb = trees[settings["ROOT"]]["vartree"].dbapi
16265 if is_valid_package_atom(x):
16267 valid_atoms.append(
16268 portage.dep_expand(x, mydb=vardb, settings=settings))
16269 except portage.exception.AmbiguousPackageName, e:
16270 msg = "The short ebuild name \"" + x + \
16271 "\" is ambiguous. Please specify " + \
16272 "one of the following " + \
16273 "fully-qualified ebuild names instead:"
16274 for line in textwrap.wrap(msg, 70):
16275 writemsg_level("!!! %s\n" % (line,),
16276 level=logging.ERROR, noiselevel=-1)
16278 writemsg_level(" %s\n" % colorize("INFORM", i),
16279 level=logging.ERROR, noiselevel=-1)
16280 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
16284 msg.append("'%s' is not a valid package atom." % (x,))
16285 msg.append("Please check ebuild(5) for full details.")
16286 writemsg_level("".join("!!! %s\n" % line for line in msg),
16287 level=logging.ERROR, noiselevel=-1)
16290 return action_info(settings, trees, myopts, valid_atoms)
16292 # "update", "system", or just process files:
16294 validate_ebuild_environment(trees)
16297 if x.startswith(SETPREFIX) or \
16298 is_valid_package_atom(x):
16300 if x[:1] == os.sep:
16308 msg.append("'%s' is not a valid package atom." % (x,))
16309 msg.append("Please check ebuild(5) for full details.")
16310 writemsg_level("".join("!!! %s\n" % line for line in msg),
16311 level=logging.ERROR, noiselevel=-1)
16314 if "--pretend" not in myopts:
16315 display_news_notification(root_config, myopts)
16316 retval = action_build(settings, trees, mtimedb,
16317 myopts, myaction, myfiles, spinner)
16318 root_config = trees[settings["ROOT"]]["root_config"]
16319 post_emerge(root_config, myopts, mtimedb, retval)